VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@82812

Last change on this file since 82812 was 81786, checked in by vboxsync, 5 years ago

VMM: Nested VMX: bugref:9180 Implement VMX-preemption timer for nested-guest. It's still disabled though.

/* $Id: IEMAll.cpp 81786 2019-11-12 04:20:34Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - All Contexts.
 */

/*
 * Copyright (C) 2011-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/** @page pg_iem    IEM - Interpreted Execution Manager
 *
 * The interpreted execution manager (IEM) is for executing short guest code
 * sequences that are causing too many exits / virtualization traps.  It will
 * also be used to interpret single instructions, thus replacing the selective
 * interpreters in EM and IOM.
 *
 * Design goals:
 *      - Relatively small footprint, although we favour speed and correctness
 *        over size.
 *      - Reasonably fast.
 *      - Correctly handle lock prefixed instructions.
 *      - Complete instruction set - eventually.
 *      - Refactorable into a recompiler, maybe.
 *      - Replace EMInterpret*.
 *
 * Using the existing disassembler has been considered, however this is thought
 * to conflict with speed as the disassembler chews things a bit too much while
 * leaving us with a somewhat complicated state to interpret afterwards.
 *
 *
 * The current code is very much work in progress. You've been warned!
 *
 *
 * @section sec_iem_fpu_instr   FPU Instructions
 *
 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
 * same or equivalent instructions on the host FPU.  To make life easy, we also
 * let the FPU prioritize the unmasked exceptions for us.  This however, only
 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
 * can trigger spurious FPU exceptions.
 *
 * The guest FPU state is not loaded into the host CPU and kept there till we
 * leave IEM because the calling conventions have declared an all year open
 * season on much of the FPU state.  For instance an innocent looking call to
 * memcpy might end up using a whole bunch of XMM or MM registers if the
 * particular implementation finds it worthwhile.
 *
 *
 * @section sec_iem_logging     Logging
 *
 * The IEM code uses the \"IEM\" log group for the main logging. The different
 * logging levels/flags are generally used for the following purposes:
 *      - Level 1  (Log)  : Errors, exceptions, interrupts and such major events.
 *      - Flow  (LogFlow) : Basic enter/exit IEM state info.
 *      - Level 2  (Log2) : ?
 *      - Level 3  (Log3) : More detailed enter/exit IEM state info.
 *      - Level 4  (Log4) : Decoding mnemonics w/ EIP.
 *      - Level 5  (Log5) : Decoding details.
 *      - Level 6  (Log6) : Enables/disables the lockstep comparison with REM.
 *      - Level 7  (Log7) : iret++ execution logging.
 *      - Level 8  (Log8) : Memory writes.
 *      - Level 9  (Log9) : Memory reads.
 *
 */
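
/*
 * Editor's illustrative sketch (not part of the original file): how the log
 * levels tabulated above are typically exercised.  The messages and the
 * helper are hypothetical; only the Log/LogFlow macros and the level
 * conventions come from the table.
 */
#if 0
static void iemExampleLogUsage(PVMCPUCC pVCpu)
{
    LogFlow(("iemExampleLogUsage: enter\n"));                  /* Flow: enter/exit info. */
    Log(("iemExampleLogUsage: raising #GP(0)\n"));             /* Level 1: major events. */
    Log4(("decode: %04x:%08RX64 nop\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); /* Level 4: mnemonics w/ EIP. */
    Log8(("memwrite: 4 bytes\n"));                             /* Level 8: memory writes. */
}
#endif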

//#define IEM_LOG_MEMORY_WRITES
#define IEM_IMPLEMENTS_TASKSWITCH

/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
#ifdef _MSC_VER
# pragma warning(disable:4505)
#endif


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/** @typedef PFNIEMOP
 * Pointer to an opcode decoder function.
 */

/** @def FNIEMOP_DEF
 * Define an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL.
 *
 * @param   a_Name      The function name.
 */

/** @typedef PFNIEMOPRM
 * Pointer to an opcode decoder function with RM byte.
 */

/** @def FNIEMOPRM_DEF
 * Define an opcode decoder function with RM byte.
 *
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier.  See FNIEMOP_CALL_1.
 *
 * @param   a_Name      The function name.
 */

#if defined(__GNUC__) && defined(RT_ARCH_X86)
typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF

#elif defined(__GNUC__)
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#else
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF

#endif
#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
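
/*
 * Editor's illustrative sketch (not part of the original file): defining a
 * decoder function with FNIEMOP_DEF and invoking it through FNIEMOP_CALL.
 * The handler names are hypothetical; the macro usage pattern is the point.
 */
#if 0
FNIEMOP_DEF(iemOp_example_nop)
{
    /* The implicit pVCpu parameter is supplied by the macro. */
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_example_forwarder)
{
    /* FNIEMOP_CALL hides the argument list so it can be changed centrally. */
    return FNIEMOP_CALL(iemOp_example_nop);
}
#endif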


/**
 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
 */
typedef union IEMSELDESC
{
    /** The legacy view. */
    X86DESC     Legacy;
    /** The long mode view. */
    X86DESC64   Long;
} IEMSELDESC;
/** Pointer to a selector descriptor table entry. */
typedef IEMSELDESC *PIEMSELDESC;

/**
 * CPU exception classes.
 */
typedef enum IEMXCPTCLASS
{
    IEMXCPTCLASS_BENIGN,
    IEMXCPTCLASS_CONTRIBUTORY,
    IEMXCPTCLASS_PAGE_FAULT,
    IEMXCPTCLASS_DOUBLE_FAULT
} IEMXCPTCLASS;


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def IEM_WITH_SETJMP
 * Enables alternative status code handling using setjmps.
 *
 * This adds a bit of expense via the setjmp() call since it saves all the
 * non-volatile registers.  However, it eliminates return code checks and allows
 * for more optimal return value passing (return regs instead of stack buffer).
 */
#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
# define IEM_WITH_SETJMP
#endif
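
/*
 * Editor's illustrative sketch (not part of the original file): the two
 * error-propagation styles IEM_WITH_SETJMP selects between.  Both helper
 * names and the locals are hypothetical.
 */
#if 0
/* Status-code style: every call site checks and propagates VBOXSTRICTRC. */
VBOXSTRICTRC rcStrict = iemExampleFetchU32(pVCpu, &u32Value);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;

/* Setjmp style: the helper longjmps on failure, so the straight-line path
   carries no checks and the value comes back in a register. */
uint32_t const u32Value2 = iemExampleFetchU32Jmp(pVCpu);
#endif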

/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
 * due to GCC lacking knowledge about the value range of a switch. */
#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)

/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
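
/*
 * Editor's illustrative sketch (not part of the original file): the macro in
 * an exhaustive switch; cbAddr is a hypothetical local.
 */
#if 0
switch (pVCpu->iem.s.enmEffAddrMode)
{
    case IEMMODE_16BIT: cbAddr = 2; break;
    case IEMMODE_32BIT: cbAddr = 4; break;
    case IEMMODE_64BIT: cbAddr = 8; break;
    IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* silences 'may be used uninitialized' for cbAddr */
}
#endif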

/**
 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    do { \
        /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif

/**
 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion using the supplied logger statement.
 *
 * @param   a_LoggerArgs    What to log on failure.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    do { \
        LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
        /*LogFunc(a_LoggerArgs);*/ \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif

/**
 * Call an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please.  See FNIEMOP_DEF.
 */
#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)

/**
 * Call a common opcode decoder function taking one extra argument.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please.  See FNIEMOP_DEF_1.
 */
#define FNIEMOP_CALL_1(a_pfn, a0)           (a_pfn)(pVCpu, a0)

/**
 * Call a common opcode decoder function taking two extra arguments.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please.  See FNIEMOP_DEF_2.
 */
#define FNIEMOP_CALL_2(a_pfn, a0, a1)       (a_pfn)(pVCpu, a0, a1)

/**
 * Check if we're currently executing in real or virtual 8086 mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu         The IEM state of the current CPU.
 */
#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu)    (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if we're currently executing in virtual 8086 mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_V86_MODE(a_pVCpu)            (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if we're currently executing in long mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_LONG_MODE(a_pVCpu)           (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if we're currently executing in a 64-bit code segment.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_64BIT_CODE(a_pVCpu)          (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if we're currently executing in real mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_REAL_MODE(a_pVCpu)           (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
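
/*
 * Editor's illustrative sketch (not part of the original file): gating an
 * instruction on CPU mode with the predicates above.  The handler is
 * hypothetical, and iemRaiseUndefinedOpcode is assumed to be one of the
 * raise helpers defined elsewhere in this file.
 */
#if 0
FNIEMOP_DEF(iemOp_example_protected_mode_only)
{
    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))   /* #UD outside protected mode. */
        return iemRaiseUndefinedOpcode(pVCpu);
    return VINF_SUCCESS;
}
#endif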

/**
 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
 * @returns PCCPUMFEATURES
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))

/**
 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
 * @returns PCCPUMFEATURES
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu)  (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))

/**
 * Evaluates to true if we're presenting an Intel CPU to the guest.
 */
#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu)     ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )

/**
 * Evaluates to true if we're presenting an AMD CPU to the guest.
 */
#define IEM_IS_GUEST_CPU_AMD(a_pVCpu)       ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )

/**
 * Check if the address is canonical.
 */
#define IEM_IS_CANONICAL(a_u64Addr)         X86_IS_CANONICAL(a_u64Addr)

/**
 * Gets the effective VEX.VVVV value.
 *
 * The 4th bit is ignored if not 64-bit code.
 * @returns effective V-register value.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
    ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)

/** @def IEM_USE_UNALIGNED_DATA_ACCESS
 * Use unaligned accesses instead of elaborate byte assembly. */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
# define IEM_USE_UNALIGNED_DATA_ACCESS
#endif
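
/*
 * Editor's illustrative sketch (not part of the original file): what the
 * define above selects between when reading a 32-bit value from a byte
 * buffer (pbSrc is hypothetical).
 */
#if 0
uint32_t u32Value;
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
u32Value = *(uint32_t const *)pbSrc;   /* x86/AMD64 handle unaligned loads in hardware. */
# else
u32Value = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
# endif
#endif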

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX

/**
 * Check if the guest has entered VMX root operation.
 */
# define IEM_VMX_IS_ROOT_MODE(a_pVCpu)      (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if the guest has entered VMX non-root operation.
 */
# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)  (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if the nested-guest has the given Pin-based VM-execution control set.
 */
# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
    (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))

/**
 * Check if the nested-guest has the given Processor-based VM-execution control set.
 */
# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
    (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))

/**
 * Check if the nested-guest has the given Secondary Processor-based VM-execution
 * control set.
 */
# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
    (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))

/**
 * Invokes the VMX VM-exit handler for an instruction intercept.
 */
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
    do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for an instruction intercept where the
 * instruction provides additional VM-exit information.
 */
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
    do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for a task switch.
 */
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
    do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for MWAIT.
 */
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
    do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler.
 */
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
    do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)

#else
# define IEM_VMX_IS_ROOT_MODE(a_pVCpu)                                              (false)
# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)                                          (false)
# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr)                                 (false)
# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr)                                (false)
# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr)                               (false)
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr)                do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr)  do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr)    do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr)              do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual)       do { return VERR_VMX_IPE_1; } while (0)

#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Check if an SVM control/instruction intercept is set.
 */
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
    (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))

/**
 * Check if an SVM read CRx intercept is set.
 */
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Check if an SVM write CRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Check if an SVM read DRx intercept is set.
 */
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Check if an SVM write DRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Check if an SVM exception intercept is set.
 */
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
    (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))

/**
 * Invokes the SVM \#VMEXIT handler for the nested-guest.
 */
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)

/**
 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
 * corresponding decode assist information.
 */
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
    do \
    { \
        uint64_t uExitInfo1; \
        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
        else \
            uExitInfo1 = 0; \
        IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
    } while (0)

/** Checks and handles SVM nested-guest instruction intercept and updates
 *  NRIP if needed.
 */
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    do \
    { \
        if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)
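
/*
 * Editor's illustrative sketch (not part of the original file): how an
 * instruction handler would typically use the intercept check above, here
 * for CPUID with zero exit-info values.
 */
#if 0
IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID,
                              0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif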

/** Checks and handles SVM nested-guest CR0 read intercept. */
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
    do \
    { \
        if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
        { /* probably likely */ } \
        else \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)

/**
 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
 */
# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
    do { \
        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
    } while (0)

#else
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                        (false)
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                           (false)
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                          (false)
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                           (false)
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                          (false)
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                          (false)
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)       do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)      do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { } while (0)
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2)      do { } while (0)
# define IEM_SVM_UPDATE_NRIP(a_pVCpu)                                               do { } while (0)

#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */


/** Function table for the ADD instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
{
    iemAImpl_add_u8,  iemAImpl_add_u8_locked,
    iemAImpl_add_u16, iemAImpl_add_u16_locked,
    iemAImpl_add_u32, iemAImpl_add_u32_locked,
    iemAImpl_add_u64, iemAImpl_add_u64_locked
};

/** Function table for the ADC instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
{
    iemAImpl_adc_u8,  iemAImpl_adc_u8_locked,
    iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
    iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
    iemAImpl_adc_u64, iemAImpl_adc_u64_locked
};

/** Function table for the SUB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
{
    iemAImpl_sub_u8,  iemAImpl_sub_u8_locked,
    iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
    iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
    iemAImpl_sub_u64, iemAImpl_sub_u64_locked
};

/** Function table for the SBB instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
{
    iemAImpl_sbb_u8,  iemAImpl_sbb_u8_locked,
    iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
    iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
    iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
};

/** Function table for the OR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
{
    iemAImpl_or_u8,  iemAImpl_or_u8_locked,
    iemAImpl_or_u16, iemAImpl_or_u16_locked,
    iemAImpl_or_u32, iemAImpl_or_u32_locked,
    iemAImpl_or_u64, iemAImpl_or_u64_locked
};

/** Function table for the XOR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
{
    iemAImpl_xor_u8,  iemAImpl_xor_u8_locked,
    iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
    iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
    iemAImpl_xor_u64, iemAImpl_xor_u64_locked
};

/** Function table for the AND instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
{
    iemAImpl_and_u8,  iemAImpl_and_u8_locked,
    iemAImpl_and_u16, iemAImpl_and_u16_locked,
    iemAImpl_and_u32, iemAImpl_and_u32_locked,
    iemAImpl_and_u64, iemAImpl_and_u64_locked
};

/** Function table for the CMP instruction.
 * @remarks Making operand order ASSUMPTIONS.
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
{
    iemAImpl_cmp_u8,  NULL,
    iemAImpl_cmp_u16, NULL,
    iemAImpl_cmp_u32, NULL,
    iemAImpl_cmp_u64, NULL
};

/** Function table for the TEST instruction.
 * @remarks Making operand order ASSUMPTIONS.
 */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
{
    iemAImpl_test_u8,  NULL,
    iemAImpl_test_u16, NULL,
    iemAImpl_test_u32, NULL,
    iemAImpl_test_u64, NULL
};

/** Function table for the BT instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
{
    NULL,            NULL,
    iemAImpl_bt_u16, NULL,
    iemAImpl_bt_u32, NULL,
    iemAImpl_bt_u64, NULL
};

/** Function table for the BTC instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
{
    NULL,             NULL,
    iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
    iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
    iemAImpl_btc_u64, iemAImpl_btc_u64_locked
};

/** Function table for the BTR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
{
    NULL,             NULL,
    iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
    iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
    iemAImpl_btr_u64, iemAImpl_btr_u64_locked
};

/** Function table for the BTS instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
{
    NULL,             NULL,
    iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
    iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
    iemAImpl_bts_u64, iemAImpl_bts_u64_locked
};

/** Function table for the BSF instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
{
    NULL,             NULL,
    iemAImpl_bsf_u16, NULL,
    iemAImpl_bsf_u32, NULL,
    iemAImpl_bsf_u64, NULL
};

/** Function table for the BSR instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
{
    NULL,             NULL,
    iemAImpl_bsr_u16, NULL,
    iemAImpl_bsr_u32, NULL,
    iemAImpl_bsr_u64, NULL
};

/** Function table for the IMUL instruction. */
IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
{
    NULL,                  NULL,
    iemAImpl_imul_two_u16, NULL,
    iemAImpl_imul_two_u32, NULL,
    iemAImpl_imul_two_u64, NULL
};

/** Group 1 /r lookup table. */
IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
{
    &g_iemAImpl_add,
    &g_iemAImpl_or,
    &g_iemAImpl_adc,
    &g_iemAImpl_sbb,
    &g_iemAImpl_and,
    &g_iemAImpl_sub,
    &g_iemAImpl_xor,
    &g_iemAImpl_cmp
};
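
/*
 * Editor's illustrative sketch (not part of the original file): dispatching
 * a group-1 instruction (opcodes 0x80..0x83) through the table above using
 * the /r (reg) field of the ModR/M byte.  The handler is hypothetical.
 */
#if 0
FNIEMOPRM_DEF(iemOp_example_Grp1)
{
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
    /* ...decode the operands, then invoke pImpl->pfnNormalU8/U16/U32/U64... */
    RT_NOREF(pImpl);
    return VINF_SUCCESS;
}
#endif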

/** Function table for the INC instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
{
    iemAImpl_inc_u8,  iemAImpl_inc_u8_locked,
    iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
    iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
    iemAImpl_inc_u64, iemAImpl_inc_u64_locked
};

/** Function table for the DEC instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
{
    iemAImpl_dec_u8,  iemAImpl_dec_u8_locked,
    iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
    iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
    iemAImpl_dec_u64, iemAImpl_dec_u64_locked
};

/** Function table for the NEG instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
{
    iemAImpl_neg_u8,  iemAImpl_neg_u8_locked,
    iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
    iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
    iemAImpl_neg_u64, iemAImpl_neg_u64_locked
};

/** Function table for the NOT instruction. */
IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
{
    iemAImpl_not_u8,  iemAImpl_not_u8_locked,
    iemAImpl_not_u16, iemAImpl_not_u16_locked,
    iemAImpl_not_u32, iemAImpl_not_u32_locked,
    iemAImpl_not_u64, iemAImpl_not_u64_locked
};


/** Function table for the ROL instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
{
    iemAImpl_rol_u8,
    iemAImpl_rol_u16,
    iemAImpl_rol_u32,
    iemAImpl_rol_u64
};

/** Function table for the ROR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
{
    iemAImpl_ror_u8,
    iemAImpl_ror_u16,
    iemAImpl_ror_u32,
    iemAImpl_ror_u64
};

/** Function table for the RCL instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
{
    iemAImpl_rcl_u8,
    iemAImpl_rcl_u16,
    iemAImpl_rcl_u32,
    iemAImpl_rcl_u64
};

/** Function table for the RCR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
{
    iemAImpl_rcr_u8,
    iemAImpl_rcr_u16,
    iemAImpl_rcr_u32,
    iemAImpl_rcr_u64
};

/** Function table for the SHL instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
{
    iemAImpl_shl_u8,
    iemAImpl_shl_u16,
    iemAImpl_shl_u32,
    iemAImpl_shl_u64
};

/** Function table for the SHR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
{
    iemAImpl_shr_u8,
    iemAImpl_shr_u16,
    iemAImpl_shr_u32,
    iemAImpl_shr_u64
};

/** Function table for the SAR instruction. */
IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
{
    iemAImpl_sar_u8,
    iemAImpl_sar_u16,
    iemAImpl_sar_u32,
    iemAImpl_sar_u64
};


/** Function table for the MUL instruction. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
{
    iemAImpl_mul_u8,
    iemAImpl_mul_u16,
    iemAImpl_mul_u32,
    iemAImpl_mul_u64
};

/** Function table for the IMUL instruction working implicitly on rAX. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
{
    iemAImpl_imul_u8,
    iemAImpl_imul_u16,
    iemAImpl_imul_u32,
    iemAImpl_imul_u64
};

/** Function table for the DIV instruction. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
{
    iemAImpl_div_u8,
    iemAImpl_div_u16,
    iemAImpl_div_u32,
    iemAImpl_div_u64
};

/** Function table for the IDIV instruction. */
IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
{
    iemAImpl_idiv_u8,
    iemAImpl_idiv_u16,
    iemAImpl_idiv_u32,
    iemAImpl_idiv_u64
};

/** Function table for the SHLD instruction. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
{
    iemAImpl_shld_u16,
    iemAImpl_shld_u32,
    iemAImpl_shld_u64,
};

/** Function table for the SHRD instruction. */
IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
{
    iemAImpl_shrd_u16,
    iemAImpl_shrd_u32,
    iemAImpl_shrd_u64,
};


/** Function table for the PUNPCKLBW instruction. */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw  = { iemAImpl_punpcklbw_u64,  iemAImpl_punpcklbw_u128 };
/** Function table for the PUNPCKLWD instruction. */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd  = { iemAImpl_punpcklwd_u64,  iemAImpl_punpcklwd_u128 };
/** Function table for the PUNPCKLDQ instruction. */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq  = { iemAImpl_punpckldq_u64,  iemAImpl_punpckldq_u128 };
/** Function table for the PUNPCKLQDQ instruction. */
IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL,                    iemAImpl_punpcklqdq_u128 };

/** Function table for the PUNPCKHBW instruction. */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw  = { iemAImpl_punpckhbw_u64,  iemAImpl_punpckhbw_u128 };
/** Function table for the PUNPCKHWD instruction. */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd  = { iemAImpl_punpckhwd_u64,  iemAImpl_punpckhwd_u128 };
/** Function table for the PUNPCKHDQ instruction. */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq  = { iemAImpl_punpckhdq_u64,  iemAImpl_punpckhdq_u128 };
/** Function table for the PUNPCKHQDQ instruction. */
IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL,                    iemAImpl_punpckhqdq_u128 };

/** Function table for the PXOR instruction. */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor    = { iemAImpl_pxor_u64,    iemAImpl_pxor_u128 };
/** Function table for the PCMPEQB instruction. */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
/** Function table for the PCMPEQW instruction. */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
/** Function table for the PCMPEQD instruction. */
IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };


#if defined(IEM_LOG_MEMORY_WRITES)
/** What IEM just wrote. */
uint8_t g_abIemWrote[256];
/** How much IEM just wrote. */
size_t g_cbIemWrote;
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
IEM_STATIC VBOXSTRICTRC     iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
/*IEM_STATIC VBOXSTRICTRC   iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
IEM_STATIC VBOXSTRICTRC     iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
IEM_STATIC VBOXSTRICTRC     iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
IEM_STATIC VBOXSTRICTRC     iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
IEM_STATIC VBOXSTRICTRC     iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
IEM_STATIC VBOXSTRICTRC     iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
IEM_STATIC VBOXSTRICTRC     iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
#ifdef IEM_WITH_SETJMP
DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
#endif

IEM_STATIC VBOXSTRICTRC     iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
IEM_STATIC VBOXSTRICTRC     iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
IEM_STATIC VBOXSTRICTRC     iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
IEM_STATIC VBOXSTRICTRC     iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
IEM_STATIC VBOXSTRICTRC     iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
IEM_STATIC VBOXSTRICTRC     iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
IEM_STATIC VBOXSTRICTRC     iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
IEM_STATIC VBOXSTRICTRC     iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
IEM_STATIC VBOXSTRICTRC     iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
IEM_STATIC uint16_t         iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
IEM_STATIC uint64_t         iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
IEM_STATIC VBOXSTRICTRC     iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
IEM_STATIC VBOXSTRICTRC     iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
IEM_STATIC VBOXSTRICTRC     iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
IEM_STATIC VBOXSTRICTRC     iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
IEM_STATIC VBOXSTRICTRC     iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
IEM_STATIC VBOXSTRICTRC     iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
IEM_STATIC VBOXSTRICTRC     iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
#endif


/**
 * Sets the pass up status.
 *
 * @returns VINF_SUCCESS.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   rcPassUp            The pass up status.  Must be informational.
 *                              VINF_SUCCESS is not allowed.
 */
IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
{
    AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);

    int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
    if (rcOldPassUp == VINF_SUCCESS)
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    /* If both are EM scheduling codes, use EM priority rules. */
    else if (   rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
             && rcPassUp    >= VINF_EM_FIRST && rcPassUp    <= VINF_EM_LAST)
    {
        if (rcPassUp < rcOldPassUp)
        {
            Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
            pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
        }
        else
            Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    }
    /* Override EM scheduling with specific status code. */
    else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
    {
        Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
        pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
    }
    /* Don't override specific status code, first come first served. */
    else
        Log(("IEM: rcPassUp=%Rrc  rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
    return VINF_SUCCESS;
}
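
/*
 * Editor's illustrative sketch (not part of the original file): the typical
 * caller pattern, as also used by the opcode prefetch code below - an
 * informational PGM status is recorded so it can be surfaced once the
 * instruction has completed.
 */
#if 0
if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif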


/**
 * Calculates the CPU mode.
 *
 * This is mainly for updating IEMCPU::enmCpuMode.
 *
 * @returns CPU mode.
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 */
DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
{
    if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
        return IEMMODE_64BIT;
    if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
        return IEMMODE_32BIT;
    return IEMMODE_16BIT;
}


/**
 * Initializes the execution state.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   fBypassHandlers     Whether to bypass access handlers.
 *
 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
 *          side-effects in strict builds.
 */
DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    pVCpu->iem.s.uCpl               = CPUMGetGuestCPL(pVCpu);
    pVCpu->iem.s.enmCpuMode         = iemCalcCpuMode(pVCpu);
#ifdef VBOX_STRICT
    pVCpu->iem.s.enmDefAddrMode     = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffAddrMode     = (IEMMODE)0xfe;
    pVCpu->iem.s.enmDefOpSize       = (IEMMODE)0xfe;
    pVCpu->iem.s.enmEffOpSize       = (IEMMODE)0xfe;
    pVCpu->iem.s.fPrefixes          = 0xfeedbeef;
    pVCpu->iem.s.uRexReg            = 127;
    pVCpu->iem.s.uRexB              = 127;
    pVCpu->iem.s.offModRm           = 127;
    pVCpu->iem.s.uRexIndex          = 127;
    pVCpu->iem.s.iEffSeg            = 127;
    pVCpu->iem.s.idxPrefix          = 127;
    pVCpu->iem.s.uVex3rdReg         = 127;
    pVCpu->iem.s.uVexLength         = 127;
    pVCpu->iem.s.fEvexStuff         = 127;
    pVCpu->iem.s.uFpuOpcode         = UINT16_MAX;
# ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.offInstrNextByte   = UINT16_MAX;
    pVCpu->iem.s.pbInstrBuf         = NULL;
    pVCpu->iem.s.cbInstrBuf         = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal    = UINT16_MAX;
    pVCpu->iem.s.offCurInstrStart   = INT16_MAX;
    pVCpu->iem.s.uInstrBufPc        = UINT64_C(0xc0ffc0ffcff0c0ff);
# else
    pVCpu->iem.s.offOpcode          = 127;
    pVCpu->iem.s.cbOpcode           = 127;
# endif
#endif

    pVCpu->iem.s.cActiveMappings    = 0;
    pVCpu->iem.s.iNextMapping       = 0;
    pVCpu->iem.s.rcPassUp           = VINF_SUCCESS;
    pVCpu->iem.s.fBypassHandlers    = fBypassHandlers;
#if 0
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
    if (   CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
        && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
    {
        PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
        Assert(pVmcs);
        RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
        if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
        {
            int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
                                                pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
                                                NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
            AssertRC(rc);
        }
    }
#endif
#endif
}

#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
/**
 * Performs a minimal reinitialization of the execution state.
 *
 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 * 'world-switch' type operations on the CPU.  Currently only nested
 * hardware-virtualization uses it.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
{
    IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
    uint8_t const uCpl    = CPUMGetGuestCPL(pVCpu);

    pVCpu->iem.s.uCpl             = uCpl;
    pVCpu->iem.s.enmCpuMode       = enmMode;
    pVCpu->iem.s.enmDefAddrMode   = enmMode;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode   = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize = enmMode;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize = enmMode;
    }
    pVCpu->iem.s.iEffSeg          = X86_SREG_DS;
#ifndef IEM_WITH_CODE_TLB
    /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
    pVCpu->iem.s.offOpcode        = 0;
    pVCpu->iem.s.cbOpcode         = 0;
#endif
    pVCpu->iem.s.rcPassUp         = VINF_SUCCESS;
}
#endif

/**
 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 */
DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
{
    /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
#ifdef VBOX_STRICT
# ifdef IEM_WITH_CODE_TLB
    NOREF(pVCpu);
# else
    pVCpu->iem.s.cbOpcode = 0;
# endif
#else
    NOREF(pVCpu);
#endif
}


/**
 * Initializes the decoder state.
 *
 * iemReInitDecoder is mostly a copy of this function.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the
 *                              calling thread.
 * @param   fBypassHandlers     Whether to bypass access handlers.
 */
DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers)
{
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    pVCpu->iem.s.uCpl             = CPUMGetGuestCPL(pVCpu);
    IEMMODE enmMode = iemCalcCpuMode(pVCpu);
    pVCpu->iem.s.enmCpuMode       = enmMode;
    pVCpu->iem.s.enmDefAddrMode   = enmMode;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode   = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize = enmMode;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes        = 0;
    pVCpu->iem.s.uRexReg          = 0;
    pVCpu->iem.s.uRexB            = 0;
    pVCpu->iem.s.uRexIndex        = 0;
    pVCpu->iem.s.idxPrefix        = 0;
    pVCpu->iem.s.uVex3rdReg       = 0;
    pVCpu->iem.s.uVexLength       = 0;
    pVCpu->iem.s.fEvexStuff       = 0;
    pVCpu->iem.s.iEffSeg          = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    pVCpu->iem.s.pbInstrBuf       = NULL;
    pVCpu->iem.s.offInstrNextByte = 0;
    pVCpu->iem.s.offCurInstrStart = 0;
# ifdef VBOX_STRICT
    pVCpu->iem.s.cbInstrBuf       = UINT16_MAX;
    pVCpu->iem.s.cbInstrBufTotal  = UINT16_MAX;
    pVCpu->iem.s.uInstrBufPc      = UINT64_C(0xc0ffc0ffcff0c0ff);
# endif
#else
    pVCpu->iem.s.offOpcode        = 0;
    pVCpu->iem.s.cbOpcode         = 0;
#endif
    pVCpu->iem.s.offModRm         = 0;
    pVCpu->iem.s.cActiveMappings  = 0;
    pVCpu->iem.s.iNextMapping     = 0;
    pVCpu->iem.s.rcPassUp         = VINF_SUCCESS;
    pVCpu->iem.s.fBypassHandlers  = fBypassHandlers;

#ifdef DBGFTRACE_ENABLED
    switch (enmMode)
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}


/**
 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
 *
 * This is mostly a copy of iemInitDecoder.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 */
DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
{
    Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));

    pVCpu->iem.s.uCpl             = CPUMGetGuestCPL(pVCpu);   /** @todo this should be updated during execution! */
    IEMMODE enmMode = iemCalcCpuMode(pVCpu);
    pVCpu->iem.s.enmCpuMode       = enmMode;                  /** @todo this should be updated during execution! */
    pVCpu->iem.s.enmDefAddrMode   = enmMode;  /** @todo check if this is correct... */
    pVCpu->iem.s.enmEffAddrMode   = enmMode;
    if (enmMode != IEMMODE_64BIT)
    {
        pVCpu->iem.s.enmDefOpSize = enmMode;  /** @todo check if this is correct... */
        pVCpu->iem.s.enmEffOpSize = enmMode;
    }
    else
    {
        pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
        pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
    }
    pVCpu->iem.s.fPrefixes        = 0;
    pVCpu->iem.s.uRexReg          = 0;
    pVCpu->iem.s.uRexB            = 0;
    pVCpu->iem.s.uRexIndex        = 0;
    pVCpu->iem.s.idxPrefix        = 0;
    pVCpu->iem.s.uVex3rdReg       = 0;
    pVCpu->iem.s.uVexLength       = 0;
    pVCpu->iem.s.fEvexStuff       = 0;
    pVCpu->iem.s.iEffSeg          = X86_SREG_DS;
#ifdef IEM_WITH_CODE_TLB
    if (pVCpu->iem.s.pbInstrBuf)
    {
        uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                        ? pVCpu->cpum.GstCtx.rip
                        : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
                     - pVCpu->iem.s.uInstrBufPc;
        if (off < pVCpu->iem.s.cbInstrBufTotal)
        {
            pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
            pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
            if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
                pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
            else
                pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
        }
        else
        {
            pVCpu->iem.s.pbInstrBuf       = NULL;
            pVCpu->iem.s.offInstrNextByte = 0;
            pVCpu->iem.s.offCurInstrStart = 0;
            pVCpu->iem.s.cbInstrBuf       = 0;
            pVCpu->iem.s.cbInstrBufTotal  = 0;
        }
    }
    else
    {
        pVCpu->iem.s.offInstrNextByte = 0;
        pVCpu->iem.s.offCurInstrStart = 0;
        pVCpu->iem.s.cbInstrBuf       = 0;
        pVCpu->iem.s.cbInstrBufTotal  = 0;
    }
#else
    pVCpu->iem.s.cbOpcode         = 0;
    pVCpu->iem.s.offOpcode        = 0;
#endif
    pVCpu->iem.s.offModRm         = 0;
    Assert(pVCpu->iem.s.cActiveMappings == 0);
    pVCpu->iem.s.iNextMapping     = 0;
    Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
    Assert(pVCpu->iem.s.fBypassHandlers == false);

#ifdef DBGFTRACE_ENABLED
    switch (enmMode)
    {
        case IEMMODE_64BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
            break;
        case IEMMODE_32BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
        case IEMMODE_16BIT:
            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
            break;
    }
#endif
}


1370/**
1371 * Prefetch opcodes the first time when starting executing.
1372 *
1373 * @returns Strict VBox status code.
1374 * @param pVCpu The cross context virtual CPU structure of the
1375 * calling thread.
1376 * @param fBypassHandlers Whether to bypass access handlers.
1377 */
1378IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers)
1379{
1380 iemInitDecoder(pVCpu, fBypassHandlers);
1381
1382#ifdef IEM_WITH_CODE_TLB
1383 /** @todo Do ITLB lookup here. */
1384
1385#else /* !IEM_WITH_CODE_TLB */
1386
1387 /*
1388 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1389 *
1390 * First translate CS:rIP to a physical address.
1391 */
1392 uint32_t cbToTryRead;
1393 RTGCPTR GCPtrPC;
1394 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1395 {
1396 cbToTryRead = PAGE_SIZE;
1397 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1398 if (IEM_IS_CANONICAL(GCPtrPC))
1399 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1400 else
1401 return iemRaiseGeneralProtectionFault0(pVCpu);
1402 }
1403 else
1404 {
1405 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1406 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1407 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1408 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1409 else
1410 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1411 if (cbToTryRead) { /* likely */ }
1412 else /* overflowed */
1413 {
1414 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1415 cbToTryRead = UINT32_MAX;
1416 }
1417 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1418 Assert(GCPtrPC <= UINT32_MAX);
1419 }
1420
1421 RTGCPHYS GCPhys;
1422 uint64_t fFlags;
1423 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1424 if (RT_SUCCESS(rc)) { /* probable */ }
1425 else
1426 {
1427 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1428 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1429 }
1430 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1431 else
1432 {
1433 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1434 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1435 }
1436 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1437 else
1438 {
1439 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1440 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1441 }
1442 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1443 /** @todo Check reserved bits and such stuff. PGM is better at doing
1444 * that, so do it when implementing the guest virtual address
1445 * TLB... */
1446
1447 /*
1448 * Read the bytes at this address.
1449 */
1450 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1451 if (cbToTryRead > cbLeftOnPage)
1452 cbToTryRead = cbLeftOnPage;
1453 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1454 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1455
1456 if (!pVCpu->iem.s.fBypassHandlers)
1457 {
1458 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1459 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1460 { /* likely */ }
1461 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1462 {
1463 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1464              GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1465 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1466 }
1467 else
1468 {
1469 Log((RT_SUCCESS(rcStrict)
1470 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1471 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1472              GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1473 return rcStrict;
1474 }
1475 }
1476 else
1477 {
1478 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1479 if (RT_SUCCESS(rc))
1480 { /* likely */ }
1481 else
1482 {
1483 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1484              GCPtrPC, GCPhys, cbToTryRead, rc));
1485 return rc;
1486 }
1487 }
1488 pVCpu->iem.s.cbOpcode = cbToTryRead;
1489#endif /* !IEM_WITH_CODE_TLB */
1490 return VINF_SUCCESS;
1491}
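/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a minimal caller pairing the prefetch above with the opcode fetch macros
 * defined further down.  The names iemExecOneSketch and
 * iemDecodeAndDispatchSketch are assumptions for illustration; only
 * iemInitDecoderAndPrefetchOpcodes and IEM_OPCODE_GET_NEXT_U8 come from
 * this file.
 *
 * @code
 *     IEM_STATIC VBOXSTRICTRC iemExecOneSketch(PVMCPUCC pVCpu)
 *     {
 *         VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); // don't bypass handlers
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         uint8_t bOpcode;
 *         IEM_OPCODE_GET_NEXT_U8(&bOpcode);                  // returns on fetch failure
 *         return iemDecodeAndDispatchSketch(pVCpu, bOpcode); // hypothetical dispatcher
 *     }
 * @endcode
 */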
1492
1493
1494/**
1495 * Invalidates the IEM TLBs.
1496 *
1497 * This is called internally as well as by PGM when moving GC mappings.
1498 *
1500 * @param pVCpu The cross context virtual CPU structure of the calling
1501 * thread.
1502 * @param fVmm Set when PGM calls us with a remapping.
1503 */
1504VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1505{
1506#ifdef IEM_WITH_CODE_TLB
1507 pVCpu->iem.s.cbInstrBufTotal = 0;
1508 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1509 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1510 { /* very likely */ }
1511 else
1512 {
1513 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1515 while (i-- > 0)
1516 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1517 }
1518#endif
1519
1520#ifdef IEM_WITH_DATA_TLB
1521 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1522 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1523 { /* very likely */ }
1524 else
1525 {
1526 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1527 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1528 while (i-- > 0)
1529 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1530 }
1531#endif
1532 NOREF(pVCpu); NOREF(fVmm);
1533}
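/*
 * Editorial note (sketch, not part of the original source): the revision
 * bump above invalidates all entries lazily because lookup tags embed the
 * revision; only on wrap-around to zero are the tags flushed eagerly.
 * The tag layout assumed here matches iemOpcodeFetchBytesJmp below.
 *
 * @code
 *     // Tag for linear address GCPtr under the current code TLB revision:
 *     uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     // After IEMTlbInvalidateAll bumps uTlbRevision, every stored uTag
 *     // mismatches, so all 256 entries miss without being written.
 * @endcode
 */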
1534
1535
1536/**
1537 * Invalidates a page in the TLBs.
1538 *
1539 * @param pVCpu The cross context virtual CPU structure of the calling
1540 * thread.
1541 * @param GCPtr The address of the page to invalidate.
1542 */
1543VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1544{
1545#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1546 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1547 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1549 uintptr_t idx = (uint8_t)GCPtr;
1550
1551# ifdef IEM_WITH_CODE_TLB
1552 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1553 {
1554 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1555 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1556 pVCpu->iem.s.cbInstrBufTotal = 0;
1557 }
1558# endif
1559
1560# ifdef IEM_WITH_DATA_TLB
1561 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1562 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1563# endif
1564#else
1565 NOREF(pVCpu); NOREF(GCPtr);
1566#endif
1567}
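/*
 * Editorial note (sketch, not part of the original source): both TLBs are
 * direct mapped on the low 8 bits of the virtual page number, so a page
 * maps to exactly one entry and invalidation is a single tag compare:
 *
 * @code
 *     uintptr_t const idx = (uint8_t)(GCPtr >> X86_PAGE_SHIFT); // one of 256 slots
 * @endcode
 */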
1568
1569
1570/**
1571 * Invalidates the host physical aspects of the IEM TLBs.
1572 *
1573 * This is called internally as well as by PGM when moving GC mappings.
1574 *
1575 * @param pVCpu The cross context virtual CPU structure of the calling
1576 * thread.
1577 */
1578VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1579{
1580#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1581    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1582
1583# ifdef IEM_WITH_CODE_TLB
1584 pVCpu->iem.s.cbInstrBufTotal = 0;
1585# endif
1586 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1587 if (uTlbPhysRev != 0)
1588 {
1589 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1590 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1591 }
1592 else
1593 {
1594 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1595 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1596
1597 unsigned i;
1598# ifdef IEM_WITH_CODE_TLB
1599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1600 while (i-- > 0)
1601 {
1602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1604 }
1605# endif
1606# ifdef IEM_WITH_DATA_TLB
1607 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1608 while (i-- > 0)
1609 {
1610 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1611 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1612 }
1613# endif
1614 }
1615#else
1616 NOREF(pVCpu);
1617#endif
1618}
1619
1620
1621/**
1622 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1623 *
1624 * This is called internally as well as by PGM when moving GC mappings.
1625 *
1626 * @param pVM The cross context VM structure.
1627 *
1628 * @remarks Caller holds the PGM lock.
1629 */
1630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1631{
1632 RT_NOREF_PV(pVM);
1633}
1634
1635#ifdef IEM_WITH_CODE_TLB
1636
1637/**
1638 * Tries to fetch @a cbDst opcode bytes; raises the appropriate exception and
1639 * longjmps on failure.
1640 *
1641 * We end up here for a number of reasons:
1642 * - pbInstrBuf isn't yet initialized.
1643 * - Advancing beyond the buffer boundary (e.g. cross page).
1644 * - Advancing beyond the CS segment limit.
1645 * - Fetching from non-mappable page (e.g. MMIO).
1646 *
1647 * @param pVCpu The cross context virtual CPU structure of the
1648 * calling thread.
1649 * @param pvDst Where to return the bytes.
1650 * @param cbDst Number of bytes to read.
1651 *
1652 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1653 */
1654IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1655{
1656#ifdef IN_RING3
1657 for (;;)
1658 {
1659 Assert(cbDst <= 8);
1660 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1661
1662 /*
1663 * We might have a partial buffer match, deal with that first to make the
1664 * rest simpler. This is the first part of the cross page/buffer case.
1665 */
1666 if (pVCpu->iem.s.pbInstrBuf != NULL)
1667 {
1668 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1669 {
1670 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1671 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1672 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1673
1674 cbDst -= cbCopy;
1675 pvDst = (uint8_t *)pvDst + cbCopy;
1676 offBuf += cbCopy;
1677                pVCpu->iem.s.offInstrNextByte = offBuf; /* offBuf already includes cbCopy */
1678 }
1679 }
1680
1681 /*
1682 * Check segment limit, figuring how much we're allowed to access at this point.
1683 *
1684 * We will fault immediately if RIP is past the segment limit / in non-canonical
1685 * territory. If we do continue, there are one or more bytes to read before we
1686 * end up in trouble and we need to do that first before faulting.
1687 */
1688 RTGCPTR GCPtrFirst;
1689 uint32_t cbMaxRead;
1690 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1691 {
1692 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1693 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1694 { /* likely */ }
1695 else
1696 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1697 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1698 }
1699 else
1700 {
1701 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1702 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1703 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1704 { /* likely */ }
1705 else
1706 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1707 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1708 if (cbMaxRead != 0)
1709 { /* likely */ }
1710 else
1711 {
1712 /* Overflowed because address is 0 and limit is max. */
1713 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1714 cbMaxRead = X86_PAGE_SIZE;
1715 }
1716 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1717 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1718 if (cbMaxRead2 < cbMaxRead)
1719 cbMaxRead = cbMaxRead2;
1720 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1721 }
1722
1723 /*
1724 * Get the TLB entry for this piece of code.
1725 */
1726 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1727 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1728 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1729 if (pTlbe->uTag == uTag)
1730 {
1731 /* likely when executing lots of code, otherwise unlikely */
1732# ifdef VBOX_WITH_STATISTICS
1733 pVCpu->iem.s.CodeTlb.cTlbHits++;
1734# endif
1735 }
1736 else
1737 {
1738 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1739 RTGCPHYS GCPhys;
1740 uint64_t fFlags;
1741 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1742 if (RT_FAILURE(rc))
1743 {
1744 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1745 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1746 }
1747
1748 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1749 pTlbe->uTag = uTag;
1750 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1751 pTlbe->GCPhys = GCPhys;
1752 pTlbe->pbMappingR3 = NULL;
1753 }
1754
1755 /*
1756 * Check TLB page table level access flags.
1757 */
1758 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1759 {
1760 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1761 {
1762 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1763 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1764 }
1765 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1766 {
1767 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1768 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1769 }
1770 }
1771
1772 /*
1773 * Look up the physical page info if necessary.
1774 */
1775 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1776 { /* not necessary */ }
1777 else
1778 {
1779 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1780 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1782 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1783 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1784 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1785 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1786 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1787 }
1788
1789# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1790 /*
1791 * Try do a direct read using the pbMappingR3 pointer.
1792 */
1793 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1794 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1795 {
1796 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1797 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1798 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1799 {
1800 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1801 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1802 }
1803 else
1804 {
1805 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1806 Assert(cbInstr < cbMaxRead);
1807 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1808 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1809 }
1810 if (cbDst <= cbMaxRead)
1811 {
1812 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1813 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1814 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1815 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1816 return;
1817 }
1818 pVCpu->iem.s.pbInstrBuf = NULL;
1819
1820 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1821 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1822 }
1823 else
1824# endif
1825#if 0
1826 /*
1827     * If there is no special read handling, we can read a bit more and
1828 * put it in the prefetch buffer.
1829 */
1830 if ( cbDst < cbMaxRead
1831 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 {
1833 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1834 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1835 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1836 { /* likely */ }
1837 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1838 {
1839 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1840                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1841 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1842             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1843 }
1844 else
1845 {
1846 Log((RT_SUCCESS(rcStrict)
1847 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1848 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1849                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1850 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1851 }
1852 }
1853 /*
1854 * Special read handling, so only read exactly what's needed.
1855 * This is a highly unlikely scenario.
1856 */
1857 else
1858#endif
1859 {
1860 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1861 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1862 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1863 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1864 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1865 { /* likely */ }
1866 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1867 {
1868 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1869                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1870 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1871 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1872 }
1873 else
1874 {
1875 Log((RT_SUCCESS(rcStrict)
1876 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1877 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1878                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1879 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1880 }
1881 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1882 if (cbToRead == cbDst)
1883 return;
1884 }
1885
1886 /*
1887 * More to read, loop.
1888 */
1889 cbDst -= cbMaxRead;
1890 pvDst = (uint8_t *)pvDst + cbMaxRead;
1891 }
1892#else
1893 RT_NOREF(pvDst, cbDst);
1894 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1895#endif
1896}
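/*
 * Usage sketch (editorial illustration, not part of the original source):
 * how a longjmp target for iemOpcodeFetchBytesJmp might be armed in the
 * setjmp builds.  The local names here are assumptions; only pJmpBuf and
 * the fetch call come from this file.
 *
 * @code
 *     jmp_buf JmpBuf;
 *     pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
 *     int rc = setjmp(JmpBuf);
 *     if (rc == 0)
 *     {
 *         uint8_t b;
 *         iemOpcodeFetchBytesJmp(pVCpu, sizeof(b), &b); // may longjmp with a VBox status code
 *     }
 *     else
 *     {
 *         // rc is the status code thrown via longjmp; propagate it.
 *     }
 * @endcode
 */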
1897
1898#else
1899
1900/**
1901 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1902 * exception if it fails.
1903 *
1904 * @returns Strict VBox status code.
1905 * @param pVCpu The cross context virtual CPU structure of the
1906 * calling thread.
1907 * @param cbMin The minimum number of bytes relative to offOpcode
1908 * that must be read.
1909 */
1910IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1911{
1912 /*
1913 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1914 *
1915 * First translate CS:rIP to a physical address.
1916 */
1917 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1918 uint32_t cbToTryRead;
1919 RTGCPTR GCPtrNext;
1920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1921 {
1922 cbToTryRead = PAGE_SIZE;
1923 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1924 if (!IEM_IS_CANONICAL(GCPtrNext))
1925 return iemRaiseGeneralProtectionFault0(pVCpu);
1926 }
1927 else
1928 {
1929 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1930 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1931 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1932 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1933 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1934 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1935 if (!cbToTryRead) /* overflowed */
1936 {
1937 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1938 cbToTryRead = UINT32_MAX;
1939 /** @todo check out wrapping around the code segment. */
1940 }
1941 if (cbToTryRead < cbMin - cbLeft)
1942 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1943 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1944 }
1945
1946 /* Only read up to the end of the page, and make sure we don't read more
1947 than the opcode buffer can hold. */
1948 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1949 if (cbToTryRead > cbLeftOnPage)
1950 cbToTryRead = cbLeftOnPage;
1951 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1952 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1953/** @todo r=bird: Convert assertion into undefined opcode exception? */
1954 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1955
1956 RTGCPHYS GCPhys;
1957 uint64_t fFlags;
1958 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1959 if (RT_FAILURE(rc))
1960 {
1961 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1962 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1963 }
1964 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1965 {
1966 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1967 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1968 }
1969 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1970 {
1971 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1972 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1973 }
1974 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1975 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1976 /** @todo Check reserved bits and such stuff. PGM is better at doing
1977 * that, so do it when implementing the guest virtual address
1978 * TLB... */
1979
1980 /*
1981 * Read the bytes at this address.
1982 *
1983 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1984 * and since PATM should only patch the start of an instruction there
1985 * should be no need to check again here.
1986 */
1987 if (!pVCpu->iem.s.fBypassHandlers)
1988 {
1989 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1990 cbToTryRead, PGMACCESSORIGIN_IEM);
1991 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1992 { /* likely */ }
1993 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1994 {
1995 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1996                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1997 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1998 }
1999 else
2000 {
2001 Log((RT_SUCCESS(rcStrict)
2002 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2003 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2004                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2005 return rcStrict;
2006 }
2007 }
2008 else
2009 {
2010 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2011 if (RT_SUCCESS(rc))
2012 { /* likely */ }
2013 else
2014 {
2015 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2016 return rc;
2017 }
2018 }
2019 pVCpu->iem.s.cbOpcode += cbToTryRead;
2020 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2021
2022 return VINF_SUCCESS;
2023}
2024
2025#endif /* !IEM_WITH_CODE_TLB */
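/*
 * Editorial note (sketch, not part of the original source): the slow-path
 * fetchers below only call iemOpcodeFetchMoreBytes once the buffered bytes
 * run short; on VINF_SUCCESS at least @a cbMin bytes are available relative
 * to offOpcode.  Shape of a caller, assuming a two-byte need:
 *
 * @code
 *     if (pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode < 2)
 *     {
 *         VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *     }
 * @endcode
 */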
2026#ifndef IEM_WITH_SETJMP
2027
2028/**
2029 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2030 *
2031 * @returns Strict VBox status code.
2032 * @param pVCpu The cross context virtual CPU structure of the
2033 * calling thread.
2034 * @param pb Where to return the opcode byte.
2035 */
2036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2037{
2038 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2039 if (rcStrict == VINF_SUCCESS)
2040 {
2041 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2042 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2043 pVCpu->iem.s.offOpcode = offOpcode + 1;
2044 }
2045 else
2046 *pb = 0;
2047 return rcStrict;
2048}
2049
2050
2051/**
2052 * Fetches the next opcode byte.
2053 *
2054 * @returns Strict VBox status code.
2055 * @param pVCpu The cross context virtual CPU structure of the
2056 * calling thread.
2057 * @param pu8 Where to return the opcode byte.
2058 */
2059DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2060{
2061 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2062 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2063 {
2064 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2065 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2066 return VINF_SUCCESS;
2067 }
2068 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2069}
2070
2071#else /* IEM_WITH_SETJMP */
2072
2073/**
2074 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2075 *
2076 * @returns The opcode byte.
2077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2078 */
2079DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2080{
2081# ifdef IEM_WITH_CODE_TLB
2082 uint8_t u8;
2083 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2084 return u8;
2085# else
2086 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2087 if (rcStrict == VINF_SUCCESS)
2088 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2090# endif
2091}
2092
2093
2094/**
2095 * Fetches the next opcode byte, longjmp on error.
2096 *
2097 * @returns The opcode byte.
2098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2099 */
2100DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2101{
2102# ifdef IEM_WITH_CODE_TLB
2103 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2104 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2105 if (RT_LIKELY( pbBuf != NULL
2106 && offBuf < pVCpu->iem.s.cbInstrBuf))
2107 {
2108 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2109 return pbBuf[offBuf];
2110 }
2111# else
2112 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2113 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2114 {
2115 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2116 return pVCpu->iem.s.abOpcode[offOpcode];
2117 }
2118# endif
2119 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2120}
2121
2122#endif /* IEM_WITH_SETJMP */
2123
2124/**
2125 * Fetches the next opcode byte, returns automatically on failure.
2126 *
2127 * @param a_pu8 Where to return the opcode byte.
2128 * @remark Implicitly references pVCpu.
2129 */
2130#ifndef IEM_WITH_SETJMP
2131# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2132 do \
2133 { \
2134 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2135 if (rcStrict2 == VINF_SUCCESS) \
2136 { /* likely */ } \
2137 else \
2138 return rcStrict2; \
2139 } while (0)
2140#else
2141# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2142#endif /* IEM_WITH_SETJMP */
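/*
 * Usage sketch (editorial illustration, not part of the original source):
 * IEM_OPCODE_GET_NEXT_U8 is meant for decoder functions returning
 * VBOXSTRICTRC, as the non-setjmp variant expands to a bare 'return' on
 * failure.  iemOpImmByteSketch is an assumed name.
 *
 * @code
 *     IEM_STATIC VBOXSTRICTRC iemOpImmByteSketch(PVMCPUCC pVCpu)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm); // returns/longjmps on fetch failure
 *         Log4(("imm8=%#x\n", bImm));    // consume the immediate
 *         return VINF_SUCCESS;
 *     }
 * @endcode
 */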
2143
2144
2145#ifndef IEM_WITH_SETJMP
2146/**
2147 * Fetches the next signed byte from the opcode stream.
2148 *
2149 * @returns Strict VBox status code.
2150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2151 * @param pi8 Where to return the signed byte.
2152 */
2153DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2154{
2155 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2156}
2157#endif /* !IEM_WITH_SETJMP */
2158
2159
2160/**
2161 * Fetches the next signed byte from the opcode stream, returning automatically
2162 * on failure.
2163 *
2164 * @param a_pi8 Where to return the signed byte.
2165 * @remark Implicitly references pVCpu.
2166 */
2167#ifndef IEM_WITH_SETJMP
2168# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2169 do \
2170 { \
2171 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2172 if (rcStrict2 != VINF_SUCCESS) \
2173 return rcStrict2; \
2174 } while (0)
2175#else /* IEM_WITH_SETJMP */
2176# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2177
2178#endif /* IEM_WITH_SETJMP */
2179
2180#ifndef IEM_WITH_SETJMP
2181
2182/**
2183 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2184 *
2185 * @returns Strict VBox status code.
2186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2187 * @param pu16 Where to return the opcode word.
2188 */
2189DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2190{
2191 uint8_t u8;
2192 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2193 if (rcStrict == VINF_SUCCESS)
2194 *pu16 = (int8_t)u8;
2195 return rcStrict;
2196}
2197
2198
2199/**
2200 * Fetches the next signed byte from the opcode stream, extending it to
2201 * unsigned 16-bit.
2202 *
2203 * @returns Strict VBox status code.
2204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2205 * @param pu16 Where to return the unsigned word.
2206 */
2207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2208{
2209 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2210 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2211 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2212
2213 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2214 pVCpu->iem.s.offOpcode = offOpcode + 1;
2215 return VINF_SUCCESS;
2216}
2217
2218#endif /* !IEM_WITH_SETJMP */
2219
2220/**
2221 * Fetches the next signed byte from the opcode stream and sign-extends it to
2222 * a word, returning automatically on failure.
2223 *
2224 * @param a_pu16 Where to return the word.
2225 * @remark Implicitly references pVCpu.
2226 */
2227#ifndef IEM_WITH_SETJMP
2228# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2229 do \
2230 { \
2231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2232 if (rcStrict2 != VINF_SUCCESS) \
2233 return rcStrict2; \
2234 } while (0)
2235#else
2236# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2237#endif
2238
2239#ifndef IEM_WITH_SETJMP
2240
2241/**
2242 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2243 *
2244 * @returns Strict VBox status code.
2245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2246 * @param pu32 Where to return the opcode dword.
2247 */
2248DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2249{
2250 uint8_t u8;
2251 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2252 if (rcStrict == VINF_SUCCESS)
2253 *pu32 = (int8_t)u8;
2254 return rcStrict;
2255}
2256
2257
2258/**
2259 * Fetches the next signed byte from the opcode stream, extending it to
2260 * unsigned 32-bit.
2261 *
2262 * @returns Strict VBox status code.
2263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2264 * @param pu32 Where to return the unsigned dword.
2265 */
2266DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2267{
2268 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2269 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2270 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2271
2272 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2273 pVCpu->iem.s.offOpcode = offOpcode + 1;
2274 return VINF_SUCCESS;
2275}
2276
2277#endif /* !IEM_WITH_SETJMP */
2278
2279/**
2280 * Fetches the next signed byte from the opcode stream and sign-extends it to
2281 * a double word, returning automatically on failure.
2282 *
2283 * @param a_pu32 Where to return the double word.
2284 * @remark Implicitly references pVCpu.
2285 */
2286#ifndef IEM_WITH_SETJMP
2287# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2288 do \
2289 { \
2290 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2291 if (rcStrict2 != VINF_SUCCESS) \
2292 return rcStrict2; \
2293 } while (0)
2294#else
2295# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2296#endif
2297
2298#ifndef IEM_WITH_SETJMP
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2302 *
2303 * @returns Strict VBox status code.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 * @param pu64 Where to return the opcode qword.
2306 */
2307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2308{
2309 uint8_t u8;
2310 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2311 if (rcStrict == VINF_SUCCESS)
2312 *pu64 = (int8_t)u8;
2313 return rcStrict;
2314}
2315
2316
2317/**
2318 * Fetches the next signed byte from the opcode stream, extending it to
2319 * unsigned 64-bit.
2320 *
2321 * @returns Strict VBox status code.
2322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2323 * @param pu64 Where to return the unsigned qword.
2324 */
2325DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2326{
2327 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2328 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2329 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2330
2331 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2332 pVCpu->iem.s.offOpcode = offOpcode + 1;
2333 return VINF_SUCCESS;
2334}
2335
2336#endif /* !IEM_WITH_SETJMP */
2337
2338
2339/**
2340 * Fetches the next signed byte from the opcode stream and sign-extends it to
2341 * a quad word, returning automatically on failure.
2342 *
2343 * @param a_pu64 Where to return the quad word.
2344 * @remark Implicitly references pVCpu.
2345 */
2346#ifndef IEM_WITH_SETJMP
2347# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2348 do \
2349 { \
2350 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2351 if (rcStrict2 != VINF_SUCCESS) \
2352 return rcStrict2; \
2353 } while (0)
2354#else
2355# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2356#endif
2357
2358
2359#ifndef IEM_WITH_SETJMP
2360/**
2361 * Fetches the next opcode byte, noting down its position as the ModR/M byte.
2362 *
2363 * @returns Strict VBox status code.
2364 * @param pVCpu The cross context virtual CPU structure of the
2365 * calling thread.
2366 * @param pu8 Where to return the opcode byte.
2367 */
2368DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2369{
2370 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2371 pVCpu->iem.s.offModRm = offOpcode;
2372 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2373 {
2374 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2375 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2376 return VINF_SUCCESS;
2377 }
2378 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2379}
2380#else /* IEM_WITH_SETJMP */
2381/**
2382 * Fetches the next opcode byte, noting down its position as the ModR/M byte;
2383 * longjmp on error.
2383 *
2384 * @returns The opcode byte.
2385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2386 */
2387DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2388{
2389# ifdef IEM_WITH_CODE_TLB
2390 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2391 pVCpu->iem.s.offModRm = offBuf;
2392 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2393 if (RT_LIKELY( pbBuf != NULL
2394 && offBuf < pVCpu->iem.s.cbInstrBuf))
2395 {
2396 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2397 return pbBuf[offBuf];
2398 }
2399# else
2400 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2401 pVCpu->iem.s.offModRm = offOpcode;
2402 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2403 {
2404 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2405 return pVCpu->iem.s.abOpcode[offOpcode];
2406 }
2407# endif
2408 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2409}
2410#endif /* IEM_WITH_SETJMP */
2411
2412/**
2413 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2414 * on failure.
2415 *
2416 * Will note down the position of the ModR/M byte for VT-x exits.
2417 *
2418 * @param a_pbRm Where to return the ModR/M opcode byte.
2419 * @remark Implicitly references pVCpu.
2420 */
2421#ifndef IEM_WITH_SETJMP
2422# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2423 do \
2424 { \
2425 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2426 if (rcStrict2 == VINF_SUCCESS) \
2427 { /* likely */ } \
2428 else \
2429 return rcStrict2; \
2430 } while (0)
2431#else
2432# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2433#endif /* IEM_WITH_SETJMP */
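/*
 * Usage sketch (editorial illustration, not part of the original source):
 * typical ModR/M decoding with the macro above.  The bit layout shown is
 * the architectural one (mod 7:6, reg 5:3, rm 2:0), not code lifted from
 * this file.
 *
 * @code
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);          // also records offModRm for VT-x exit info
 *     uint8_t const iMod = bRm >> 6;         // addressing mode
 *     uint8_t const iReg = (bRm >> 3) & 7;   // register operand / opcode extension
 *     uint8_t const iRm  = bRm & 7;          // r/m operand
 * @endcode
 */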
2434
2435
2436#ifndef IEM_WITH_SETJMP
2437
2438/**
2439 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2440 *
2441 * @returns Strict VBox status code.
2442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2443 * @param pu16 Where to return the opcode word.
2444 */
2445DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2446{
2447 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2448 if (rcStrict == VINF_SUCCESS)
2449 {
2450 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2452 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2453# else
2454 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2455# endif
2456 pVCpu->iem.s.offOpcode = offOpcode + 2;
2457 }
2458 else
2459 *pu16 = 0;
2460 return rcStrict;
2461}
2462
2463
2464/**
2465 * Fetches the next opcode word.
2466 *
2467 * @returns Strict VBox status code.
2468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2469 * @param pu16 Where to return the opcode word.
2470 */
2471DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2472{
2473 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2474 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2475 {
2476 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2477# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2478 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2479# else
2480 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2481# endif
2482 return VINF_SUCCESS;
2483 }
2484 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2485}
2486
2487#else /* IEM_WITH_SETJMP */
2488
2489/**
2490 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2491 *
2492 * @returns The opcode word.
2493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2494 */
2495DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2496{
2497# ifdef IEM_WITH_CODE_TLB
2498 uint16_t u16;
2499 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2500 return u16;
2501# else
2502 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2503 if (rcStrict == VINF_SUCCESS)
2504 {
2505 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2506 pVCpu->iem.s.offOpcode += 2;
2507# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2508 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2509# else
2510 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2511# endif
2512 }
2513 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2514# endif
2515}
2516
2517
2518/**
2519 * Fetches the next opcode word, longjmp on error.
2520 *
2521 * @returns The opcode word.
2522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2523 */
2524DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2525{
2526# ifdef IEM_WITH_CODE_TLB
2527 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2528 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2529 if (RT_LIKELY( pbBuf != NULL
2530 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2531 {
2532 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2533# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2534 return *(uint16_t const *)&pbBuf[offBuf];
2535# else
2536 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2537# endif
2538 }
2539# else
2540 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2541 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2542 {
2543 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2544# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2545 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2546# else
2547 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2548# endif
2549 }
2550# endif
2551 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2552}
2553
2554#endif /* IEM_WITH_SETJMP */
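/*
 * Editorial note (sketch, not part of the original source): with
 * IEM_USE_UNALIGNED_DATA_ACCESS the word is loaded directly; otherwise it
 * is assembled byte by byte, which yields the same value on little-endian
 * hosts because RT_MAKE_U16(lo, hi) builds lo | (hi << 8):
 *
 * @code
 *     uint8_t const ab[2] = { 0x34, 0x12 };                   // bytes as they appear in the opcode stream
 *     Assert(RT_MAKE_U16(ab[0], ab[1]) == UINT16_C(0x1234));  // matches an x86 unaligned 16-bit load
 * @endcode
 */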
2555
2556
2557/**
2558 * Fetches the next opcode word, returns automatically on failure.
2559 *
2560 * @param a_pu16 Where to return the opcode word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
2574
2575#ifndef IEM_WITH_SETJMP
2576
2577/**
2578 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pu32 Where to return the opcode double word.
2583 */
2584DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2585{
2586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2587 if (rcStrict == VINF_SUCCESS)
2588 {
2589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2590 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2591 pVCpu->iem.s.offOpcode = offOpcode + 2;
2592 }
2593 else
2594 *pu32 = 0;
2595 return rcStrict;
2596}
2597
2598
2599/**
2600 * Fetches the next opcode word, zero extending it to a double word.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param pu32 Where to return the opcode double word.
2605 */
2606DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2607{
2608 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2609 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2610 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2611
2612 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2613 pVCpu->iem.s.offOpcode = offOpcode + 2;
2614 return VINF_SUCCESS;
2615}
2616
2617#endif /* !IEM_WITH_SETJMP */
2618
2619
2620/**
2621 * Fetches the next opcode word and zero extends it to a double word, returns
2622 * automatically on failure.
2623 *
2624 * @param a_pu32 Where to return the opcode double word.
2625 * @remark Implicitly references pVCpu.
2626 */
2627#ifndef IEM_WITH_SETJMP
2628# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2629 do \
2630 { \
2631 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2632 if (rcStrict2 != VINF_SUCCESS) \
2633 return rcStrict2; \
2634 } while (0)
2635#else
2636# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2637#endif
2638
2639#ifndef IEM_WITH_SETJMP
2640
2641/**
2642 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu64 Where to return the opcode quad word.
2647 */
2648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2649{
2650 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2651 if (rcStrict == VINF_SUCCESS)
2652 {
2653 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2654 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2655 pVCpu->iem.s.offOpcode = offOpcode + 2;
2656 }
2657 else
2658 *pu64 = 0;
2659 return rcStrict;
2660}
2661
2662
2663/**
2664 * Fetches the next opcode word, zero extending it to a quad word.
2665 *
2666 * @returns Strict VBox status code.
2667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2668 * @param pu64 Where to return the opcode quad word.
2669 */
2670DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2671{
2672 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2673 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2674 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2675
2676 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2677 pVCpu->iem.s.offOpcode = offOpcode + 2;
2678 return VINF_SUCCESS;
2679}
2680
2681#endif /* !IEM_WITH_SETJMP */
2682
2683/**
2684 * Fetches the next opcode word and zero extends it to a quad word, returns
2685 * automatically on failure.
2686 *
2687 * @param a_pu64 Where to return the opcode quad word.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2700#endif
2701
2702
2703#ifndef IEM_WITH_SETJMP
2704/**
2705 * Fetches the next signed word from the opcode stream.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pi16 Where to return the signed word.
2710 */
2711DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2712{
2713 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2714}
2715#endif /* !IEM_WITH_SETJMP */
2716
2717
2718/**
2719 * Fetches the next signed word from the opcode stream, returning automatically
2720 * on failure.
2721 *
2722 * @param a_pi16 Where to return the signed word.
2723 * @remark Implicitly references pVCpu.
2724 */
2725#ifndef IEM_WITH_SETJMP
2726# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2727 do \
2728 { \
2729 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2730 if (rcStrict2 != VINF_SUCCESS) \
2731 return rcStrict2; \
2732 } while (0)
2733#else
2734# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2735#endif
2736
2737#ifndef IEM_WITH_SETJMP
2738
2739/**
2740 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2741 *
2742 * @returns Strict VBox status code.
2743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2744 * @param pu32 Where to return the opcode dword.
2745 */
2746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2747{
2748 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2749 if (rcStrict == VINF_SUCCESS)
2750 {
2751 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2752# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2753 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2754# else
2755 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2756 pVCpu->iem.s.abOpcode[offOpcode + 1],
2757 pVCpu->iem.s.abOpcode[offOpcode + 2],
2758 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2759# endif
2760 pVCpu->iem.s.offOpcode = offOpcode + 4;
2761 }
2762 else
2763 *pu32 = 0;
2764 return rcStrict;
2765}
2766
2767
2768/**
2769 * Fetches the next opcode dword.
2770 *
2771 * @returns Strict VBox status code.
2772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2773 * @param pu32 Where to return the opcode double word.
2774 */
2775DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2776{
2777 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2778 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2779 {
2780 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2781# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2782 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2783# else
2784 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2785 pVCpu->iem.s.abOpcode[offOpcode + 1],
2786 pVCpu->iem.s.abOpcode[offOpcode + 2],
2787 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2788# endif
2789 return VINF_SUCCESS;
2790 }
2791 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2792}
2793
2794#else /* IEM_WITH_SETJMP */
2795
2796/**
2797 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2798 *
2799 * @returns The opcode dword.
2800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2801 */
2802DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2803{
2804# ifdef IEM_WITH_CODE_TLB
2805 uint32_t u32;
2806 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2807 return u32;
2808# else
2809 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2810 if (rcStrict == VINF_SUCCESS)
2811 {
2812 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2813 pVCpu->iem.s.offOpcode = offOpcode + 4;
2814# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2815 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2816# else
2817 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2818 pVCpu->iem.s.abOpcode[offOpcode + 1],
2819 pVCpu->iem.s.abOpcode[offOpcode + 2],
2820 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2821# endif
2822 }
2823 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2824# endif
2825}
2826
2827
2828/**
2829 * Fetches the next opcode dword, longjmp on error.
2830 *
2831 * @returns The opcode dword.
2832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2833 */
2834DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2835{
2836# ifdef IEM_WITH_CODE_TLB
2837 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2838 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2839 if (RT_LIKELY( pbBuf != NULL
2840 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2841 {
2842 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2843# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2844 return *(uint32_t const *)&pbBuf[offBuf];
2845# else
2846 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2847 pbBuf[offBuf + 1],
2848 pbBuf[offBuf + 2],
2849 pbBuf[offBuf + 3]);
2850# endif
2851 }
2852# else
2853 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2854 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2855 {
2856 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2857# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2858 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2859# else
2860 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2861 pVCpu->iem.s.abOpcode[offOpcode + 1],
2862 pVCpu->iem.s.abOpcode[offOpcode + 2],
2863 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2864# endif
2865 }
2866# endif
2867 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2868}
2869
2870#endif /* IEM_WITH_SETJMP */
2871
2872
2873/**
2874 * Fetches the next opcode dword, returns automatically on failure.
2875 *
2876 * @param a_pu32 Where to return the opcode dword.
2877 * @remark Implicitly references pVCpu.
2878 */
2879#ifndef IEM_WITH_SETJMP
2880# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2881 do \
2882 { \
2883 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2884 if (rcStrict2 != VINF_SUCCESS) \
2885 return rcStrict2; \
2886 } while (0)
2887#else
2888# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2889#endif
2890
2891#ifndef IEM_WITH_SETJMP
2892
2893/**
2894 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2895 *
2896 * @returns Strict VBox status code.
2897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2898 * @param pu64 Where to return the opcode dword.
2899 */
2900DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2901{
2902 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2903 if (rcStrict == VINF_SUCCESS)
2904 {
2905 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2906 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2907 pVCpu->iem.s.abOpcode[offOpcode + 1],
2908 pVCpu->iem.s.abOpcode[offOpcode + 2],
2909 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2910 pVCpu->iem.s.offOpcode = offOpcode + 4;
2911 }
2912 else
2913 *pu64 = 0;
2914 return rcStrict;
2915}
2916
2917
2918/**
2919 * Fetches the next opcode dword, zero extending it to a quad word.
2920 *
2921 * @returns Strict VBox status code.
2922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2923 * @param pu64 Where to return the opcode quad word.
2924 */
2925DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2926{
2927 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2928 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2929 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2930
2931 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2932 pVCpu->iem.s.abOpcode[offOpcode + 1],
2933 pVCpu->iem.s.abOpcode[offOpcode + 2],
2934 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2935 pVCpu->iem.s.offOpcode = offOpcode + 4;
2936 return VINF_SUCCESS;
2937}
2938
2939#endif /* !IEM_WITH_SETJMP */
2940
2941
2942/**
2943 * Fetches the next opcode dword and zero extends it to a quad word, returns
2944 * automatically on failure.
2945 *
2946 * @param a_pu64 Where to return the opcode quad word.
2947 * @remark Implicitly references pVCpu.
2948 */
2949#ifndef IEM_WITH_SETJMP
2950# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2951 do \
2952 { \
2953 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2954 if (rcStrict2 != VINF_SUCCESS) \
2955 return rcStrict2; \
2956 } while (0)
2957#else
2958# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2959#endif
2960
2961
2962#ifndef IEM_WITH_SETJMP
2963/**
2964 * Fetches the next signed double word from the opcode stream.
2965 *
2966 * @returns Strict VBox status code.
2967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2968 * @param pi32 Where to return the signed double word.
2969 */
2970DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2971{
2972 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2973}
2974#endif
2975
2976/**
2977 * Fetches the next signed double word from the opcode stream, returning
2978 * automatically on failure.
2979 *
2980 * @param a_pi32 Where to return the signed double word.
2981 * @remark Implicitly references pVCpu.
2982 */
2983#ifndef IEM_WITH_SETJMP
2984# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2985 do \
2986 { \
2987 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2988 if (rcStrict2 != VINF_SUCCESS) \
2989 return rcStrict2; \
2990 } while (0)
2991#else
2992# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2993#endif
2994
2995#ifndef IEM_WITH_SETJMP
2996
2997/**
2998 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3002 * @param pu64 Where to return the opcode qword.
3003 */
3004DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3005{
3006 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3007 if (rcStrict == VINF_SUCCESS)
3008 {
3009 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3010 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3011 pVCpu->iem.s.abOpcode[offOpcode + 1],
3012 pVCpu->iem.s.abOpcode[offOpcode + 2],
3013 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3014 pVCpu->iem.s.offOpcode = offOpcode + 4;
3015 }
3016 else
3017 *pu64 = 0;
3018 return rcStrict;
3019}
3020
3021
3022/**
3023 * Fetches the next opcode dword, sign extending it into a quad word.
3024 *
3025 * @returns Strict VBox status code.
3026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3027 * @param pu64 Where to return the opcode quad word.
3028 */
3029DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3030{
3031 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3032 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3033 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3034
3035 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3036 pVCpu->iem.s.abOpcode[offOpcode + 1],
3037 pVCpu->iem.s.abOpcode[offOpcode + 2],
3038 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3039 *pu64 = i32;
3040 pVCpu->iem.s.offOpcode = offOpcode + 4;
3041 return VINF_SUCCESS;
3042}
3043
3044#endif /* !IEM_WITH_SETJMP */
3045
3046
3047/**
3048 * Fetches the next opcode double word and sign extends it to a quad word,
3049 * returns automatically on failure.
3050 *
3051 * @param a_pu64 Where to return the opcode quad word.
3052 * @remark Implicitly references pVCpu.
3053 */
3054#ifndef IEM_WITH_SETJMP
3055# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3056 do \
3057 { \
3058 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3059 if (rcStrict2 != VINF_SUCCESS) \
3060 return rcStrict2; \
3061 } while (0)
3062#else
3063# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3064#endif
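/*
 * Editorial note (sketch, not part of the original source): the S32->U64
 * fetchers implement the sign extension x86 applies to 32-bit displacements
 * and immediates in 64-bit mode, e.g.:
 *
 * @code
 *     uint64_t const u64 = (uint64_t)(int64_t)(int32_t)UINT32_C(0xfffffffe); // -2
 *     Assert(u64 == UINT64_C(0xfffffffffffffffe));
 * @endcode
 */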
3065
3066#ifndef IEM_WITH_SETJMP
3067
3068/**
3069 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3070 *
3071 * @returns Strict VBox status code.
3072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3073 * @param pu64 Where to return the opcode qword.
3074 */
3075DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3076{
3077 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3078 if (rcStrict == VINF_SUCCESS)
3079 {
3080 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3081# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3082 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3083# else
3084 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3085 pVCpu->iem.s.abOpcode[offOpcode + 1],
3086 pVCpu->iem.s.abOpcode[offOpcode + 2],
3087 pVCpu->iem.s.abOpcode[offOpcode + 3],
3088 pVCpu->iem.s.abOpcode[offOpcode + 4],
3089 pVCpu->iem.s.abOpcode[offOpcode + 5],
3090 pVCpu->iem.s.abOpcode[offOpcode + 6],
3091 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3092# endif
3093 pVCpu->iem.s.offOpcode = offOpcode + 8;
3094 }
3095 else
3096 *pu64 = 0;
3097 return rcStrict;
3098}
3099
3100
3101/**
3102 * Fetches the next opcode qword.
3103 *
3104 * @returns Strict VBox status code.
3105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3106 * @param pu64 Where to return the opcode qword.
3107 */
3108DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3109{
3110 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3111 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3112 {
3113# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3114 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3115# else
3116 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3117 pVCpu->iem.s.abOpcode[offOpcode + 1],
3118 pVCpu->iem.s.abOpcode[offOpcode + 2],
3119 pVCpu->iem.s.abOpcode[offOpcode + 3],
3120 pVCpu->iem.s.abOpcode[offOpcode + 4],
3121 pVCpu->iem.s.abOpcode[offOpcode + 5],
3122 pVCpu->iem.s.abOpcode[offOpcode + 6],
3123 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3124# endif
3125 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3126 return VINF_SUCCESS;
3127 }
3128 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3129}
3130
3131#else /* IEM_WITH_SETJMP */
3132
3133/**
3134 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3135 *
3136 * @returns The opcode qword.
3137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3138 */
3139DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3140{
3141# ifdef IEM_WITH_CODE_TLB
3142 uint64_t u64;
3143 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3144 return u64;
3145# else
3146 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3147 if (rcStrict == VINF_SUCCESS)
3148 {
3149 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3150 pVCpu->iem.s.offOpcode = offOpcode + 8;
3151# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3152 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3153# else
3154 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3155 pVCpu->iem.s.abOpcode[offOpcode + 1],
3156 pVCpu->iem.s.abOpcode[offOpcode + 2],
3157 pVCpu->iem.s.abOpcode[offOpcode + 3],
3158 pVCpu->iem.s.abOpcode[offOpcode + 4],
3159 pVCpu->iem.s.abOpcode[offOpcode + 5],
3160 pVCpu->iem.s.abOpcode[offOpcode + 6],
3161 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3162# endif
3163 }
3164 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3165# endif
3166}
3167
3168
3169/**
3170 * Fetches the next opcode qword, longjmp on error.
3171 *
3172 * @returns The opcode qword.
3173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3174 */
3175DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3176{
3177# ifdef IEM_WITH_CODE_TLB
3178 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3179 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3180 if (RT_LIKELY( pbBuf != NULL
3181 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3182 {
3183 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3184# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3185 return *(uint64_t const *)&pbBuf[offBuf];
3186# else
3187 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3188 pbBuf[offBuf + 1],
3189 pbBuf[offBuf + 2],
3190 pbBuf[offBuf + 3],
3191 pbBuf[offBuf + 4],
3192 pbBuf[offBuf + 5],
3193 pbBuf[offBuf + 6],
3194 pbBuf[offBuf + 7]);
3195# endif
3196 }
3197# else
3198 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3199 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3200 {
3201 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3202# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3203 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3204# else
3205 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3206 pVCpu->iem.s.abOpcode[offOpcode + 1],
3207 pVCpu->iem.s.abOpcode[offOpcode + 2],
3208 pVCpu->iem.s.abOpcode[offOpcode + 3],
3209 pVCpu->iem.s.abOpcode[offOpcode + 4],
3210 pVCpu->iem.s.abOpcode[offOpcode + 5],
3211 pVCpu->iem.s.abOpcode[offOpcode + 6],
3212 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3213# endif
3214 }
3215# endif
3216 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3217}
3218
3219#endif /* IEM_WITH_SETJMP */
3220
3221/**
3222 * Fetches the next opcode quad word, returns automatically on failure.
3223 *
3224 * @param a_pu64 Where to return the opcode quad word.
3225 * @remark Implicitly references pVCpu.
3226 */
3227#ifndef IEM_WITH_SETJMP
3228# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3229 do \
3230 { \
3231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3232 if (rcStrict2 != VINF_SUCCESS) \
3233 return rcStrict2; \
3234 } while (0)
3235#else
3236# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3237#endif
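
/*
 * Editor's note (illustrative, not part of the original source): the byte-wise
 * RT_MAKE_U64_FROM_U8 assembly above is little-endian, i.e. on little-endian
 * hosts it yields the same value as the unaligned load taken in the
 * IEM_USE_UNALIGNED_DATA_ACCESS path:
 */
#if 0
static uint64_t iemExampleReadQwordLE(uint8_t const *pb)
{
    return RT_MAKE_U64_FROM_U8(pb[0], pb[1], pb[2], pb[3],
                               pb[4], pb[5], pb[6], pb[7]); /* == *(uint64_t const *)pb on x86/AMD64 */
}
#endif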
3238
3239
3240/** @name Misc Worker Functions.
3241 * @{
3242 */
3243
3244/**
3245 * Gets the exception class for the specified exception vector.
3246 *
3247 * @returns The class of the specified exception.
3248 * @param uVector The exception vector.
3249 */
3250IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3251{
3252 Assert(uVector <= X86_XCPT_LAST);
3253 switch (uVector)
3254 {
3255 case X86_XCPT_DE:
3256 case X86_XCPT_TS:
3257 case X86_XCPT_NP:
3258 case X86_XCPT_SS:
3259 case X86_XCPT_GP:
3260 case X86_XCPT_SX: /* AMD only */
3261 return IEMXCPTCLASS_CONTRIBUTORY;
3262
3263 case X86_XCPT_PF:
3264 case X86_XCPT_VE: /* Intel only */
3265 return IEMXCPTCLASS_PAGE_FAULT;
3266
3267 case X86_XCPT_DF:
3268 return IEMXCPTCLASS_DOUBLE_FAULT;
3269 }
3270 return IEMXCPTCLASS_BENIGN;
3271}
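
/*
 * Editor's example (illustrative, not part of the original source): sanity
 * checks matching the Intel/AMD double-fault classification tables:
 */
#if 0
static void iemExampleXcptClasses(void)
{
    Assert(iemGetXcptClass(X86_XCPT_GP) == IEMXCPTCLASS_CONTRIBUTORY);
    Assert(iemGetXcptClass(X86_XCPT_PF) == IEMXCPTCLASS_PAGE_FAULT);
    Assert(iemGetXcptClass(X86_XCPT_UD) == IEMXCPTCLASS_BENIGN);
}
#endif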
3272
3273
3274/**
3275 * Evaluates how to handle an exception caused during delivery of another event
3276 * (exception / interrupt).
3277 *
3278 * @returns How to handle the recursive exception.
3279 * @param pVCpu The cross context virtual CPU structure of the
3280 * calling thread.
3281 * @param fPrevFlags The flags of the previous event.
3282 * @param uPrevVector The vector of the previous event.
3283 * @param fCurFlags The flags of the current exception.
3284 * @param uCurVector The vector of the current exception.
3285 * @param pfXcptRaiseInfo Where to store additional information about the
3286 * exception condition. Optional.
3287 */
3288VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3289 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3290{
3291 /*
3292 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3293 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3294 */
3295 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3296 Assert(pVCpu); RT_NOREF(pVCpu);
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3298
3299 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3300 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3301 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3302 {
3303 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3304 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3305 {
3306 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3307 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3308 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3309 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3310 {
3311 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3312 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3313 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3314 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3315 uCurVector, pVCpu->cpum.GstCtx.cr2));
3316 }
3317 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3319 {
3320 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3322 }
3323 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3324 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3326 {
3327 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3329 }
3330 }
3331 else
3332 {
3333 if (uPrevVector == X86_XCPT_NMI)
3334 {
3335 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3336 if (uCurVector == X86_XCPT_PF)
3337 {
3338 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3339 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3340 }
3341 }
3342 else if ( uPrevVector == X86_XCPT_AC
3343 && uCurVector == X86_XCPT_AC)
3344 {
3345 enmRaise = IEMXCPTRAISE_CPU_HANG;
3346 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3347 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3348 }
3349 }
3350 }
3351 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3352 {
3353 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3354 if (uCurVector == X86_XCPT_PF)
3355 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3356 }
3357 else
3358 {
3359 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3360 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3361 }
3362
3363 if (pfXcptRaiseInfo)
3364 *pfXcptRaiseInfo = fRaiseInfo;
3365 return enmRaise;
3366}
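
/*
 * Editor's example (illustrative, not part of the original source): a #GP
 * raised while delivering a #PF is a classic double-fault combination, so the
 * evaluation above is expected to yield IEMXCPTRAISE_DOUBLE_FAULT:
 */
#if 0
static void iemExampleEvalPfThenGp(PVMCPUCC pVCpu)
{
    IEMXCPTRAISEINFO fRaiseInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                     &fRaiseInfo);
    Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
}
#endif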
3367
3368
3369/**
3370 * Enters the CPU shutdown state initiated by a triple fault or other
3371 * unrecoverable conditions.
3372 *
3373 * @returns Strict VBox status code.
3374 * @param pVCpu The cross context virtual CPU structure of the
3375 * calling thread.
3376 */
3377IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3378{
3379 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3380 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3381
3382 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3383 {
3384 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3385 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 RT_NOREF(pVCpu);
3389 return VINF_EM_TRIPLE_FAULT;
3390}
3391
3392
3393/**
3394 * Validates a new SS segment.
3395 *
3396 * @returns VBox strict status code.
3397 * @param pVCpu The cross context virtual CPU structure of the
3398 * calling thread.
3399 * @param NewSS The new SS selector.
3400 * @param uCpl The CPL to load the stack for.
3401 * @param pDesc Where to return the descriptor.
3402 */
3403IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3404{
3405 /* Null selectors are not allowed (we're not called for dispatching
3406 interrupts with SS=0 in long mode). */
3407 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3410 return iemRaiseTaskSwitchFault0(pVCpu);
3411 }
3412
3413 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3414 if ((NewSS & X86_SEL_RPL) != uCpl)
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3417 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3418 }
3419
3420 /*
3421 * Read the descriptor.
3422 */
3423 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3424 if (rcStrict != VINF_SUCCESS)
3425 return rcStrict;
3426
3427 /*
3428 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3429 */
3430 if (!pDesc->Legacy.Gen.u1DescType)
3431 {
3432 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435
3436 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3437 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3443 {
3444 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3445 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3446 }
3447
3448 /* Is it there? */
3449 /** @todo testcase: Is this checked before the canonical / limit check below? */
3450 if (!pDesc->Legacy.Gen.u1Present)
3451 {
3452 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3453 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3454 }
3455
3456 return VINF_SUCCESS;
3457}
3458
3459
3460/**
3461 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3462 * not (kind of obsolete now).
3463 *
3464 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3465 */
3466#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3467
3468/**
3469 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 * @param a_fEfl The new EFLAGS.
3473 */
3474#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
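
/*
 * Editor's example (illustrative, not part of the original source): the usual
 * read-modify-write pattern with these accessors, e.g. clearing EFLAGS.IF:
 */
#if 0
static void iemExampleClearIf(PVMCPUCC pVCpu)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
    fEfl &= ~X86_EFL_IF;
    IEMMISC_SET_EFL(pVCpu, fEfl);
}
#endif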
3475
3476/** @} */
3477
3478
3479/** @name Raising Exceptions.
3480 *
3481 * @{
3482 */
3483
3484
3485/**
3486 * Loads the specified stack far pointer from the TSS.
3487 *
3488 * @returns VBox strict status code.
3489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3490 * @param uCpl The CPL to load the stack for.
3491 * @param pSelSS Where to return the new stack segment.
3492 * @param puEsp Where to return the new stack pointer.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3495{
3496 VBOXSTRICTRC rcStrict;
3497 Assert(uCpl < 4);
3498
3499 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3500 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3501 {
3502 /*
3503 * 16-bit TSS (X86TSS16).
3504 */
3505 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3506 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3507 {
3508 uint32_t off = uCpl * 4 + 2;
3509 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3510 {
3511 /** @todo check actual access pattern here. */
3512 uint32_t u32Tmp = 0; /* init to quiet gcc's maybe-uninitialized warning */
3513 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3514 if (rcStrict == VINF_SUCCESS)
3515 {
3516 *puEsp = RT_LOWORD(u32Tmp);
3517 *pSelSS = RT_HIWORD(u32Tmp);
3518 return VINF_SUCCESS;
3519 }
3520 }
3521 else
3522 {
3523 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3524 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526 break;
3527 }
3528
3529 /*
3530 * 32-bit TSS (X86TSS32).
3531 */
3532 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3533 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3534 {
3535 uint32_t off = uCpl * 8 + 4;
3536 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3537 {
3538 /** @todo check actual access pattern here. */
3539 uint64_t u64Tmp;
3540 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3541 if (rcStrict == VINF_SUCCESS)
3542 {
3543 *puEsp = u64Tmp & UINT32_MAX;
3544 *pSelSS = (RTSEL)(u64Tmp >> 32);
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 else
3549 {
3550 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3552 }
3553 break;
3554 }
3555
3556 default:
3557 AssertFailed();
3558 rcStrict = VERR_IEM_IPE_4;
3559 break;
3560 }
3561
3562 *puEsp = 0; /* make gcc happy */
3563 *pSelSS = 0; /* make gcc happy */
3564 return rcStrict;
3565}
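
/*
 * Editor's note (illustrative, not part of the original source): the ring
 * stack slots read above sit at fixed offsets - SS:SP pairs at 2 + CPL*4 in a
 * 16-bit TSS, ESP/SS pairs at 4 + CPL*8 in a 32-bit TSS. Worked example for
 * CPL=1, assuming the X86TSS16/X86TSS32 field names sp1/esp1 from iprt/x86.h:
 */
#if 0
static void iemExampleTssStackOffsets(void)
{
    Assert(1 /*CPL*/ * 4 + 2 == RT_UOFFSETOF(X86TSS16, sp1));   /* = 6 */
    Assert(1 /*CPL*/ * 8 + 4 == RT_UOFFSETOF(X86TSS32, esp1));  /* = 12 */
}
#endif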
3566
3567
3568/**
3569 * Loads the specified stack pointer from the 64-bit TSS.
3570 *
3571 * @returns VBox strict status code.
3572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param uCpl The CPL to load the stack for.
3574 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3575 * @param puRsp Where to return the new stack pointer.
3576 */
3577IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3578{
3579 Assert(uCpl < 4);
3580 Assert(uIst < 8);
3581 *puRsp = 0; /* make gcc happy */
3582
3583 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3584 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3585
3586 uint32_t off;
3587 if (uIst)
3588 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3589 else
3590 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3591 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3592 {
3593 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3594 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596
3597 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3598}
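
/*
 * Editor's note (illustrative, not part of the original source): the offset
 * arithmetic above lands on the X86TSS64 fields one would expect - uIst=3
 * selects ist3, while uIst=0 with uCpl=2 selects rsp2:
 */
#if 0
static void iemExampleTss64Offsets(void)
{
    Assert((3 - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1) == RT_UOFFSETOF(X86TSS64, ist3));
    Assert( 2      * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0) == RT_UOFFSETOF(X86TSS64, rsp2));
}
#endif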
3599
3600
3601/**
3602 * Adjust the CPU state according to the exception being raised.
3603 *
3604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3605 * @param u8Vector The exception that has been raised.
3606 */
3607DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3608{
3609 switch (u8Vector)
3610 {
3611 case X86_XCPT_DB:
3612 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3613 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3614 break;
3615 /** @todo Read the AMD and Intel exception reference... */
3616 }
3617}
3618
3619
3620/**
3621 * Implements exceptions and interrupts for real mode.
3622 *
3623 * @returns VBox strict status code.
3624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3625 * @param cbInstr The number of bytes to offset rIP by in the return
3626 * address.
3627 * @param u8Vector The interrupt / exception vector number.
3628 * @param fFlags The flags.
3629 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3630 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3631 */
3632IEM_STATIC VBOXSTRICTRC
3633iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3634 uint8_t cbInstr,
3635 uint8_t u8Vector,
3636 uint32_t fFlags,
3637 uint16_t uErr,
3638 uint64_t uCr2)
3639{
3640 NOREF(uErr); NOREF(uCr2);
3641 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3642
3643 /*
3644 * Read the IDT entry.
3645 */
3646 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3647 {
3648 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3649 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3650 }
3651 RTFAR16 Idte;
3652 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3653 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3654 {
3655 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3656 return rcStrict;
3657 }
3658
3659 /*
3660 * Push the stack frame.
3661 */
3662 uint16_t *pu16Frame;
3663 uint64_t uNewRsp;
3664 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3665 if (rcStrict != VINF_SUCCESS)
3666 return rcStrict;
3667
3668 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3669#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3670 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3671 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3672 fEfl |= UINT16_C(0xf000);
3673#endif
3674 pu16Frame[2] = (uint16_t)fEfl;
3675 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3676 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3677 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3678 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3679 return rcStrict;
3680
3681 /*
3682 * Load the vector address into cs:ip and make exception specific state
3683 * adjustments.
3684 */
3685 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3686 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3687 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3688 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3689 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3690 pVCpu->cpum.GstCtx.rip = Idte.off;
3691 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3692 IEMMISC_SET_EFL(pVCpu, fEfl);
3693
3694 /** @todo do we actually do this in real mode? */
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
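
/*
 * Editor's note (illustrative, not part of the original source): the 6-byte
 * real-mode frame pushed above lands in memory lowest-address-first as
 * IP, CS, FLAGS - exactly what a subsequent IRET pops - and the vector is
 * fetched from the legacy IVT entry (an IP:CS pair) at IDTR.base + vector*4:
 */
#if 0
static void iemExampleRealModeFrame(uint16_t const *pu16Frame)
{
    uint16_t const uRetIp  = pu16Frame[0]; /* new SS:SP points at the IP */
    uint16_t const uRetCs  = pu16Frame[1];
    uint16_t const fRetEfl = pu16Frame[2]; /* pushed before IF/TF/AC are cleared */
    RT_NOREF3(uRetIp, uRetCs, fRetEfl);
}
#endif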
3700
3701
3702/**
3703 * Loads a NULL data selector into when coming from V8086 mode.
3704 *
3705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3706 * @param pSReg Pointer to the segment register.
3707 */
3708IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3709{
3710 pSReg->Sel = 0;
3711 pSReg->ValidSel = 0;
3712 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3713 {
3714 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
3715 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3716 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3717 }
3718 else
3719 {
3720 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3721 /** @todo check this on AMD-V */
3722 pSReg->u64Base = 0;
3723 pSReg->u32Limit = 0;
3724 }
3725}
3726
3727
3728/**
3729 * Loads a segment selector during a task switch in V8086 mode.
3730 *
3731 * @param pSReg Pointer to the segment register.
3732 * @param uSel The selector value to load.
3733 */
3734IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3735{
3736 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3737 pSReg->Sel = uSel;
3738 pSReg->ValidSel = uSel;
3739 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3740 pSReg->u64Base = uSel << 4;
3741 pSReg->u32Limit = 0xffff;
3742 pSReg->Attr.u = 0xf3;
3743}
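
/*
 * Editor's note (illustrative, not part of the original source): 0xf3 is the
 * canonical V8086 attribute byte (present, DPL=3, read/write accessed data),
 * and the base is the selector shifted left by four, real-mode style:
 */
#if 0
static void iemExampleV86SegBase(void)
{
    uint16_t const uSel = 0xb800; /* e.g. the CGA text buffer segment */
    Assert(((uint32_t)uSel << 4) == UINT32_C(0xb8000));
}
#endif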
3744
3745
3746/**
3747 * Loads a NULL data selector into a selector register, both the hidden and
3748 * visible parts, in protected mode.
3749 *
3750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3751 * @param pSReg Pointer to the segment register.
3752 * @param uRpl The RPL.
3753 */
3754IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3755{
3756 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3757 * data selector in protected mode. */
3758 pSReg->Sel = uRpl;
3759 pSReg->ValidSel = uRpl;
3760 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3761 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3762 {
3763 /* VT-x (Intel 3960x) observed doing something like this. */
3764 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3765 pSReg->u32Limit = UINT32_MAX;
3766 pSReg->u64Base = 0;
3767 }
3768 else
3769 {
3770 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3771 pSReg->u32Limit = 0;
3772 pSReg->u64Base = 0;
3773 }
3774}
3775
3776
3777/**
3778 * Loads a segment selector during a task switch in protected mode.
3779 *
3780 * In this task switch scenario, we would throw \#TS exceptions rather than
3781 * \#GPs.
3782 *
3783 * @returns VBox strict status code.
3784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3785 * @param pSReg Pointer to the segment register.
3786 * @param uSel The new selector value.
3787 *
3788 * @remarks This does _not_ handle CS or SS.
3789 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3790 */
3791IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3792{
3793 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3794
3795 /* Null data selector. */
3796 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3797 {
3798 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3799 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3800 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3801 return VINF_SUCCESS;
3802 }
3803
3804 /* Fetch the descriptor. */
3805 IEMSELDESC Desc;
3806 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3807 if (rcStrict != VINF_SUCCESS)
3808 {
3809 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3810 VBOXSTRICTRC_VAL(rcStrict)));
3811 return rcStrict;
3812 }
3813
3814 /* Must be a data segment or readable code segment. */
3815 if ( !Desc.Legacy.Gen.u1DescType
3816 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3817 {
3818 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3819 Desc.Legacy.Gen.u4Type));
3820 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3821 }
3822
3823 /* Check privileges for data segments and non-conforming code segments. */
3824 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3825 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3826 {
3827 /* The RPL and the new CPL must be less than or equal to the DPL. */
3828 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3829 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3830 {
3831 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3832 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3833 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3834 }
3835 }
3836
3837 /* Is it there? */
3838 if (!Desc.Legacy.Gen.u1Present)
3839 {
3840 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3841 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3842 }
3843
3844 /* The base and limit. */
3845 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3846 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3847
3848 /*
3849 * Ok, everything checked out fine. Now set the accessed bit before
3850 * committing the result into the registers.
3851 */
3852 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3853 {
3854 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3855 if (rcStrict != VINF_SUCCESS)
3856 return rcStrict;
3857 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3858 }
3859
3860 /* Commit */
3861 pSReg->Sel = uSel;
3862 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3863 pSReg->u32Limit = cbLimit;
3864 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3865 pSReg->ValidSel = uSel;
3866 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3867 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3868 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3869
3870 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3871 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3872 return VINF_SUCCESS;
3873}
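
/*
 * Editor's note (illustrative, not part of the original source): the #TS error
 * codes raised above carry the selector with the RPL bits masked off, i.e.
 * just the index and TI bit (the EXT bit is handled separately):
 */
#if 0
static void iemExampleTsErrCode(void)
{
    uint16_t const uSel = 0x002b; /* index 5, TI=0 (GDT), RPL=3 */
    Assert((uSel & X86_SEL_MASK_OFF_RPL) == 0x0028);
}
#endif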
3874
3875
3876/**
3877 * Performs a task switch.
3878 *
3879 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3880 * caller is responsible for performing the necessary checks (like DPL, TSS
3881 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3882 * reference for JMP, CALL, IRET.
3883 *
3884 * If the task switch is due to a software interrupt or hardware exception,
3885 * the caller is responsible for validating the TSS selector and descriptor. See
3886 * Intel Instruction reference for INT n.
3887 *
3888 * @returns VBox strict status code.
3889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3890 * @param enmTaskSwitch The cause of the task switch.
3891 * @param uNextEip The EIP effective after the task switch.
3892 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3895 * @param SelTSS The TSS selector of the new task.
3896 * @param pNewDescTSS Pointer to the new TSS descriptor.
3897 */
3898IEM_STATIC VBOXSTRICTRC
3899iemTaskSwitch(PVMCPUCC pVCpu,
3900 IEMTASKSWITCH enmTaskSwitch,
3901 uint32_t uNextEip,
3902 uint32_t fFlags,
3903 uint16_t uErr,
3904 uint64_t uCr2,
3905 RTSEL SelTSS,
3906 PIEMSELDESC pNewDescTSS)
3907{
3908 Assert(!IEM_IS_REAL_MODE(pVCpu));
3909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3910 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3911
3912 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3913 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3914 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3917
3918 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3919 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3920
3921 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3922 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3923
3924 /* Update CR2 in case it's a page-fault. */
3925 /** @todo This should probably be done much earlier in IEM/PGM. See
3926 * @bugref{5653#c49}. */
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pVCpu->cpum.GstCtx.cr2 = uCr2;
3929
3930 /*
3931 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3932 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3933 */
3934 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3935 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3936 if (uNewTSSLimit < uNewTSSLimitMin)
3937 {
3938 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3939 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /*
3944 * Task switches in VMX non-root mode always cause task-switch VM-exits.
3945 * The new TSS must have been read and validated (DPL, limits etc.) before a
3946 * task-switch VM-exit commences.
3947 *
3948 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3949 */
3950 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3951 {
3952 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3953 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3954 }
3955
3956 /*
3957 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3958 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3959 */
3960 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3961 {
3962 uint32_t const uExitInfo1 = SelTSS;
3963 uint32_t uExitInfo2 = uErr;
3964 switch (enmTaskSwitch)
3965 {
3966 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3967 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3968 default: break;
3969 }
3970 if (fFlags & IEM_XCPT_FLAGS_ERR)
3971 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3972 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3973 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3974
3975 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3976 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3977 RT_NOREF2(uExitInfo1, uExitInfo2);
3978 }
3979
3980 /*
3981 * Check the current TSS limit. The last bytes written to the current TSS during the
3982 * task switch are the 2 bytes at offset 0x5C (32-bit) and at offset 0x28 (16-bit).
3983 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3984 *
3985 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3986 * end up with smaller than "legal" TSS limits.
3987 */
3988 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3989 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3990 if (uCurTSSLimit < uCurTSSLimitMin)
3991 {
3992 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3993 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3994 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3995 }
3996
3997 /*
3998 * Verify that the new TSS can be accessed and map it. Map only the required contents
3999 * and not the entire TSS.
4000 */
4001 void *pvNewTSS;
4002 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4003 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4004 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4005 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4006 * not perform correct translation if this happens. See Intel spec. 7.2.1
4007 * "Task-State Segment". */
4008 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4009 if (rcStrict != VINF_SUCCESS)
4010 {
4011 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4012 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4013 return rcStrict;
4014 }
4015
4016 /*
4017 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4018 */
4019 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4020 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4021 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4022 {
4023 PX86DESC pDescCurTSS;
4024 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4025 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4026 if (rcStrict != VINF_SUCCESS)
4027 {
4028 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4029 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4030 return rcStrict;
4031 }
4032
4033 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4034 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4035 if (rcStrict != VINF_SUCCESS)
4036 {
4037 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4038 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4039 return rcStrict;
4040 }
4041
4042 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4043 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4044 {
4045 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4046 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4047 u32EFlags &= ~X86_EFL_NT;
4048 }
4049 }
4050
4051 /*
4052 * Save the CPU state into the current TSS.
4053 */
4054 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4055 if (GCPtrNewTSS == GCPtrCurTSS)
4056 {
4057 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4058 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4059 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4060 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4061 pVCpu->cpum.GstCtx.ldtr.Sel));
4062 }
4063 if (fIsNewTSS386)
4064 {
4065 /*
4066 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4067 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4068 */
4069 void *pvCurTSS32;
4070 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4071 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4072 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4073 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4074 if (rcStrict != VINF_SUCCESS)
4075 {
4076 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4077 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4078 return rcStrict;
4079 }
4080
4081 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4082 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4083 pCurTSS32->eip = uNextEip;
4084 pCurTSS32->eflags = u32EFlags;
4085 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4086 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4087 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4088 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4089 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4090 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4091 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4092 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4093 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4094 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4095 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4096 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4097 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4098 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4099
4100 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4101 if (rcStrict != VINF_SUCCESS)
4102 {
4103 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4104 VBOXSTRICTRC_VAL(rcStrict)));
4105 return rcStrict;
4106 }
4107 }
4108 else
4109 {
4110 /*
4111 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4112 */
4113 void *pvCurTSS16;
4114 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4115 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4116 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4117 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4118 if (rcStrict != VINF_SUCCESS)
4119 {
4120 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4121 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4122 return rcStrict;
4123 }
4124
4125 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4126 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4127 pCurTSS16->ip = uNextEip;
4128 pCurTSS16->flags = u32EFlags;
4129 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4130 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4131 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4132 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4133 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4134 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4135 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4136 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4137 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4138 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4139 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4140 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4141
4142 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4143 if (rcStrict != VINF_SUCCESS)
4144 {
4145 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4146 VBOXSTRICTRC_VAL(rcStrict)));
4147 return rcStrict;
4148 }
4149 }
4150
4151 /*
4152 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4153 */
4154 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4155 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4156 {
4157 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4158 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4159 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4160 }
4161
4162 /*
4163 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4164 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4165 */
4166 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4167 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4168 bool fNewDebugTrap;
4169 if (fIsNewTSS386)
4170 {
4171 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4172 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4173 uNewEip = pNewTSS32->eip;
4174 uNewEflags = pNewTSS32->eflags;
4175 uNewEax = pNewTSS32->eax;
4176 uNewEcx = pNewTSS32->ecx;
4177 uNewEdx = pNewTSS32->edx;
4178 uNewEbx = pNewTSS32->ebx;
4179 uNewEsp = pNewTSS32->esp;
4180 uNewEbp = pNewTSS32->ebp;
4181 uNewEsi = pNewTSS32->esi;
4182 uNewEdi = pNewTSS32->edi;
4183 uNewES = pNewTSS32->es;
4184 uNewCS = pNewTSS32->cs;
4185 uNewSS = pNewTSS32->ss;
4186 uNewDS = pNewTSS32->ds;
4187 uNewFS = pNewTSS32->fs;
4188 uNewGS = pNewTSS32->gs;
4189 uNewLdt = pNewTSS32->selLdt;
4190 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4191 }
4192 else
4193 {
4194 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4195 uNewCr3 = 0;
4196 uNewEip = pNewTSS16->ip;
4197 uNewEflags = pNewTSS16->flags;
4198 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4199 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4200 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4201 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4202 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4203 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4204 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4205 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4206 uNewES = pNewTSS16->es;
4207 uNewCS = pNewTSS16->cs;
4208 uNewSS = pNewTSS16->ss;
4209 uNewDS = pNewTSS16->ds;
4210 uNewFS = 0;
4211 uNewGS = 0;
4212 uNewLdt = pNewTSS16->selLdt;
4213 fNewDebugTrap = false;
4214 }
4215
4216 if (GCPtrNewTSS == GCPtrCurTSS)
4217 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4218 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4219
4220 /*
4221 * We're done accessing the new TSS.
4222 */
4223 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4224 if (rcStrict != VINF_SUCCESS)
4225 {
4226 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /*
4231 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4232 */
4233 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4234 {
4235 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4236 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4240 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243
4244 /* Check that the descriptor indicates the new TSS is available (not busy). */
4245 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4246 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4247 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4248
4249 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4250 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4251 if (rcStrict != VINF_SUCCESS)
4252 {
4253 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4254 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4255 return rcStrict;
4256 }
4257 }
4258
4259 /*
4260 * From this point on, we're technically in the new task. Exceptions are deferred until
4261 * the completion of the task switch, but raised before executing any instructions in the new task.
4262 */
4263 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4264 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4265 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4266 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4267 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4268 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4269 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4270
4271 /* Set the busy bit in TR. */
4272 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4273
4274 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4275 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4276 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4277 {
4278 uNewEflags |= X86_EFL_NT;
4279 }
4280
4281 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4282 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4283 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4284
4285 pVCpu->cpum.GstCtx.eip = uNewEip;
4286 pVCpu->cpum.GstCtx.eax = uNewEax;
4287 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4288 pVCpu->cpum.GstCtx.edx = uNewEdx;
4289 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4290 pVCpu->cpum.GstCtx.esp = uNewEsp;
4291 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4292 pVCpu->cpum.GstCtx.esi = uNewEsi;
4293 pVCpu->cpum.GstCtx.edi = uNewEdi;
4294
4295 uNewEflags &= X86_EFL_LIVE_MASK;
4296 uNewEflags |= X86_EFL_RA1_MASK;
4297 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4298
4299 /*
4300 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4301 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4302 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4303 */
4304 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4305 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4306
4307 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4308 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4309
4310 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4311 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4312
4313 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4314 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4315
4316 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4317 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4318
4319 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4320 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4321 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4322
4323 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4324 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4325 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4326 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4327
4328 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4329 {
4330 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4336 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4337 }
4338
4339 /*
4340 * Switch CR3 for the new task.
4341 */
4342 if ( fIsNewTSS386
4343 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4344 {
4345 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4346 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4347 AssertRCSuccessReturn(rc, rc);
4348
4349 /* Inform PGM. */
4350 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4351 AssertRCReturn(rc, rc);
4352 /* ignore informational status codes */
4353
4354 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4355 }
4356
4357 /*
4358 * Switch LDTR for the new task.
4359 */
4360 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4361 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4362 else
4363 {
4364 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4365
4366 IEMSELDESC DescNewLdt;
4367 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4368 if (rcStrict != VINF_SUCCESS)
4369 {
4370 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4371 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4372 return rcStrict;
4373 }
4374 if ( !DescNewLdt.Legacy.Gen.u1Present
4375 || DescNewLdt.Legacy.Gen.u1DescType
4376 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4377 {
4378 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4379 uNewLdt, DescNewLdt.Legacy.u));
4380 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4381 }
4382
4383 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4384 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4385 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4386 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4387 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4388 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4389 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4390 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4391 }
4392
4393 IEMSELDESC DescSS;
4394 if (IEM_IS_V86_MODE(pVCpu))
4395 {
4396 pVCpu->iem.s.uCpl = 3;
4397 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4398 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4399 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4400 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4401 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4402 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4403
4404 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4405 DescSS.Legacy.u = 0;
4406 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4407 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4408 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4409 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4410 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4411 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4412 DescSS.Legacy.Gen.u2Dpl = 3;
4413 }
4414 else
4415 {
4416 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4417
4418 /*
4419 * Load the stack segment for the new task.
4420 */
4421 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4422 {
4423 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4424 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4425 }
4426
4427 /* Fetch the descriptor. */
4428 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4429 if (rcStrict != VINF_SUCCESS)
4430 {
4431 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4432 VBOXSTRICTRC_VAL(rcStrict)));
4433 return rcStrict;
4434 }
4435
4436 /* SS must be a data segment and writable. */
4437 if ( !DescSS.Legacy.Gen.u1DescType
4438 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4439 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4440 {
4441 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4442 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4443 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4444 }
4445
4446 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4447 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4448 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4449 {
4450 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4451 uNewCpl));
4452 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4453 }
4454
4455 /* Is it there? */
4456 if (!DescSS.Legacy.Gen.u1Present)
4457 {
4458 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4459 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4463 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4464
4465 /* Set the accessed bit before committing the result into SS. */
4466 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4467 {
4468 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4469 if (rcStrict != VINF_SUCCESS)
4470 return rcStrict;
4471 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4472 }
4473
4474 /* Commit SS. */
4475 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4476 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4477 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4478 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4479 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4480 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4481 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4482
4483 /* CPL has changed, update IEM before loading rest of segments. */
4484 pVCpu->iem.s.uCpl = uNewCpl;
4485
4486 /*
4487 * Load the data segments for the new task.
4488 */
4489 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4490 if (rcStrict != VINF_SUCCESS)
4491 return rcStrict;
4492 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4493 if (rcStrict != VINF_SUCCESS)
4494 return rcStrict;
4495 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4496 if (rcStrict != VINF_SUCCESS)
4497 return rcStrict;
4498 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4499 if (rcStrict != VINF_SUCCESS)
4500 return rcStrict;
4501
4502 /*
4503 * Load the code segment for the new task.
4504 */
4505 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4506 {
4507 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4508 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4509 }
4510
4511 /* Fetch the descriptor. */
4512 IEMSELDESC DescCS;
4513 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4514 if (rcStrict != VINF_SUCCESS)
4515 {
4516 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4517 return rcStrict;
4518 }
4519
4520 /* CS must be a code segment. */
4521 if ( !DescCS.Legacy.Gen.u1DescType
4522 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4523 {
4524 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4525 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4526 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4527 }
4528
4529 /* For conforming CS, DPL must be less than or equal to the RPL. */
4530 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4531 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4532 {
4533 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4534 DescCS.Legacy.Gen.u2Dpl));
4535 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4536 }
4537
4538 /* For non-conforming CS, DPL must match RPL. */
4539 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4540 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4541 {
4542 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4543 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4544 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4545 }
4546
4547 /* Is it there? */
4548 if (!DescCS.Legacy.Gen.u1Present)
4549 {
4550 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4551 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4552 }
4553
4554 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4555 u64Base = X86DESC_BASE(&DescCS.Legacy);
4556
4557 /* Set the accessed bit before committing the result into CS. */
4558 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4559 {
4560 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4561 if (rcStrict != VINF_SUCCESS)
4562 return rcStrict;
4563 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4564 }
4565
4566 /* Commit CS. */
4567 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4568 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4569 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4570 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4571 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4572 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4573 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4574 }
4575
4576 /** @todo Debug trap. */
4577 if (fIsNewTSS386 && fNewDebugTrap)
4578 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4579
4580 /*
4581 * Construct the error code masks based on what caused this task switch.
4582 * See Intel Instruction reference for INT.
4583 */
4584 uint16_t uExt;
4585 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4586 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4587 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4588 {
4589 uExt = 1;
4590 }
4591 else
4592 uExt = 0;
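#if 0 /* Illustrative sketch only (not in the original sources): how uExt ends
       * up in an x86 selector error code. The layout is (sel & ~3) | IDT | EXT,
       * so for a hardware-delivered event the faults raised below all carry
       * bit 0 set: */
    uint16_t const uErrExample = (uNewCS & X86_SEL_MASK_OFF_RPL) /* selector index + TI bit */
                               | uExt;                           /* bit 0: external event */
#endif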
4593
4594 /*
4595 * Push any error code on to the new stack.
4596 */
4597 if (fFlags & IEM_XCPT_FLAGS_ERR)
4598 {
4599 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4600 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4601 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4602
4603 /* Check that there is sufficient space on the stack. */
4604 /** @todo Factor out segment limit checking for normal/expand down segments
4605 * into a separate function. */
4606 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4607 {
4608 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4609 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4610 {
4611 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4612 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4613 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4614 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4615 }
4616 }
4617 else
4618 {
4619 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4620 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4621 {
4622 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4623 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4624 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4625 }
4626 }
4627
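#if 0 /* Minimal sketch of the factored-out limit check the @todo above asks
       * for; a hypothetical helper (it would live at file scope) mirroring the
       * two branches above: a normal stack segment accepts ESP values in
       * [cbFrame, cbLimit + 1], an expand-down one accepts (cbLimit, 64K/4G]. */
    DECLINLINE(bool) iemSketchIsEspInBounds(uint32_t uEsp, uint8_t cbFrame, uint32_t cbLimit, bool fDown, bool fBig)
    {
        if (!fDown)
            return uEsp - 1 <= cbLimit && uEsp >= cbFrame;
        uint32_t const uMax = fBig ? UINT32_MAX : UINT32_C(0xffff);
        return uEsp - 1 <= uMax && uEsp - cbFrame >= cbLimit + UINT32_C(1);
    }
#endif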
4628
4629 if (fIsNewTSS386)
4630 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4631 else
4632 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4633 if (rcStrict != VINF_SUCCESS)
4634 {
4635 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4636 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4637 return rcStrict;
4638 }
4639 }
4640
4641 /* Check the new EIP against the new CS limit. */
4642 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4643 {
4644         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4645 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4646 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4647 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4648 }
4649
4650 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4651 pVCpu->cpum.GstCtx.ss.Sel));
4652 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4653}
4654
4655
4656/**
4657 * Implements exceptions and interrupts for protected mode.
4658 *
4659 * @returns VBox strict status code.
4660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4661 * @param cbInstr The number of bytes to offset rIP by in the return
4662 * address.
4663 * @param u8Vector The interrupt / exception vector number.
4664 * @param fFlags The flags.
4665 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4666 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4667 */
4668IEM_STATIC VBOXSTRICTRC
4669iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4670 uint8_t cbInstr,
4671 uint8_t u8Vector,
4672 uint32_t fFlags,
4673 uint16_t uErr,
4674 uint64_t uCr2)
4675{
4676 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4677
4678 /*
4679 * Read the IDT entry.
4680 */
4681 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4682 {
4683 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4684 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4685 }
4686 X86DESC Idte;
4687 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4688 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4690 {
4691 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4692 return rcStrict;
4693 }
4694 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4695 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4696 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4697
4698 /*
4699 * Check the descriptor type, DPL and such.
4700 * ASSUMES this is done in the same order as described for call-gate calls.
4701 */
4702 if (Idte.Gate.u1DescType)
4703 {
4704 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4705 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4706 }
4707 bool fTaskGate = false;
4708 uint8_t f32BitGate = true;
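    /* Note: f32BitGate is deliberately uint8_t rather than bool, as it doubles
       as a shift count when sizing the stack frame further down. */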
4709 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4710 switch (Idte.Gate.u4Type)
4711 {
4712 case X86_SEL_TYPE_SYS_UNDEFINED:
4713 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4714 case X86_SEL_TYPE_SYS_LDT:
4715 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4716 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4717 case X86_SEL_TYPE_SYS_UNDEFINED2:
4718 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4719 case X86_SEL_TYPE_SYS_UNDEFINED3:
4720 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4721 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4722 case X86_SEL_TYPE_SYS_UNDEFINED4:
4723 {
4724 /** @todo check what actually happens when the type is wrong...
4725 * esp. call gates. */
4726 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4727 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4728 }
4729
4730 case X86_SEL_TYPE_SYS_286_INT_GATE:
4731 f32BitGate = false;
4732 RT_FALL_THRU();
4733 case X86_SEL_TYPE_SYS_386_INT_GATE:
4734 fEflToClear |= X86_EFL_IF;
4735 break;
4736
4737 case X86_SEL_TYPE_SYS_TASK_GATE:
4738 fTaskGate = true;
4739#ifndef IEM_IMPLEMENTS_TASKSWITCH
4740 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4741#endif
4742 break;
4743
4744         case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4745             f32BitGate = false;
                 RT_FALL_THRU();
4746 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4747 break;
4748
4749 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4750 }
4751
4752 /* Check DPL against CPL if applicable. */
4753 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4754 {
4755 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4756 {
4757 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4758 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4759 }
4760 }
4761
4762 /* Is it there? */
4763 if (!Idte.Gate.u1Present)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4766 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768
4769 /* Is it a task-gate? */
4770 if (fTaskGate)
4771 {
4772 /*
4773 * Construct the error code masks based on what caused this task switch.
4774 * See Intel Instruction reference for INT.
4775 */
4776 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4777 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4778 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4779 RTSEL SelTSS = Idte.Gate.u16Sel;
4780
4781 /*
4782 * Fetch the TSS descriptor in the GDT.
4783 */
4784 IEMSELDESC DescTSS;
4785 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4786 if (rcStrict != VINF_SUCCESS)
4787 {
4788 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4789 VBOXSTRICTRC_VAL(rcStrict)));
4790 return rcStrict;
4791 }
4792
4793 /* The TSS descriptor must be a system segment and be available (not busy). */
4794 if ( DescTSS.Legacy.Gen.u1DescType
4795 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4796 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4797 {
4798 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4799 u8Vector, SelTSS, DescTSS.Legacy.au64));
4800 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4801 }
4802
4803 /* The TSS must be present. */
4804 if (!DescTSS.Legacy.Gen.u1Present)
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4807 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4808 }
4809
4810 /* Do the actual task switch. */
4811 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4812 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4813 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4814 }
4815
4816 /* A null CS is bad. */
4817 RTSEL NewCS = Idte.Gate.u16Sel;
4818 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4819 {
4820 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4821 return iemRaiseGeneralProtectionFault0(pVCpu);
4822 }
4823
4824 /* Fetch the descriptor for the new CS. */
4825 IEMSELDESC DescCS;
4826 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4827 if (rcStrict != VINF_SUCCESS)
4828 {
4829 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4830 return rcStrict;
4831 }
4832
4833 /* Must be a code segment. */
4834 if (!DescCS.Legacy.Gen.u1DescType)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4837 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4838 }
4839 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4842 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4843 }
4844
4845 /* Don't allow lowering the privilege level. */
4846 /** @todo Does the lowering of privileges apply to software interrupts
4847 * only? This has bearings on the more-privileged or
4848 * same-privilege stack behavior further down. A testcase would
4849 * be nice. */
4850 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4853 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4854 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4855 }
4856
4857 /* Make sure the selector is present. */
4858 if (!DescCS.Legacy.Gen.u1Present)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4861 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4862 }
4863
4864 /* Check the new EIP against the new CS limit. */
4865 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4866 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4867 ? Idte.Gate.u16OffsetLow
4868 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4869 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4870 if (uNewEip > cbLimitCS)
4871 {
4872 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4873 u8Vector, uNewEip, cbLimitCS, NewCS));
4874 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4875 }
4876 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
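#if 0 /* Worked example with hypothetical values: a 386 interrupt gate with
       * u16OffsetHigh=0x0040 and u16OffsetLow=0x1000 targets EIP 0x00401000;
       * if the new CS had cbLimitCS=0xffff, the check above would have
       * rejected it with #GP(0). */
    uint32_t const uEipExample = UINT32_C(0x0040) << 16 | UINT32_C(0x1000); /* = 0x00401000 */
#endif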
4877
4878 /* Calc the flag image to push. */
4879 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4880 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4881 fEfl &= ~X86_EFL_RF;
4882 else
4883 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4884
4885 /* From V8086 mode only go to CPL 0. */
4886 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4887 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4888 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4889 {
4890 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4891 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4892 }
4893
4894 /*
4895 * If the privilege level changes, we need to get a new stack from the TSS.
4896 * This in turns means validating the new SS and ESP...
4897 */
4898 if (uNewCpl != pVCpu->iem.s.uCpl)
4899 {
4900 RTSEL NewSS;
4901 uint32_t uNewEsp;
4902 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4903 if (rcStrict != VINF_SUCCESS)
4904 return rcStrict;
4905
4906 IEMSELDESC DescSS;
4907 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4908 if (rcStrict != VINF_SUCCESS)
4909 return rcStrict;
4910 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4911 if (!DescSS.Legacy.Gen.u1DefBig)
4912 {
4913 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4914 uNewEsp = (uint16_t)uNewEsp;
4915 }
4916
4917 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4918
4919 /* Check that there is sufficient space for the stack frame. */
4920 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4921 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4922 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4923 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4924
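#if 0 /* Worked example for the frame size above: a 32-bit gate (f32BitGate=1)
       * without an error code pushes 10 << 1 = 20 bytes, i.e. the five dwords
       * EIP, CS, EFLAGS, ESP and SS; when leaving V8086 mode it is 18 << 1 = 36
       * bytes since ES, DS, FS and GS are pushed as well. A 16-bit gate pushes
       * the same slots as words (10 resp. 18 bytes). */
        AssertCompile((10 << 1) == 5 * sizeof(uint32_t));
#endif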
4925 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4926 {
4927 if ( uNewEsp - 1 > cbLimitSS
4928 || uNewEsp < cbStackFrame)
4929 {
4930 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4931 u8Vector, NewSS, uNewEsp, cbStackFrame));
4932 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4933 }
4934 }
4935 else
4936 {
4937 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4938 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4939 {
4940 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4941 u8Vector, NewSS, uNewEsp, cbStackFrame));
4942 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4943 }
4944 }
4945
4946 /*
4947 * Start making changes.
4948 */
4949
4950 /* Set the new CPL so that stack accesses use it. */
4951 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4952 pVCpu->iem.s.uCpl = uNewCpl;
4953
4954 /* Create the stack frame. */
4955 RTPTRUNION uStackFrame;
4956 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4957 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4958 if (rcStrict != VINF_SUCCESS)
4959 return rcStrict;
4960 void * const pvStackFrame = uStackFrame.pv;
4961 if (f32BitGate)
4962 {
4963 if (fFlags & IEM_XCPT_FLAGS_ERR)
4964 *uStackFrame.pu32++ = uErr;
4965 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4966 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4967 uStackFrame.pu32[2] = fEfl;
4968 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4969 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4970 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4971 if (fEfl & X86_EFL_VM)
4972 {
4973 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4974 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4975 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4976 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4977 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4978 }
4979 }
4980 else
4981 {
4982 if (fFlags & IEM_XCPT_FLAGS_ERR)
4983 *uStackFrame.pu16++ = uErr;
4984 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4985 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4986 uStackFrame.pu16[2] = fEfl;
4987 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4988 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4989 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4990 if (fEfl & X86_EFL_VM)
4991 {
4992 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4993 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4994 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4995 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4996 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4997 }
4998 }
4999 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5000 if (rcStrict != VINF_SUCCESS)
5001 return rcStrict;
5002
5003 /* Mark the selectors 'accessed' (hope this is the correct time). */
5004         /** @todo testcase: exactly _when_ are the accessed bits set - before or
5005          *        after pushing the stack frame? (Write protect the GDT + stack to
5006          *        find out.) */
5007 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5008 {
5009 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5010 if (rcStrict != VINF_SUCCESS)
5011 return rcStrict;
5012 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5013 }
5014
5015 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 /*
5024          * Start committing the register changes (joins with the DPL=CPL branch).
5025 */
5026 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5027 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5028 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5029 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5030 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5031 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5032 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5033 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5034 * SP is loaded).
5035 * Need to check the other combinations too:
5036 * - 16-bit TSS, 32-bit handler
5037 * - 32-bit TSS, 16-bit handler */
5038 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5039 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5040 else
5041 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5042
5043 if (fEfl & X86_EFL_VM)
5044 {
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5048 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5049 }
5050 }
5051 /*
5052 * Same privilege, no stack change and smaller stack frame.
5053 */
5054 else
5055 {
5056 uint64_t uNewRsp;
5057 RTPTRUNION uStackFrame;
5058 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5059 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5060 if (rcStrict != VINF_SUCCESS)
5061 return rcStrict;
5062 void * const pvStackFrame = uStackFrame.pv;
5063
5064 if (f32BitGate)
5065 {
5066 if (fFlags & IEM_XCPT_FLAGS_ERR)
5067 *uStackFrame.pu32++ = uErr;
5068 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5069 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5070 uStackFrame.pu32[2] = fEfl;
5071 }
5072 else
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu16++ = uErr;
5076 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu16[2] = fEfl;
5079 }
5080 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5081 if (rcStrict != VINF_SUCCESS)
5082 return rcStrict;
5083
5084 /* Mark the CS selector as 'accessed'. */
5085 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5086 {
5087 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5088 if (rcStrict != VINF_SUCCESS)
5089 return rcStrict;
5090 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5091 }
5092
5093 /*
5094 * Start committing the register changes (joins with the other branch).
5095 */
5096 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5097 }
5098
5099 /* ... register committing continues. */
5100 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5102 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5103 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5104 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5105 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5106
5107 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5108 fEfl &= ~fEflToClear;
5109 IEMMISC_SET_EFL(pVCpu, fEfl);
5110
5111 if (fFlags & IEM_XCPT_FLAGS_CR2)
5112 pVCpu->cpum.GstCtx.cr2 = uCr2;
5113
5114 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5115 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5116
5117 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5118}
5119
5120
5121/**
5122 * Implements exceptions and interrupts for long mode.
5123 *
5124 * @returns VBox strict status code.
5125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5126 * @param cbInstr The number of bytes to offset rIP by in the return
5127 * address.
5128 * @param u8Vector The interrupt / exception vector number.
5129 * @param fFlags The flags.
5130 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5131 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5132 */
5133IEM_STATIC VBOXSTRICTRC
5134iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5135 uint8_t cbInstr,
5136 uint8_t u8Vector,
5137 uint32_t fFlags,
5138 uint16_t uErr,
5139 uint64_t uCr2)
5140{
5141 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5142
5143 /*
5144 * Read the IDT entry.
5145 */
5146 uint16_t offIdt = (uint16_t)u8Vector << 4;
5147 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5148 {
5149 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5150 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5151 }
5152 X86DESC64 Idte;
5153 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5154 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5155 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5156 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5157 {
5158 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5159 return rcStrict;
5160 }
5161 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5162 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5163 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5164
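#if 0 /* Illustrative only: long mode IDT entries are 16 bytes, which is why
       * offIdt is u8Vector * 16 and the entry is fetched as two qwords above.
       * Rough X86DESC64 gate layout (see x86.h for the real definition):
       *   au64[0]: offset 15:0, selector, IST, type, DPL, P, offset 31:16
       *   au64[1]: offset 63:32, reserved */
    AssertCompile(sizeof(X86DESC64) == 16);
#endif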
5165 /*
5166 * Check the descriptor type, DPL and such.
5167 * ASSUMES this is done in the same order as described for call-gate calls.
5168 */
5169 if (Idte.Gate.u1DescType)
5170 {
5171 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5172 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5173 }
5174 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5175 switch (Idte.Gate.u4Type)
5176 {
5177 case AMD64_SEL_TYPE_SYS_INT_GATE:
5178 fEflToClear |= X86_EFL_IF;
5179 break;
5180 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5181 break;
5182
5183 default:
5184 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5185 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5186 }
5187
5188 /* Check DPL against CPL if applicable. */
5189 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5190 {
5191 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5192 {
5193 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5194 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5195 }
5196 }
5197
5198 /* Is it there? */
5199 if (!Idte.Gate.u1Present)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5202 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204
5205 /* A null CS is bad. */
5206 RTSEL NewCS = Idte.Gate.u16Sel;
5207 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5210 return iemRaiseGeneralProtectionFault0(pVCpu);
5211 }
5212
5213 /* Fetch the descriptor for the new CS. */
5214 IEMSELDESC DescCS;
5215 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5216 if (rcStrict != VINF_SUCCESS)
5217 {
5218 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5219 return rcStrict;
5220 }
5221
5222 /* Must be a 64-bit code segment. */
5223 if (!DescCS.Long.Gen.u1DescType)
5224 {
5225 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5226 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5227 }
5228 if ( !DescCS.Long.Gen.u1Long
5229 || DescCS.Long.Gen.u1DefBig
5230 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5231 {
5232 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5233 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5234 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5235 }
5236
5237 /* Don't allow lowering the privilege level. For non-conforming CS
5238 selectors, the CS.DPL sets the privilege level the trap/interrupt
5239 handler runs at. For conforming CS selectors, the CPL remains
5240 unchanged, but the CS.DPL must be <= CPL. */
5241 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5242 * when CPU in Ring-0. Result \#GP? */
5243 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5246 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5247 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5248 }
5249
5250
5251 /* Make sure the selector is present. */
5252 if (!DescCS.Legacy.Gen.u1Present)
5253 {
5254 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5255 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5256 }
5257
5258 /* Check that the new RIP is canonical. */
5259 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5260 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5261 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5262 if (!IEM_IS_CANONICAL(uNewRip))
5263 {
5264 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5265 return iemRaiseGeneralProtectionFault0(pVCpu);
5266 }
5267
5268 /*
5269 * If the privilege level changes or if the IST isn't zero, we need to get
5270 * a new stack from the TSS.
5271 */
5272 uint64_t uNewRsp;
5273 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5274 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5275 if ( uNewCpl != pVCpu->iem.s.uCpl
5276 || Idte.Gate.u3IST != 0)
5277 {
5278 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5279 if (rcStrict != VINF_SUCCESS)
5280 return rcStrict;
5281 }
5282 else
5283 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5284 uNewRsp &= ~(uint64_t)0xf;
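#if 0 /* Note on the masking above: in 64-bit mode the CPU aligns the new RSP
       * down to a 16-byte boundary before pushing the interrupt frame, e.g.
       * an incoming RSP of 0x7ffefff8 becomes 0x7ffefff0: */
    AssertCompile((UINT64_C(0x7ffefff8) & ~(uint64_t)0xf) == UINT64_C(0x7ffefff0));
#endif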
5285
5286 /*
5287 * Calc the flag image to push.
5288 */
5289 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5290 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5291 fEfl &= ~X86_EFL_RF;
5292 else
5293 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5294
5295 /*
5296 * Start making changes.
5297 */
5298 /* Set the new CPL so that stack accesses use it. */
5299 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5300 pVCpu->iem.s.uCpl = uNewCpl;
5301
5302 /* Create the stack frame. */
5303 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5304 RTPTRUNION uStackFrame;
5305 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5306 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5307 if (rcStrict != VINF_SUCCESS)
5308 return rcStrict;
5309 void * const pvStackFrame = uStackFrame.pv;
5310
5311 if (fFlags & IEM_XCPT_FLAGS_ERR)
5312 *uStackFrame.pu64++ = uErr;
5313 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5314 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5315 uStackFrame.pu64[2] = fEfl;
5316 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5317 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5318 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321
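#if 0 /* Sketch of the resulting 64-bit frame (lowest address first), matching
       * the pu64[] stores above; the error code slot exists only when
       * IEM_XCPT_FLAGS_ERR is set:
       *   [uErr,] RIP, CS, RFLAGS, RSP, SS  ->  40 or 48 bytes in total. */
    AssertCompile(sizeof(uint64_t) * 5 == 40 && sizeof(uint64_t) * 6 == 48);
#endif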
5322     /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5323     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5324      *        after pushing the stack frame? (Write protect the GDT + stack to
5325      *        find out.) */
5326 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5327 {
5328 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5329 if (rcStrict != VINF_SUCCESS)
5330 return rcStrict;
5331 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5332 }
5333
5334 /*
5335      * Start committing the register changes.
5336 */
5337 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5338 * hidden registers when interrupting 32-bit or 16-bit code! */
5339 if (uNewCpl != uOldCpl)
5340 {
5341 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5342 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5343 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5344 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5345 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5346 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5347 }
5348 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5349 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5350 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5351 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5352 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5353 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5354 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5355 pVCpu->cpum.GstCtx.rip = uNewRip;
5356
5357 fEfl &= ~fEflToClear;
5358 IEMMISC_SET_EFL(pVCpu, fEfl);
5359
5360 if (fFlags & IEM_XCPT_FLAGS_CR2)
5361 pVCpu->cpum.GstCtx.cr2 = uCr2;
5362
5363 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5364 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5365
5366 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5367}
5368
5369
5370/**
5371 * Implements exceptions and interrupts.
5372 *
5373  * All exceptions and interrupts go through this function!
5374 *
5375 * @returns VBox strict status code.
5376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5377 * @param cbInstr The number of bytes to offset rIP by in the return
5378 * address.
5379 * @param u8Vector The interrupt / exception vector number.
5380 * @param fFlags The flags.
5381 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5382 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5383 */
5384DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5385iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5386 uint8_t cbInstr,
5387 uint8_t u8Vector,
5388 uint32_t fFlags,
5389 uint16_t uErr,
5390 uint64_t uCr2)
5391{
5392 /*
5393 * Get all the state that we might need here.
5394 */
5395 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5397
5398#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5399 /*
5400 * Flush prefetch buffer
5401 */
5402 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5403#endif
5404
5405 /*
5406 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5407 */
5408 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5409 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5410 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5411 | IEM_XCPT_FLAGS_BP_INSTR
5412 | IEM_XCPT_FLAGS_ICEBP_INSTR
5413 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5414 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5415 {
5416 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5417 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5418 u8Vector = X86_XCPT_GP;
5419 uErr = 0;
5420 }
5421#ifdef DBGFTRACE_ENABLED
5422 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5423 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5424 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5425#endif
5426
5427 /*
5428 * Evaluate whether NMI blocking should be in effect.
5429 * Normally, NMI blocking is in effect whenever we inject an NMI.
5430 */
5431     bool fBlockNmi = u8Vector == X86_XCPT_NMI
5432                   && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT);
5437
5438#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5439 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5440 {
5441 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5442 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5443 return rcStrict0;
5444
5445 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5446 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5447 {
5448 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5449 fBlockNmi = false;
5450 }
5451 }
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5455 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 *
5462 * See AMD spec. 15.20 "Event Injection".
5463 */
5464 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5465 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5466 else
5467 {
5468 /*
5469 * Check and handle if the event being raised is intercepted.
5470 */
5471 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5472 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5473 return rcStrict0;
5474 }
5475 }
5476#endif
5477
5478 /*
5479 * Set NMI blocking if necessary.
5480 */
5481 if ( fBlockNmi
5482 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5483 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5484
5485 /*
5486 * Do recursion accounting.
5487 */
5488 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5489 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5490 if (pVCpu->iem.s.cXcptRecursions == 0)
5491 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5492 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5493 else
5494 {
5495 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5496 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5497 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5498
5499 if (pVCpu->iem.s.cXcptRecursions >= 4)
5500 {
5501#ifdef DEBUG_bird
5502 AssertFailed();
5503#endif
5504 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5505 }
5506
5507 /*
5508 * Evaluate the sequence of recurring events.
5509 */
5510 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5511 NULL /* pXcptRaiseInfo */);
5512 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5513 { /* likely */ }
5514 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5515 {
5516 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5517 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5518 u8Vector = X86_XCPT_DF;
5519 uErr = 0;
5520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5521 /* VMX nested-guest #DF intercept needs to be checked here. */
5522 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5523 {
5524 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5525 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5526 return rcStrict0;
5527 }
5528#endif
5529 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5530 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5531 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5532 }
5533 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5534 {
5535 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5536 return iemInitiateCpuShutdown(pVCpu);
5537 }
5538 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5539 {
5540 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5541 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5542 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5543 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5544 return VERR_EM_GUEST_CPU_HANG;
5545 }
5546 else
5547 {
5548 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5549 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5550 return VERR_IEM_IPE_9;
5551 }
5552
5553 /*
5554          * The 'EXT' bit is set when an exception occurs during delivery of an external
5555          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5556          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5557          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5558 *
5559 * [1] - Intel spec. 6.13 "Error Code"
5560 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5561 * [3] - Intel Instruction reference for INT n.
5562 */
5563 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5564 && (fFlags & IEM_XCPT_FLAGS_ERR)
5565 && u8Vector != X86_XCPT_PF
5566 && u8Vector != X86_XCPT_DF)
5567 {
5568 uErr |= X86_TRAP_ERR_EXTERNAL;
5569 }
5570 }
5571
5572 pVCpu->iem.s.cXcptRecursions++;
5573 pVCpu->iem.s.uCurXcpt = u8Vector;
5574 pVCpu->iem.s.fCurXcpt = fFlags;
5575 pVCpu->iem.s.uCurXcptErr = uErr;
5576 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5577
5578 /*
5579 * Extensive logging.
5580 */
5581#if defined(LOG_ENABLED) && defined(IN_RING3)
5582 if (LogIs3Enabled())
5583 {
5584 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5585 PVM pVM = pVCpu->CTX_SUFF(pVM);
5586 char szRegs[4096];
5587 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5588 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5589 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5590 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5591 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5592 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5593 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5594 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5595 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5596 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5597 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5598 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5599 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5600 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5601 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5602 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5603 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5604 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5605 " efer=%016VR{efer}\n"
5606 " pat=%016VR{pat}\n"
5607 " sf_mask=%016VR{sf_mask}\n"
5608 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5609 " lstar=%016VR{lstar}\n"
5610 " star=%016VR{star} cstar=%016VR{cstar}\n"
5611 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5612 );
5613
5614 char szInstr[256];
5615 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5616 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5617 szInstr, sizeof(szInstr), NULL);
5618 Log3(("%s%s\n", szRegs, szInstr));
5619 }
5620#endif /* LOG_ENABLED */
5621
5622 /*
5623 * Call the mode specific worker function.
5624 */
5625 VBOXSTRICTRC rcStrict;
5626 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5627 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5628 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5629 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5630 else
5631 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5632
5633 /* Flush the prefetch buffer. */
5634#ifdef IEM_WITH_CODE_TLB
5635 pVCpu->iem.s.pbInstrBuf = NULL;
5636#else
5637 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5638#endif
5639
5640 /*
5641 * Unwind.
5642 */
5643 pVCpu->iem.s.cXcptRecursions--;
5644 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5645 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5646 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5647 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5648 pVCpu->iem.s.cXcptRecursions + 1));
5649 return rcStrict;
5650}
5651
5652#ifdef IEM_WITH_SETJMP
5653/**
5654 * See iemRaiseXcptOrInt. Will not return.
5655 */
5656IEM_STATIC DECL_NO_RETURN(void)
5657iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5658 uint8_t cbInstr,
5659 uint8_t u8Vector,
5660 uint32_t fFlags,
5661 uint16_t uErr,
5662 uint64_t uCr2)
5663{
5664 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5665 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5666}
5667#endif
5668
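#if 0 /* Minimal sketch (hypothetical, not part of this file) of the setjmp
       * side pairing with the longjmp in iemRaiseXcptOrIntJmp; it assumes
       * IEM_WITH_SETJMP and that pJmpBuf is published the way the real
       * execution loops do it: */
static VBOXSTRICTRC iemSketchRunWithJmpBuf(PVMCPUCC pVCpu)
{
    jmp_buf JmpBuf;
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    int rc = setjmp(JmpBuf);
    if (rc == 0)
        iemRaiseGeneralProtectionFault0Jmp(pVCpu); /* does not return; unwinds via longjmp */
    return rc; /* the VBOXSTRICTRC value handed to longjmp above */
}
#endif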
5669
5670/** \#DE - 00. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5674}
5675
5676
5677/** \#DB - 01.
5678  * @note This automatically clears DR7.GD. */
5679DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5680{
5681 /** @todo set/clear RF. */
5682 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5684}
5685
5686
5687/** \#BR - 05. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5691}
5692
5693
5694/** \#UD - 06. */
5695DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5696{
5697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5698}
5699
5700
5701/** \#NM - 07. */
5702DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5703{
5704 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5705}
5706
5707
5708/** \#TS(err) - 0a. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5712}
5713
5714
5715/** \#TS(tr) - 0a. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5719 pVCpu->cpum.GstCtx.tr.Sel, 0);
5720}
5721
5722
5723/** \#TS(0) - 0a. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5727 0, 0);
5728}
5729
5730
5731 /** \#TS(sel) - 0a. */
5732DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5733{
5734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5735 uSel & X86_SEL_MASK_OFF_RPL, 0);
5736}
5737
5738
5739/** \#NP(err) - 0b. */
5740DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5741{
5742 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5743}
5744
5745
5746/** \#NP(sel) - 0b. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5750 uSel & ~X86_SEL_RPL, 0);
5751}
5752
5753
5754/** \#SS(seg) - 0c. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5756{
5757 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5758 uSel & ~X86_SEL_RPL, 0);
5759}
5760
5761
5762/** \#SS(err) - 0c. */
5763DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5764{
5765 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5766}
5767
5768
5769/** \#GP(n) - 0d. */
5770DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5771{
5772 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5773}
5774
5775
5776/** \#GP(0) - 0d. */
5777DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5778{
5779 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5780}
5781
5782#ifdef IEM_WITH_SETJMP
5783/** \#GP(0) - 0d. */
5784DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5785{
5786 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5787}
5788#endif
5789
5790
5791/** \#GP(sel) - 0d. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5793{
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5795 Sel & ~X86_SEL_RPL, 0);
5796}
5797
5798
5799/** \#GP(0) - 0d. */
5800DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5801{
5802 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5803}
5804
5805
5806/** \#GP(sel) - 0d. */
5807DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5808{
5809 NOREF(iSegReg); NOREF(fAccess);
5810 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5811 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5812}
5813
5814#ifdef IEM_WITH_SETJMP
5815/** \#GP(sel) - 0d, longjmp. */
5816DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5817{
5818 NOREF(iSegReg); NOREF(fAccess);
5819 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5820 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5821}
5822#endif
5823
5824/** \#GP(sel) - 0d. */
5825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5826{
5827 NOREF(Sel);
5828 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5829}
5830
5831#ifdef IEM_WITH_SETJMP
5832/** \#GP(sel) - 0d, longjmp. */
5833DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5834{
5835 NOREF(Sel);
5836 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5837}
5838#endif
5839
5840
5841/** \#GP(sel) - 0d. */
5842DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5843{
5844 NOREF(iSegReg); NOREF(fAccess);
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5846}
5847
5848#ifdef IEM_WITH_SETJMP
5849/** \#GP(sel) - 0d, longjmp. */
5850DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5851 uint32_t fAccess)
5852{
5853 NOREF(iSegReg); NOREF(fAccess);
5854 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5855}
5856#endif
5857
5858
5859/** \#PF(n) - 0e. */
5860DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5861{
5862 uint16_t uErr;
5863 switch (rc)
5864 {
5865 case VERR_PAGE_NOT_PRESENT:
5866 case VERR_PAGE_TABLE_NOT_PRESENT:
5867 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5868 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5869 uErr = 0;
5870 break;
5871
5872 default:
5873 AssertMsgFailed(("%Rrc\n", rc));
5874 RT_FALL_THRU();
5875 case VERR_ACCESS_DENIED:
5876 uErr = X86_TRAP_PF_P;
5877 break;
5878
5879 /** @todo reserved */
5880 }
5881
5882 if (pVCpu->iem.s.uCpl == 3)
5883 uErr |= X86_TRAP_PF_US;
5884
5885 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5886 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5887 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5888 uErr |= X86_TRAP_PF_ID;
5889
5890#if 0 /* This is so much non-sense, really. Why was it done like that? */
5891 /* Note! RW access callers reporting a WRITE protection fault, will clear
5892 the READ flag before calling. So, read-modify-write accesses (RW)
5893 can safely be reported as READ faults. */
5894 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5895 uErr |= X86_TRAP_PF_RW;
5896#else
5897 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5898 {
5899 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5900 uErr |= X86_TRAP_PF_RW;
5901 }
5902#endif
5903
5904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5905 uErr, GCPtrWhere);
5906}
5907
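#if 0 /* Worked example for the error code assembled above (hypothetical
       * scenario): a ring-3 write to a present, read-only page yields
       * P|US|RW = 7; an NX-blocked instruction fetch would add X86_TRAP_PF_ID. */
static uint16_t const g_uErrPfExample = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW; /* = 7 */
#endif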
5908#ifdef IEM_WITH_SETJMP
5909/** \#PF(n) - 0e, longjmp. */
5910IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5911{
5912     longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5913}
5914#endif
5915
5916
5917/** \#MF(0) - 10. */
5918DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5919{
5920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5921}
5922
5923
5924/** \#AC(0) - 11. */
5925DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5926{
5927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5928}
5929
5930
5931/**
5932 * Macro for calling iemCImplRaiseDivideError().
5933 *
5934 * This enables us to add/remove arguments and force different levels of
5935 * inlining as we wish.
5936 *
5937 * @return Strict VBox status code.
5938 */
5939#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5940IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5941{
5942 NOREF(cbInstr);
5943 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5944}
5945
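#if 0 /* Illustrative only: inside an FNIEMOP decoder body, deferring to the
       * C implementation via the macro above looks like this (the guard flag
       * is hypothetical): */
    if (fDivisorIsZero) /* hypothetical condition */
        return IEMOP_RAISE_DIVIDE_ERROR();
#endif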
5946
5947/**
5948 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5949 *
5950 * This enables us to add/remove arguments and force different levels of
5951 * inlining as we wish.
5952 *
5953 * @return Strict VBox status code.
5954 */
5955#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5956IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5957{
5958 NOREF(cbInstr);
5959 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5960}
5961
5962
5963/**
5964 * Macro for calling iemCImplRaiseInvalidOpcode().
5965 *
5966 * This enables us to add/remove arguments and force different levels of
5967 * inlining as we wish.
5968 *
5969 * @return Strict VBox status code.
5970 */
5971#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5972IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5973{
5974 NOREF(cbInstr);
5975 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5976}
5977
5978
5979/** @} */
5980
5981
5982/*
5983 *
5984  * Helper routines.
5985  * Helper routines.
5986  * Helper routines.
5987 *
5988 */
5989
5990/**
5991 * Recalculates the effective operand size.
5992 *
5993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5994 */
5995IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
5996{
5997 switch (pVCpu->iem.s.enmCpuMode)
5998 {
5999 case IEMMODE_16BIT:
6000 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6001 break;
6002 case IEMMODE_32BIT:
6003 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6004 break;
6005 case IEMMODE_64BIT:
6006 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6007 {
6008 case 0:
6009 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6010 break;
6011 case IEM_OP_PRF_SIZE_OP:
6012 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6013 break;
6014 case IEM_OP_PRF_SIZE_REX_W:
6015 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6016 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6017 break;
6018 }
6019 break;
6020 default:
6021 AssertFailed();
6022 }
6023}
6024
6025
6026/**
6027 * Sets the default operand size to 64-bit and recalculates the effective
6028 * operand size.
6029 *
6030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6031 */
6032IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6033{
6034 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6035 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6036 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6037 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6038 else
6039 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6040}
6041
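/*
 * Illustrative sketch, not compiled (the function name is made up for
 * illustration): expected effective operand sizes for two common prefix
 * combinations, assuming the rules implemented by iemRecalEffOpSize above.
 */
#if 0
static void iemEffOpSizeExamples(PVMCPUCC pVCpu)
{
    /* 16-bit mode with a 66h prefix flips to 32-bit operands. */
    pVCpu->iem.s.enmCpuMode = IEMMODE_16BIT;
    pVCpu->iem.s.fPrefixes  = IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT);

    /* 64-bit mode: REX.W wins over 66h, giving 64-bit operands. */
    pVCpu->iem.s.enmCpuMode   = IEMMODE_64BIT;
    pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
    pVCpu->iem.s.fPrefixes    = IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT);
}
#endif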
6042
6043/*
6044 *
6045 * Common opcode decoders.
6046 * Common opcode decoders.
6047 * Common opcode decoders.
6048 *
6049 */
6050//#include <iprt/mem.h>
6051
6052/**
6053 * Used to add extra details about a stub case.
6054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6055 */
6056IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6057{
6058#if defined(LOG_ENABLED) && defined(IN_RING3)
6059 PVM pVM = pVCpu->CTX_SUFF(pVM);
6060 char szRegs[4096];
6061 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6062 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6063 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6064 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6065 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6066 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6067 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6068 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6069 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6070 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6071 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6072 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6073 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6074 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6075 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6076 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6077 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6078 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6079 " efer=%016VR{efer}\n"
6080 " pat=%016VR{pat}\n"
6081 " sf_mask=%016VR{sf_mask}\n"
6082 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6083 " lstar=%016VR{lstar}\n"
6084 " star=%016VR{star} cstar=%016VR{cstar}\n"
6085 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6086 );
6087
6088 char szInstr[256];
6089 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6090 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6091 szInstr, sizeof(szInstr), NULL);
6092
6093 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6094#else
6095 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6096#endif
6097}
6098
6099/**
6100 * Complains about a stub.
6101 *
6102 * There are two versions of this macro: one for daily use and one for use
6103 * when working on IEM.
6104 */
6105#if 0
6106# define IEMOP_BITCH_ABOUT_STUB() \
6107 do { \
6108 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6109 iemOpStubMsg2(pVCpu); \
6110 RTAssertPanic(); \
6111 } while (0)
6112#else
6113# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6114#endif
6115
6116/** Stubs an opcode. */
6117#define FNIEMOP_STUB(a_Name) \
6118 FNIEMOP_DEF(a_Name) \
6119 { \
6120 RT_NOREF_PV(pVCpu); \
6121 IEMOP_BITCH_ABOUT_STUB(); \
6122 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6123 } \
6124 typedef int ignore_semicolon
6125
6126/** Stubs an opcode. */
6127#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6128 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6129 { \
6130 RT_NOREF_PV(pVCpu); \
6131 RT_NOREF_PV(a_Name0); \
6132 IEMOP_BITCH_ABOUT_STUB(); \
6133 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6134 } \
6135 typedef int ignore_semicolon
6136
6137/** Stubs an opcode which currently should raise \#UD. */
6138#define FNIEMOP_UD_STUB(a_Name) \
6139 FNIEMOP_DEF(a_Name) \
6140 { \
6141 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6142 return IEMOP_RAISE_INVALID_OPCODE(); \
6143 } \
6144 typedef int ignore_semicolon
6145
6146/** Stubs an opcode which currently should raise \#UD. */
6147#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6148 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6149 { \
6150 RT_NOREF_PV(pVCpu); \
6151 RT_NOREF_PV(a_Name0); \
6152 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6153 return IEMOP_RAISE_INVALID_OPCODE(); \
6154 } \
6155 typedef int ignore_semicolon
6156
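/*
 * Usage sketch, not compiled (the opcode names are made up for illustration):
 * how entries in the opcode tables are stubbed with the macros above.  The
 * trailing "typedef int ignore_semicolon" exists so the invocation can be
 * followed by a semicolon without leaving a stray empty declaration at file
 * scope.
 */
#if 0
FNIEMOP_STUB(iemOp_example_stub);  /* logs and returns VERR_IEM_INSTR_NOT_IMPLEMENTED */
FNIEMOP_UD_STUB(iemOp_example_ud); /* raises #UD via IEMOP_RAISE_INVALID_OPCODE */
#endif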
6157
6158
6159/** @name Register Access.
6160 * @{
6161 */
6162
6163/**
6164 * Gets a reference (pointer) to the specified hidden segment register.
6165 *
6166 * @returns Hidden register reference.
6167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6168 * @param iSegReg The segment register.
6169 */
6170IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6171{
6172 Assert(iSegReg < X86_SREG_COUNT);
6173 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6174 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6175
6176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6177 return pSReg;
6178}
6179
6180
6181/**
6182 * Ensures that the given hidden segment register is up to date.
6183 *
6184 * @returns Hidden register reference.
6185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6186 * @param pSReg The segment register.
6187 */
6188IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6189{
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 NOREF(pVCpu);
6192 return pSReg;
6193}
6194
6195
6196/**
6197 * Gets a reference (pointer) to the specified segment register (the selector
6198 * value).
6199 *
6200 * @returns Pointer to the selector variable.
6201 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6202 * @param iSegReg The segment register.
6203 */
6204DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6205{
6206 Assert(iSegReg < X86_SREG_COUNT);
6207 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6208 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6209}
6210
6211
6212/**
6213 * Fetches the selector value of a segment register.
6214 *
6215 * @returns The selector value.
6216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6217 * @param iSegReg The segment register.
6218 */
6219DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6220{
6221 Assert(iSegReg < X86_SREG_COUNT);
6222 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6223 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6224}
6225
6226
6227/**
6228 * Fetches the base address value of a segment register.
6229 *
6230 * @returns The segment base address.
6231 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6232 * @param iSegReg The segment register.
6233 */
6234DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6235{
6236 Assert(iSegReg < X86_SREG_COUNT);
6237 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6238 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6239}
6240
6241
6242/**
6243 * Gets a reference (pointer) to the specified general purpose register.
6244 *
6245 * @returns Register reference.
6246 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6247 * @param iReg The general purpose register.
6248 */
6249DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6250{
6251 Assert(iReg < 16);
6252 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6253}
6254
6255
6256/**
6257 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6258 *
6259 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6260 *
6261 * @returns Register reference.
6262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6263 * @param iReg The register.
6264 */
6265DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6266{
6267 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6268 {
6269 Assert(iReg < 16);
6270 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6271 }
6272 /* high 8-bit register. */
6273 Assert(iReg < 8);
6274 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6275}
6276
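/*
 * Illustrative sketch, not compiled: how the 8-bit register encodings map
 * per iemGRegRefU8 above.  Encodings 4-7 mean AH/CH/DH/BH (the high bytes
 * of rAX/rCX/rDX/rBX) unless any REX prefix is present, in which case they
 * mean SPL/BPL/SIL/DIL.
 */
#if 0
    /* Without REX, index 4 is AH, i.e. the high byte of register 0 (rAX). */
    pVCpu->iem.s.fPrefixes = 0;
    Assert(iemGRegRefU8(pVCpu, 4) == &pVCpu->cpum.GstCtx.aGRegs[0].bHi);

    /* With any REX prefix, index 4 is SPL, i.e. the low byte of rSP. */
    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_REX;
    Assert(iemGRegRefU8(pVCpu, 4) == &pVCpu->cpum.GstCtx.aGRegs[4].u8);
#endif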
6277
6278/**
6279 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6280 *
6281 * @returns Register reference.
6282 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6283 * @param iReg The register.
6284 */
6285DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6286{
6287 Assert(iReg < 16);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6289}
6290
6291
6292/**
6293 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6308 *
6309 * @returns Register reference.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6314{
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified segment register's base address.
6322 *
6323 * @returns Segment register base address reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iSegReg The segment register.
6326 */
6327DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6328{
6329 Assert(iSegReg < X86_SREG_COUNT);
6330 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6331 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6332}
6333
6334
6335/**
6336 * Fetches the value of an 8-bit general purpose register.
6337 *
6338 * @returns The register value.
6339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6340 * @param iReg The register.
6341 */
6342DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6343{
6344 return *iemGRegRefU8(pVCpu, iReg);
6345}
6346
6347
6348/**
6349 * Fetches the value of a 16-bit general purpose register.
6350 *
6351 * @returns The register value.
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param iReg The register.
6354 */
6355DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6356{
6357 Assert(iReg < 16);
6358 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6359}
6360
6361
6362/**
6363 * Fetches the value of a 32-bit general purpose register.
6364 *
6365 * @returns The register value.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6373}
6374
6375
6376/**
6377 * Fetches the value of a 64-bit general purpose register.
6378 *
6379 * @returns The register value.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6387}
6388
6389
6390/**
6391 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6392 *
6393 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6394 * segment limit.
6395 *
 * @returns Strict VBox status code.
6396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6397 * @param offNextInstr The offset of the next instruction.
6398 */
6399IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6400{
6401 switch (pVCpu->iem.s.enmEffOpSize)
6402 {
6403 case IEMMODE_16BIT:
6404 {
6405 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6406 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6407 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6408 return iemRaiseGeneralProtectionFault0(pVCpu);
6409 pVCpu->cpum.GstCtx.rip = uNewIp;
6410 break;
6411 }
6412
6413 case IEMMODE_32BIT:
6414 {
6415 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6416 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6417
6418 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6419 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6420 return iemRaiseGeneralProtectionFault0(pVCpu);
6421 pVCpu->cpum.GstCtx.rip = uNewEip;
6422 break;
6423 }
6424
6425 case IEMMODE_64BIT:
6426 {
6427 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6428
6429 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6430 if (!IEM_IS_CANONICAL(uNewRip))
6431 return iemRaiseGeneralProtectionFault0(pVCpu);
6432 pVCpu->cpum.GstCtx.rip = uNewRip;
6433 break;
6434 }
6435
6436 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6437 }
6438
6439 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6440
6441#ifndef IEM_WITH_CODE_TLB
6442 /* Flush the prefetch buffer. */
6443 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6444#endif
6445
6446 return VINF_SUCCESS;
6447}
6448
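/*
 * Usage sketch, not compiled: a relative short jump as the decoder would
 * issue it.  Assuming a 2-byte JMP rel8 instruction (EB FE) with an offset
 * of -2, the new RIP is old RIP + 2 (instruction length) - 2, i.e. the jump
 * branches to itself.
 */
#if 0
    VBOXSTRICTRC rcStrict = iemRegRipRelativeJumpS8(pVCpu, (int8_t)-2);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict; /* #GP(0) on a limit or canonical violation */
#endif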
6449
6450/**
6451 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6452 *
6453 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6454 * segment limit.
6455 *
6456 * @returns Strict VBox status code.
6457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6458 * @param offNextInstr The offset of the next instruction.
6459 */
6460IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6461{
6462 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6463
6464 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6465 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6466 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6467 return iemRaiseGeneralProtectionFault0(pVCpu);
6468 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6469 pVCpu->cpum.GstCtx.rip = uNewIp;
6470 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6471
6472#ifndef IEM_WITH_CODE_TLB
6473 /* Flush the prefetch buffer. */
6474 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6475#endif
6476
6477 return VINF_SUCCESS;
6478}
6479
6480
6481/**
6482 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6483 *
6484 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6485 * segment limit.
6486 *
6487 * @returns Strict VBox status code.
6488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6489 * @param offNextInstr The offset of the next instruction.
6490 */
6491IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6492{
6493 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6494
6495 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6496 {
6497 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6498
6499 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6500 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6501 return iemRaiseGeneralProtectionFault0(pVCpu);
6502 pVCpu->cpum.GstCtx.rip = uNewEip;
6503 }
6504 else
6505 {
6506 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6507
6508 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6509 if (!IEM_IS_CANONICAL(uNewRip))
6510 return iemRaiseGeneralProtectionFault0(pVCpu);
6511 pVCpu->cpum.GstCtx.rip = uNewRip;
6512 }
6513 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6514
6515#ifndef IEM_WITH_CODE_TLB
6516 /* Flush the prefetch buffer. */
6517 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6518#endif
6519
6520 return VINF_SUCCESS;
6521}
6522
6523
6524/**
6525 * Performs a near jump to the specified address.
6526 *
6527 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6528 * segment limit.
6529 *
6530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6531 * @param uNewRip The new RIP value.
6532 */
6533IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6534{
6535 switch (pVCpu->iem.s.enmEffOpSize)
6536 {
6537 case IEMMODE_16BIT:
6538 {
6539 Assert(uNewRip <= UINT16_MAX);
6540 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6541 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6542 return iemRaiseGeneralProtectionFault0(pVCpu);
6543 /** @todo Test 16-bit jump in 64-bit mode. */
6544 pVCpu->cpum.GstCtx.rip = uNewRip;
6545 break;
6546 }
6547
6548 case IEMMODE_32BIT:
6549 {
6550 Assert(uNewRip <= UINT32_MAX);
6551 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6552 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6553
6554 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6555 return iemRaiseGeneralProtectionFault0(pVCpu);
6556 pVCpu->cpum.GstCtx.rip = uNewRip;
6557 break;
6558 }
6559
6560 case IEMMODE_64BIT:
6561 {
6562 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6563
6564 if (!IEM_IS_CANONICAL(uNewRip))
6565 return iemRaiseGeneralProtectionFault0(pVCpu);
6566 pVCpu->cpum.GstCtx.rip = uNewRip;
6567 break;
6568 }
6569
6570 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6571 }
6572
6573 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6574
6575#ifndef IEM_WITH_CODE_TLB
6576 /* Flush the prefetch buffer. */
6577 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6578#endif
6579
6580 return VINF_SUCCESS;
6581}
6582
6583
6584/**
6585 * Gets the address of the top of the stack.
6586 *
6587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6588 */
6589DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6590{
6591 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6592 return pVCpu->cpum.GstCtx.rsp;
6593 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6594 return pVCpu->cpum.GstCtx.esp;
6595 return pVCpu->cpum.GstCtx.sp;
6596}
6597
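/*
 * Illustrative sketch, not compiled: which stack pointer width applies.
 * 64-bit mode always uses the full RSP; otherwise SS.D selects between ESP
 * (32-bit stacks) and SP (16-bit stacks).
 */
#if 0
    pVCpu->iem.s.enmCpuMode = IEMMODE_16BIT;
    pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig = 0;
    pVCpu->cpum.GstCtx.rsp = UINT64_C(0x0000000012345678);
    Assert(iemRegGetEffRsp(pVCpu) == UINT16_C(0x5678)); /* only SP is used */
#endif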
6598
6599/**
6600 * Updates the RIP/EIP/IP to point to the next instruction.
6601 *
6602 * This function leaves the EFLAGS.RF flag alone.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 * @param cbInstr The number of bytes to add.
6606 */
6607IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6608{
6609 switch (pVCpu->iem.s.enmCpuMode)
6610 {
6611 case IEMMODE_16BIT:
6612 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6613 pVCpu->cpum.GstCtx.eip += cbInstr;
6614 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6615 break;
6616
6617 case IEMMODE_32BIT:
6618 pVCpu->cpum.GstCtx.eip += cbInstr;
6619 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6620 break;
6621
6622 case IEMMODE_64BIT:
6623 pVCpu->cpum.GstCtx.rip += cbInstr;
6624 break;
6625 default: AssertFailed();
6626 }
6627}
6628
6629
6630#if 0
6631/**
6632 * Updates the RIP/EIP/IP to point to the next instruction.
6633 *
6634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6635 */
6636IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6637{
6638 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6639}
6640#endif
6641
6642
6643
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 * @param cbInstr The number of bytes to add.
6649 */
6650IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6651{
6652 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6653
6654 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6655#if ARCH_BITS >= 64
6656 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6657 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6658 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6659#else
6660 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6661 pVCpu->cpum.GstCtx.rip += cbInstr;
6662 else
6663 pVCpu->cpum.GstCtx.eip += cbInstr;
6664#endif
6665}
6666
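/*
 * Worked example, not compiled, of the mask table above: in 16-bit and
 * 32-bit modes RIP is asserted to fit in 32 bits, so a 32-bit mask handles
 * the wrap-around, while 64-bit mode must keep all bits.
 */
#if 0
    /* 32-bit mode, EIP = 0xfffffffe, 4-byte instruction:
       (0xfffffffe + 4) & 0xffffffff = 2. */
    pVCpu->iem.s.enmCpuMode = IEMMODE_32BIT;
    pVCpu->cpum.GstCtx.rip  = UINT32_C(0xfffffffe);
    iemRegAddToRipAndClearRF(pVCpu, 4);
    Assert(pVCpu->cpum.GstCtx.rip == 2);
#endif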
6667
6668/**
6669 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6670 *
6671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6672 */
6673IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6674{
6675 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6676}
6677
6678
6679/**
6680 * Adds to the stack pointer.
6681 *
6682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6683 * @param cbToAdd The number of bytes to add (8-bit!).
6684 */
6685DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6686{
6687 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6688 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6689 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6690 pVCpu->cpum.GstCtx.esp += cbToAdd;
6691 else
6692 pVCpu->cpum.GstCtx.sp += cbToAdd;
6693}
6694
6695
6696/**
6697 * Subtracts from the stack pointer.
6698 *
6699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6700 * @param cbToSub The number of bytes to subtract (8-bit!).
6701 */
6702DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6703{
6704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6705 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6706 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6707 pVCpu->cpum.GstCtx.esp -= cbToSub;
6708 else
6709 pVCpu->cpum.GstCtx.sp -= cbToSub;
6710}
6711
6712
6713/**
6714 * Adds to the temporary stack pointer.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6718 * @param cbToAdd The number of bytes to add (16-bit).
6719 */
6720DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6721{
6722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6723 pTmpRsp->u += cbToAdd;
6724 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6725 pTmpRsp->DWords.dw0 += cbToAdd;
6726 else
6727 pTmpRsp->Words.w0 += cbToAdd;
6728}
6729
6730
6731/**
6732 * Subtracts from the temporary stack pointer.
6733 *
6734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6735 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6736 * @param cbToSub The number of bytes to subtract.
6737 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6738 * expecting that.
6739 */
6740DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6741{
6742 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6743 pTmpRsp->u -= cbToSub;
6744 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6745 pTmpRsp->DWords.dw0 -= cbToSub;
6746 else
6747 pTmpRsp->Words.w0 -= cbToSub;
6748}
6749
6750
6751/**
6752 * Calculates the effective stack address for a push of the specified size as
6753 * well as the new RSP value (upper bits may be masked).
6754 *
6755 * @returns Effective stack address for the push.
6756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6757 * @param cbItem The size of the stack item to push.
6758 * @param puNewRsp Where to return the new RSP value.
6759 */
6760DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6761{
6762 RTUINT64U uTmpRsp;
6763 RTGCPTR GCPtrTop;
6764 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6765
6766 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6767 GCPtrTop = uTmpRsp.u -= cbItem;
6768 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6769 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6770 else
6771 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6772 *puNewRsp = uTmpRsp.u;
6773 return GCPtrTop;
6774}
6775
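/*
 * Usage sketch, not compiled: a 16-bit push.  The helper only computes the
 * write address and the would-be RSP; the caller is expected to commit
 * *puNewRsp after the memory write has succeeded.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(uint16_t), &uNewRsp);
    /* ... write the item to SS:GCPtrTop ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp; /* commit only on success */
#endif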
6776
6777/**
6778 * Gets the current stack pointer and calculates the value after a pop of the
6779 * specified size.
6780 *
6781 * @returns Current stack pointer.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param cbItem The size of the stack item to pop.
6784 * @param puNewRsp Where to return the new RSP value.
6785 */
6786DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6787{
6788 RTUINT64U uTmpRsp;
6789 RTGCPTR GCPtrTop;
6790 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6791
6792 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6793 {
6794 GCPtrTop = uTmpRsp.u;
6795 uTmpRsp.u += cbItem;
6796 }
6797 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6798 {
6799 GCPtrTop = uTmpRsp.DWords.dw0;
6800 uTmpRsp.DWords.dw0 += cbItem;
6801 }
6802 else
6803 {
6804 GCPtrTop = uTmpRsp.Words.w0;
6805 uTmpRsp.Words.w0 += cbItem;
6806 }
6807 *puNewRsp = uTmpRsp.u;
6808 return GCPtrTop;
6809}
6810
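/*
 * Usage sketch, not compiled: a 32-bit pop.  Note the asymmetry with the
 * push helper: the returned address is the current top of stack, and only
 * the copy in *puNewRsp is advanced until the caller commits it.
 */
#if 0
    uint64_t uNewRsp;
    RTGCPTR  GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(uint32_t), &uNewRsp);
    /* ... read the item from SS:GCPtrTop ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp; /* commit only on success */
#endif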
6811
6812/**
6813 * Calculates the effective stack address for a push of the specified size as
6814 * well as the new temporary RSP value (upper bits may be masked).
6815 *
6816 * @returns Effective stack address for the push.
6817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6818 * @param pTmpRsp The temporary stack pointer. This is updated.
6819 * @param cbItem The size of the stack item to push.
6820 */
6821DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6822{
6823 RTGCPTR GCPtrTop;
6824
6825 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6826 GCPtrTop = pTmpRsp->u -= cbItem;
6827 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6828 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6829 else
6830 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6831 return GCPtrTop;
6832}
6833
6834
6835/**
6836 * Gets the effective stack address for a pop of the specified size and
6837 * calculates and updates the temporary RSP.
6838 *
6839 * @returns Current stack pointer.
6840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6841 * @param pTmpRsp The temporary stack pointer. This is updated.
6842 * @param cbItem The size of the stack item to pop.
6843 */
6844DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6845{
6846 RTGCPTR GCPtrTop;
6847 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6848 {
6849 GCPtrTop = pTmpRsp->u;
6850 pTmpRsp->u += cbItem;
6851 }
6852 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6853 {
6854 GCPtrTop = pTmpRsp->DWords.dw0;
6855 pTmpRsp->DWords.dw0 += cbItem;
6856 }
6857 else
6858 {
6859 GCPtrTop = pTmpRsp->Words.w0;
6860 pTmpRsp->Words.w0 += cbItem;
6861 }
6862 return GCPtrTop;
6863}
6864
6865/** @} */
6866
6867
6868/** @name FPU access and helpers.
6869 *
6870 * @{
6871 */
6872
6873
6874/**
6875 * Hook for preparing to use the host FPU.
6876 *
6877 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6878 *
6879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6880 */
6881DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6882{
6883#ifdef IN_RING3
6884 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6885#else
6886 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6887#endif
6888 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6889}
6890
6891
6892/**
6893 * Hook for preparing to use the host FPU for SSE.
6894 *
6895 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6896 *
6897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6898 */
6899DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6900{
6901 iemFpuPrepareUsage(pVCpu);
6902}
6903
6904
6905/**
6906 * Hook for preparing to use the host FPU for AVX.
6907 *
6908 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6913{
6914 iemFpuPrepareUsage(pVCpu);
6915}
6916
6917
6918/**
6919 * Hook for actualizing the guest FPU state before the interpreter reads it.
6920 *
6921 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 */
6925DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6926{
6927#ifdef IN_RING3
6928 NOREF(pVCpu);
6929#else
6930 CPUMRZFpuStateActualizeForRead(pVCpu);
6931#endif
6932 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6933}
6934
6935
6936/**
6937 * Hook for actualizing the guest FPU state before the interpreter changes it.
6938 *
6939 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6940 *
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 */
6943DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6944{
6945#ifdef IN_RING3
6946 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6947#else
6948 CPUMRZFpuStateActualizeForChange(pVCpu);
6949#endif
6950 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6951}
6952
6953
6954/**
6955 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6956 * only.
6957 *
6958 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6959 *
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 */
6962DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6963{
6964#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6965 NOREF(pVCpu);
6966#else
6967 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6968#endif
6969 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6970}
6971
6972
6973/**
6974 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6975 * read+write.
6976 *
6977 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 */
6981DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6982{
6983#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6984 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6985#else
6986 CPUMRZFpuStateActualizeForChange(pVCpu);
6987#endif
6988 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6989}
6990
6991
6992/**
6993 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6994 * only.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7001{
7002#ifdef IN_RING3
7003 NOREF(pVCpu);
7004#else
7005 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7006#endif
7007 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7008}
7009
7010
7011/**
7012 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7013 * read+write.
7014 *
7015 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7016 *
7017 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7018 */
7019DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7020{
7021#ifdef IN_RING3
7022 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7023#else
7024 CPUMRZFpuStateActualizeForChange(pVCpu);
7025#endif
7026 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7027}
7028
7029
7030/**
7031 * Stores a QNaN value into a FPU register.
7032 *
7033 * @param pReg Pointer to the register.
7034 */
7035DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7036{
7037 pReg->au32[0] = UINT32_C(0x00000000);
7038 pReg->au32[1] = UINT32_C(0xc0000000);
7039 pReg->au16[4] = UINT16_C(0xffff);
7040}
7041
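/*
 * Illustrative sketch, not compiled: the bits stored above form the 80-bit
 * "real indefinite" QNaN 0xffff'c0000000'00000000, i.e. sign set, exponent
 * all ones, integer bit and top fraction bit set, the rest zero.
 */
#if 0
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au16[4] == UINT16_C(0xffff));                         /* sign + exponent */
    Assert(r80.au32[1] == UINT32_C(0xc0000000) && r80.au32[0] == 0); /* mantissa */
#endif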
7042
7043/**
7044 * Updates the FOP, FPU.CS and FPUIP registers.
7045 *
7046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7047 * @param pFpuCtx The FPU context.
7048 */
7049DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7050{
7051 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7052 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7053 /** @todo x87.CS and FPUIP need to be kept separately. */
7054 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7055 {
7056 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
7057 * handled in real mode, based on the fnsave and fnstenv images. */
7058 pFpuCtx->CS = 0;
7059 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7060 }
7061 else
7062 {
7063 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7064 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7065 }
7066}
7067
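/*
 * Worked example (documentation only): in real mode with CS=0x1234 and
 * EIP=0x0010, the FPUIP recorded above is 0x0010 | (0x1234 << 4) = 0x12350,
 * i.e. the linear address of the instruction, with FPU.CS left zero.
 */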
7068
7069/**
7070 * Updates the x87.DS and FPUDP registers.
7071 *
7072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7073 * @param pFpuCtx The FPU context.
7074 * @param iEffSeg The effective segment register.
7075 * @param GCPtrEff The effective address relative to @a iEffSeg.
7076 */
7077DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7078{
7079 RTSEL sel;
7080 switch (iEffSeg)
7081 {
7082 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7083 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7084 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7085 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7086 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7087 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7088 default:
7089 AssertMsgFailed(("%d\n", iEffSeg));
7090 sel = pVCpu->cpum.GstCtx.ds.Sel;
7091 }
7092 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7093 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7094 {
7095 pFpuCtx->DS = 0;
7096 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7097 }
7098 else
7099 {
7100 pFpuCtx->DS = sel;
7101 pFpuCtx->FPUDP = GCPtrEff;
7102 }
7103}
7104
7105
7106/**
7107 * Rotates the stack registers in the push direction.
7108 *
7109 * @param pFpuCtx The FPU context.
7110 * @remarks This is a complete waste of time, but fxsave stores the registers in
7111 * stack order.
7112 */
7113DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7114{
7115 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7116 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7117 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7118 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7119 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7120 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7121 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7122 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7123 pFpuCtx->aRegs[0].r80 = r80Tmp;
7124}
7125
7126
7127/**
7128 * Rotates the stack registers in the pop direction.
7129 *
7130 * @param pFpuCtx The FPU context.
7131 * @remarks This is a complete waste of time, but fxsave stores the registers in
7132 * stack order.
7133 */
7134DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7135{
7136 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7137 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7138 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7139 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7140 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7141 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7142 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7143 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7144 pFpuCtx->aRegs[7].r80 = r80Tmp;
7145}
7146
7147
7148/**
7149 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7150 * exception prevents it.
7151 *
7152 * @param pResult The FPU operation result to push.
7153 * @param pFpuCtx The FPU context.
7154 */
7155IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7156{
7157 /* Update FSW and bail if there are pending exceptions afterwards. */
7158 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7159 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7160 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7161 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7162 {
7163 pFpuCtx->FSW = fFsw;
7164 return;
7165 }
7166
7167 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7168 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7169 {
7170 /* All is fine, push the actual value. */
7171 pFpuCtx->FTW |= RT_BIT(iNewTop);
7172 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7173 }
7174 else if (pFpuCtx->FCW & X86_FCW_IM)
7175 {
7176 /* Masked stack overflow, push QNaN. */
7177 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7178 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7179 }
7180 else
7181 {
7182 /* Raise stack overflow, don't push anything. */
7183 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7184 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7185 return;
7186 }
7187
7188 fFsw &= ~X86_FSW_TOP_MASK;
7189 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7190 pFpuCtx->FSW = fFsw;
7191
7192 iemFpuRotateStackPush(pFpuCtx);
7193}
7194
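/*
 * Worked note (documentation only): TOP is a 3-bit field, so the
 * (TOP + 7) & 7 computed above equals TOP - 1 modulo 8; e.g. TOP=0 yields
 * iNewTop=7, the absolute register that becomes ST(0) once the push
 * decrements TOP with wrap-around.
 */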
7195
7196/**
7197 * Stores a result in a FPU register and updates the FSW and FTW.
7198 *
7199 * @param pFpuCtx The FPU context.
7200 * @param pResult The result to store.
7201 * @param iStReg Which FPU register to store it in.
7202 */
7203IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7204{
7205 Assert(iStReg < 8);
7206 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7207 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7208 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7209 pFpuCtx->FTW |= RT_BIT(iReg);
7210 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7211}
7212
7213
7214/**
7215 * Only updates the FPU status word (FSW) with the result of the current
7216 * instruction.
7217 *
7218 * @param pFpuCtx The FPU context.
7219 * @param u16FSW The FSW output of the current instruction.
7220 */
7221IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7222{
7223 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7224 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7225}
7226
7227
7228/**
7229 * Pops one item off the FPU stack if no pending exception prevents it.
7230 *
7231 * @param pFpuCtx The FPU context.
7232 */
7233IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7234{
7235 /* Check pending exceptions. */
7236 uint16_t uFSW = pFpuCtx->FSW;
7237 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7238 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7239 return;
7240
7241 /* TOP++, i.e. the stack shrinks by one. */
7242 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7243 uFSW &= ~X86_FSW_TOP_MASK;
7244 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7245 pFpuCtx->FSW = uFSW;
7246
7247 /* Mark the previous ST0 as empty. */
7248 iOldTop >>= X86_FSW_TOP_SHIFT;
7249 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7250
7251 /* Rotate the registers. */
7252 iemFpuRotateStackPop(pFpuCtx);
7253}
7254
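/*
 * Worked note (documentation only): since the TOP field is 3 bits wide,
 * adding 9 << X86_FSW_TOP_SHIFT and masking above is congruent to adding 1
 * modulo 8, i.e. a plain TOP++ with wrap-around.
 */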
7255
7256/**
7257 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7258 *
7259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7260 * @param pResult The FPU operation result to push.
7261 */
7262IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7263{
7264 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7265 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7266 iemFpuMaybePushResult(pResult, pFpuCtx);
7267}
7268
7269
7270/**
7271 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7272 * and sets FPUDP and FPUDS.
7273 *
7274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7275 * @param pResult The FPU operation result to push.
7276 * @param iEffSeg The effective segment register.
7277 * @param GCPtrEff The effective address relative to @a iEffSeg.
7278 */
7279IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7280{
7281 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7282 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7284 iemFpuMaybePushResult(pResult, pFpuCtx);
7285}
7286
7287
7288/**
7289 * Replace ST0 with the first value and push the second onto the FPU stack,
7290 * unless a pending exception prevents it.
7291 *
7292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7293 * @param pResult The FPU operation result to store and push.
7294 */
7295IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7296{
7297 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7298 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7299
7300 /* Update FSW and bail if there are pending exceptions afterwards. */
7301 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7302 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7303 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7304 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7305 {
7306 pFpuCtx->FSW = fFsw;
7307 return;
7308 }
7309
7310 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7311 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7312 {
7313 /* All is fine, push the actual value. */
7314 pFpuCtx->FTW |= RT_BIT(iNewTop);
7315 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7316 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7317 }
7318 else if (pFpuCtx->FCW & X86_FCW_IM)
7319 {
7320 /* Masked stack overflow, push QNaN. */
7321 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7322 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7323 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7324 }
7325 else
7326 {
7327 /* Raise stack overflow, don't push anything. */
7328 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7329 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7330 return;
7331 }
7332
7333 fFsw &= ~X86_FSW_TOP_MASK;
7334 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7335 pFpuCtx->FSW = fFsw;
7336
7337 iemFpuRotateStackPush(pFpuCtx);
7338}
7339
7340
7341/**
7342 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7343 * FOP.
7344 *
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param pResult The result to store.
7347 * @param iStReg Which FPU register to store it in.
7348 */
7349IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7350{
7351 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7352 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7353 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7354}
7355
7356
7357/**
7358 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7359 * FOP, and then pops the stack.
7360 *
7361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7362 * @param pResult The result to store.
7363 * @param iStReg Which FPU register to store it in.
7364 */
7365IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7366{
7367 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7368 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7369 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7370 iemFpuMaybePopOne(pFpuCtx);
7371}
7372
7373
7374/**
7375 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7376 * FPUDP, and FPUDS.
7377 *
7378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7379 * @param pResult The result to store.
7380 * @param iStReg Which FPU register to store it in.
7381 * @param iEffSeg The effective memory operand selector register.
7382 * @param GCPtrEff The effective memory operand offset.
7383 */
7384IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7385 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7388 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7389 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7390 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7391}
7392
7393
7394/**
7395 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7396 * FPUDP, and FPUDS, and then pops the stack.
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param pResult The result to store.
7400 * @param iStReg Which FPU register to store it in.
7401 * @param iEffSeg The effective memory operand selector register.
7402 * @param GCPtrEff The effective memory operand offset.
7403 */
7404IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7405 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7409 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7410 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7411 iemFpuMaybePopOne(pFpuCtx);
7412}
7413
7414
7415/**
7416 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 */
7420IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7421{
7422 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7423 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7424}
7425
7426
7427/**
7428 * Marks the specified stack register as free (for FFREE).
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 * @param iStReg The register to free.
7432 */
7433IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7434{
7435 Assert(iStReg < 8);
7436 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7437 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7438 pFpuCtx->FTW &= ~RT_BIT(iReg);
7439}
7440
7441
7442/**
7443 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7444 *
7445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7446 */
7447IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7448{
7449 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7450 uint16_t uFsw = pFpuCtx->FSW;
7451 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7452 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7453 uFsw &= ~X86_FSW_TOP_MASK;
7454 uFsw |= uTop;
7455 pFpuCtx->FSW = uFsw;
7456}
7457
7458
7459/**
7460 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7461 *
7462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7463 */
7464IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7465{
7466 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7467 uint16_t uFsw = pFpuCtx->FSW;
7468 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7469 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7470 uFsw &= ~X86_FSW_TOP_MASK;
7471 uFsw |= uTop;
7472 pFpuCtx->FSW = uFsw;
7473}
7474
7475
7476/**
7477 * Updates the FSW, FOP, FPUIP, and FPUCS.
7478 *
7479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7480 * @param u16FSW The FSW from the current instruction.
7481 */
7482IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7483{
7484 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7485 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7486 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7487}
7488
7489
7490/**
7491 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7492 *
7493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7494 * @param u16FSW The FSW from the current instruction.
7495 */
7496IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7497{
7498 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7499 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7500 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7501 iemFpuMaybePopOne(pFpuCtx);
7502}
7503
7504
7505/**
7506 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 * @param u16FSW The FSW from the current instruction.
7510 * @param iEffSeg The effective memory operand selector register.
7511 * @param GCPtrEff The effective memory operand offset.
7512 */
7513IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7514{
7515 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7516 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7517 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7518 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7519}
7520
7521
7522/**
7523 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7524 *
7525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7526 * @param u16FSW The FSW from the current instruction.
7527 */
7528IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7529{
7530 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7531 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7532 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7533 iemFpuMaybePopOne(pFpuCtx);
7534 iemFpuMaybePopOne(pFpuCtx);
7535}
7536
7537
7538/**
7539 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7540 *
7541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7542 * @param u16FSW The FSW from the current instruction.
7543 * @param iEffSeg The effective memory operand selector register.
7544 * @param GCPtrEff The effective memory operand offset.
7545 */
7546IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7547{
7548 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7549 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7550 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7551 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7552 iemFpuMaybePopOne(pFpuCtx);
7553}
7554
7555
7556/**
7557 * Worker routine for raising an FPU stack underflow exception.
7558 *
7559 * @param pFpuCtx The FPU context.
7560 * @param iStReg The stack register being accessed.
7561 */
7562IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7563{
7564 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7565 if (pFpuCtx->FCW & X86_FCW_IM)
7566 {
7567 /* Masked underflow. */
7568 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7569 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7570 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7571 if (iStReg != UINT8_MAX)
7572 {
7573 pFpuCtx->FTW |= RT_BIT(iReg);
7574 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7575 }
7576 }
7577 else
7578 {
7579 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7580 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7581 }
7582}
7583
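/*
 * Behaviour note (documentation only): with #IS masked (FCW.IM=1) the worker
 * above stores the real-indefinite QNaN in the target register and sets
 * IE+SF; unmasked, it only sets IE+SF+ES+B and leaves the registers
 * untouched, so a subsequent FPU instruction raises #MF (assuming CR0.NE).
 */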
7584
7585/**
7586 * Raises a FPU stack underflow exception.
7587 *
7588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7589 * @param iStReg The destination register that should be loaded
7590 * with QNaN if \#IS is masked. Specify
7591 * UINT8_MAX if none (like for fcom).
7592 */
7593DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7594{
7595 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7596 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7597 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7598}
7599
7600
7601DECL_NO_INLINE(IEM_STATIC, void)
7602iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7603{
7604 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7607 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7608}
7609
7610
7611DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7612{
7613 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7614 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7615 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7616 iemFpuMaybePopOne(pFpuCtx);
7617}
7618
7619
7620DECL_NO_INLINE(IEM_STATIC, void)
7621iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7622{
7623 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7626 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7627 iemFpuMaybePopOne(pFpuCtx);
7628}
7629
7630
7631DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7632{
7633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7636 iemFpuMaybePopOne(pFpuCtx);
7637 iemFpuMaybePopOne(pFpuCtx);
7638}
7639
7640
7641DECL_NO_INLINE(IEM_STATIC, void)
7642iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7643{
7644 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646
7647 if (pFpuCtx->FCW & X86_FCW_IM)
7648 {
7649 /* Masked underflow - Push QNaN. */
7650 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7651 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7652 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7653 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7654 pFpuCtx->FTW |= RT_BIT(iNewTop);
7655 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7656 iemFpuRotateStackPush(pFpuCtx);
7657 }
7658 else
7659 {
7660 /* Exception pending - don't change TOP or the register stack. */
7661 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7662 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7663 }
7664}
7665
7666
7667DECL_NO_INLINE(IEM_STATIC, void)
7668iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7669{
7670 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7671 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7672
7673 if (pFpuCtx->FCW & X86_FCW_IM)
7674 {
7675 /* Masked underflow - Push QNaN. */
7676 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7677 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7678 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7679 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7680 pFpuCtx->FTW |= RT_BIT(iNewTop);
7681 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7682 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7683 iemFpuRotateStackPush(pFpuCtx);
7684 }
7685 else
7686 {
7687 /* Exception pending - don't change TOP or the register stack. */
7688 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7689 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7690 }
7691}
7692
7693
7694/**
7695 * Worker routine for raising an FPU stack overflow exception on a push.
7696 *
7697 * @param pFpuCtx The FPU context.
7698 */
7699IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7700{
7701 if (pFpuCtx->FCW & X86_FCW_IM)
7702 {
7703 /* Masked overflow. */
7704 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7705 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7706 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7707 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7708 pFpuCtx->FTW |= RT_BIT(iNewTop);
7709 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7710 iemFpuRotateStackPush(pFpuCtx);
7711 }
7712 else
7713 {
7714 /* Exception pending - don't change TOP or the register stack. */
7715 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7716 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7717 }
7718}
7719
7720
7721/**
7722 * Raises an FPU stack overflow exception on a push.
7723 *
7724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7725 */
7726DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7727{
7728 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7729 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7730 iemFpuStackPushOverflowOnly(pFpuCtx);
7731}
7732
7733
7734/**
7735 * Raises an FPU stack overflow exception on a push with a memory operand.
7736 *
7737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7738 * @param iEffSeg The effective memory operand selector register.
7739 * @param GCPtrEff The effective memory operand offset.
7740 */
7741DECL_NO_INLINE(IEM_STATIC, void)
7742iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7743{
7744 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7745 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7746 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7747 iemFpuStackPushOverflowOnly(pFpuCtx);
7748}
7749
7750
7751IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7752{
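 /* Note: IEM keeps FTW in the compressed one-bit-per-register form; a set bit means the register is in use. */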
7753 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7754 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7755 if (pFpuCtx->FTW & RT_BIT(iReg))
7756 return VINF_SUCCESS;
7757 return VERR_NOT_FOUND;
7758}
7759
7760
7761IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7762{
7763 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7764 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7765 if (pFpuCtx->FTW & RT_BIT(iReg))
7766 {
7767 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7768 return VINF_SUCCESS;
7769 }
7770 return VERR_NOT_FOUND;
7771}
7772
7773
7774IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7775 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7776{
7777 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7778 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7779 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7780 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7781 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7782 {
7783 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7784 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7785 return VINF_SUCCESS;
7786 }
7787 return VERR_NOT_FOUND;
7788}
7789
7790
7791IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7792{
7793 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7794 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7795 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7796 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7797 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7798 {
7799 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7800 return VINF_SUCCESS;
7801 }
7802 return VERR_NOT_FOUND;
7803}
7804
7805
7806/**
7807 * Updates the FPU exception status after FCW is changed.
7808 *
7809 * @param pFpuCtx The FPU context.
7810 */
7811IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7812{
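 /* ES summarizes any pending unmasked exception; B mirrors ES on everything newer than the 8087 (where it meant busy). */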
7813 uint16_t u16Fsw = pFpuCtx->FSW;
7814 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7815 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7816 else
7817 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7818 pFpuCtx->FSW = u16Fsw;
7819}
7820
7821
7822/**
7823 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7824 *
7825 * @returns The full FTW.
7826 * @param pFpuCtx The FPU context.
7827 */
7828IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7829{
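 /* Full-format tag values: 0 = valid, 1 = zero, 2 = special (NaN, infinity, denormal, unnormal), 3 = empty. */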
7830 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7831 uint16_t u16Ftw = 0;
7832 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7833 for (unsigned iSt = 0; iSt < 8; iSt++)
7834 {
7835 unsigned const iReg = (iSt + iTop) & 7;
7836 if (!(u8Ftw & RT_BIT(iReg)))
7837 u16Ftw |= 3 << (iReg * 2); /* empty */
7838 else
7839 {
7840 uint16_t uTag;
7841 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7842 if (pr80Reg->s.uExponent == 0x7fff)
7843 uTag = 2; /* Exponent is all 1's => Special. */
7844 else if (pr80Reg->s.uExponent == 0x0000)
7845 {
7846 if (pr80Reg->s.u64Mantissa == 0x0000)
7847 uTag = 1; /* All bits are zero => Zero. */
7848 else
7849 uTag = 2; /* Must be special. */
7850 }
7851 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7852 uTag = 0; /* Valid. */
7853 else
7854 uTag = 2; /* Must be special. */
7855
7856 u16Ftw |= uTag << (iReg * 2);
7857 }
7858 }
7859
7860 return u16Ftw;
7861}
7862
7863
7864/**
7865 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7866 *
7867 * @returns The compressed FTW.
7868 * @param u16FullFtw The full FTW to convert.
7869 */
7870IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7871{
7872 uint8_t u8Ftw = 0;
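 /* Fold each 2-bit tag into one bit: 3 (empty) becomes 0, all other values become 1 (in use). */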
7873 for (unsigned i = 0; i < 8; i++)
7874 {
7875 if ((u16FullFtw & 3) != 3 /*empty*/)
7876 u8Ftw |= RT_BIT(i);
7877 u16FullFtw >>= 2;
7878 }
7879
7880 return u8Ftw;
7881}
7882
7883/** @} */
7884
7885
7886/** @name Memory access.
7887 *
7888 * @{
7889 */
7890
7891
7892/**
7893 * Updates the IEMCPU::cbWritten counter if applicable.
7894 *
7895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7896 * @param fAccess The access being accounted for.
7897 * @param cbMem The access size.
7898 */
7899DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7900{
7901 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7902 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7903 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7904}
7905
7906
7907/**
7908 * Checks if the given segment can be written to, raising the appropriate
7909 * exception if not.
7910 *
7911 * @returns VBox strict status code.
7912 *
7913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7914 * @param pHid Pointer to the hidden register.
7915 * @param iSegReg The register number.
7916 * @param pu64BaseAddr Where to return the base address to use for the
7917 * segment. (In 64-bit code it may differ from the
7918 * base in the hidden segment.)
7919 */
7920IEM_STATIC VBOXSTRICTRC
7921iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7922{
7923 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7924
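 /* In 64-bit mode only FS and GS have an active base; CS, DS, ES and SS are treated as flat. */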
7925 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7926 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7927 else
7928 {
7929 if (!pHid->Attr.n.u1Present)
7930 {
7931 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7932 AssertRelease(uSel == 0);
7933 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7934 return iemRaiseGeneralProtectionFault0(pVCpu);
7935 }
7936
7937 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7938 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7939 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7940 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7941 *pu64BaseAddr = pHid->u64Base;
7942 }
7943 return VINF_SUCCESS;
7944}
7945
7946
7947/**
7948 * Checks if the given segment can be read from, raising the appropriate
7949 * exception if not.
7950 *
7951 * @returns VBox strict status code.
7952 *
7953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7954 * @param pHid Pointer to the hidden register.
7955 * @param iSegReg The register number.
7956 * @param pu64BaseAddr Where to return the base address to use for the
7957 * segment. (In 64-bit code it may differ from the
7958 * base in the hidden segment.)
7959 */
7960IEM_STATIC VBOXSTRICTRC
7961iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7962{
7963 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7964
7965 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7966 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7967 else
7968 {
7969 if (!pHid->Attr.n.u1Present)
7970 {
7971 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7972 AssertRelease(uSel == 0);
7973 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7974 return iemRaiseGeneralProtectionFault0(pVCpu);
7975 }
7976
7977 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7978 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7979 *pu64BaseAddr = pHid->u64Base;
7980 }
7981 return VINF_SUCCESS;
7982}
7983
7984
7985/**
7986 * Applies the segment limit, base and attributes.
7987 *
7988 * This may raise a \#GP or \#SS.
7989 *
7990 * @returns VBox strict status code.
7991 *
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param fAccess The kind of access which is being performed.
7994 * @param iSegReg The index of the segment register to apply.
7995 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7996 * TSS, ++).
7997 * @param cbMem The access size.
7998 * @param pGCPtrMem Pointer to the guest memory address to apply
7999 * segmentation to. Input and output parameter.
8000 */
8001IEM_STATIC VBOXSTRICTRC
8002iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8003{
8004 if (iSegReg == UINT8_MAX)
8005 return VINF_SUCCESS;
8006
8007 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8008 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8009 switch (pVCpu->iem.s.enmCpuMode)
8010 {
8011 case IEMMODE_16BIT:
8012 case IEMMODE_32BIT:
8013 {
8014 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8015 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8016
8017 if ( pSel->Attr.n.u1Present
8018 && !pSel->Attr.n.u1Unusable)
8019 {
8020 Assert(pSel->Attr.n.u1DescType);
8021 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8022 {
8023 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8024 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8025 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8026
8027 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8028 {
8029 /** @todo CPL check. */
8030 }
8031
8032 /*
8033 * There are two kinds of data selectors, normal and expand down.
8034 */
8035 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8036 {
8037 if ( GCPtrFirst32 > pSel->u32Limit
8038 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8039 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8040 }
8041 else
8042 {
8043 /*
8044 * The upper boundary is defined by the B bit, not the G bit!
8045 */
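 /* Example: limit=0xefff with the B bit set makes offsets 0xf000 thru 0xffffffff valid. */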
8046 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8047 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8048 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8049 }
8050 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8051 }
8052 else
8053 {
8054
8055 /*
8056 * Code selectors can usually be used to read thru, while writing is
8057 * only permitted in real and V8086 mode.
8058 */
8059 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8060 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8061 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8062 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8063 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8064
8065 if ( GCPtrFirst32 > pSel->u32Limit
8066 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8067 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8068
8069 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8070 {
8071 /** @todo CPL check. */
8072 }
8073
8074 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8075 }
8076 }
8077 else
8078 return iemRaiseGeneralProtectionFault0(pVCpu);
8079 return VINF_SUCCESS;
8080 }
8081
8082 case IEMMODE_64BIT:
8083 {
8084 RTGCPTR GCPtrMem = *pGCPtrMem;
8085 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8086 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8087
8088 Assert(cbMem >= 1);
8089 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8090 return VINF_SUCCESS;
8091 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8092 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8093 return iemRaiseGeneralProtectionFault0(pVCpu);
8094 }
8095
8096 default:
8097 AssertFailedReturn(VERR_IEM_IPE_7);
8098 }
8099}
8100
8101
8102/**
8103 * Translates a virtual address to a physical address and checks if we
8104 * can access the page as specified.
8105 *
8106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8107 * @param GCPtrMem The virtual address.
8108 * @param fAccess The intended access.
8109 * @param pGCPhysMem Where to return the physical address.
8110 */
8111IEM_STATIC VBOXSTRICTRC
8112iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8113{
8114 /** @todo Need a different PGM interface here. We're currently using
8115 * generic / REM interfaces. This won't cut it for R0. */
8116 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8117 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8118 * here. */
8119 RTGCPHYS GCPhys;
8120 uint64_t fFlags;
8121 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8122 if (RT_FAILURE(rc))
8123 {
8124 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8125 /** @todo Check unassigned memory in unpaged mode. */
8126 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8127 *pGCPhysMem = NIL_RTGCPHYS;
8128 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8129 }
8130
8131 /* If the page is writable and does not have the no-exec bit set, all
8132 access is allowed. Otherwise we'll have to check more carefully... */
8133 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8134 {
8135 /* Write to read only memory? */
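 /* User (CPL 3, non-system) writes always honour the R/W bit; supervisor writes only when CR0.WP is set. */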
8136 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8137 && !(fFlags & X86_PTE_RW)
8138 && ( (pVCpu->iem.s.uCpl == 3
8139 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8140 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8141 {
8142 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8143 *pGCPhysMem = NIL_RTGCPHYS;
8144 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8145 }
8146
8147 /* Kernel memory accessed by userland? */
8148 if ( !(fFlags & X86_PTE_US)
8149 && pVCpu->iem.s.uCpl == 3
8150 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8151 {
8152 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8153 *pGCPhysMem = NIL_RTGCPHYS;
8154 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8155 }
8156
8157 /* Executing non-executable memory? */
8158 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8159 && (fFlags & X86_PTE_PAE_NX)
8160 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8161 {
8162 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8163 *pGCPhysMem = NIL_RTGCPHYS;
8164 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8165 VERR_ACCESS_DENIED);
8166 }
8167 }
8168
8169 /*
8170 * Set the dirty / access flags.
8171 * ASSUMES this is set when the address is translated rather than on commit...
8172 */
8173 /** @todo testcase: check when A and D bits are actually set by the CPU. */
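 /* Writes must set both A and D; everything else only sets A. */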
8174 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8175 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8176 {
8177 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8178 AssertRC(rc2);
8179 }
8180
8181 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8182 *pGCPhysMem = GCPhys;
8183 return VINF_SUCCESS;
8184}
8185
8186
8187
8188/**
8189 * Maps a physical page.
8190 *
8191 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8193 * @param GCPhysMem The physical address.
8194 * @param fAccess The intended access.
8195 * @param ppvMem Where to return the mapping address.
8196 * @param pLock The PGM lock.
8197 */
8198IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8199{
8200#ifdef IEM_LOG_MEMORY_WRITES
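 /* Force writes onto the bounce buffer path so they get logged by iemMemBounceBufferCommitAndUnmap. */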
8201 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8202 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8203#endif
8204
8205 /** @todo This API may require some improving later. A private deal with PGM
8206 * regarding locking and unlocking needs to be struck. A couple of TLBs
8207 * living in PGM, but with publicly accessible inlined access methods
8208 * could perhaps be an even better solution. */
8209 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8210 GCPhysMem,
8211 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8212 pVCpu->iem.s.fBypassHandlers,
8213 ppvMem,
8214 pLock);
8215 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8216 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8217
8218 return rc;
8219}
8220
8221
8222/**
8223 * Unmap a page previously mapped by iemMemPageMap.
8224 *
8225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8226 * @param GCPhysMem The physical address.
8227 * @param fAccess The intended access.
8228 * @param pvMem What iemMemPageMap returned.
8229 * @param pLock The PGM lock.
8230 */
8231DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8232{
8233 NOREF(pVCpu);
8234 NOREF(GCPhysMem);
8235 NOREF(fAccess);
8236 NOREF(pvMem);
8237 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8238}
8239
8240
8241/**
8242 * Looks up a memory mapping entry.
8243 *
8244 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8246 * @param pvMem The memory address.
8247 * @param fAccess The access to match (type and what bits).
8248 */
8249DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8250{
8251 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8252 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8253 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8254 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8255 return 0;
8256 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8257 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8258 return 1;
8259 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8260 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8261 return 2;
8262 return VERR_NOT_FOUND;
8263}
8264
8265
8266/**
8267 * Finds a free memmap entry when using iNextMapping doesn't work.
8268 *
8269 * @returns Memory mapping index, 1024 on failure.
8270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8271 */
8272IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8273{
8274 /*
8275 * The easy case.
8276 */
8277 if (pVCpu->iem.s.cActiveMappings == 0)
8278 {
8279 pVCpu->iem.s.iNextMapping = 1;
8280 return 0;
8281 }
8282
8283 /* There should be enough mappings for all instructions. */
8284 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8285
8286 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8287 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8288 return i;
8289
8290 AssertFailedReturn(1024);
8291}
8292
8293
8294/**
8295 * Commits a bounce buffer that needs writing back and unmaps it.
8296 *
8297 * @returns Strict VBox status code.
8298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8299 * @param iMemMap The index of the buffer to commit.
8300 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8301 * Always false in ring-3, obviously.
8302 */
8303IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8304{
8305 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8306 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8307#ifdef IN_RING3
8308 Assert(!fPostponeFail);
8309 RT_NOREF_PV(fPostponeFail);
8310#endif
8311
8312 /*
8313 * Do the writing.
8314 */
8315 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8316 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8317 {
8318 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8319 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8320 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8321 if (!pVCpu->iem.s.fBypassHandlers)
8322 {
8323 /*
8324 * Carefully and efficiently dealing with access handler return
8325 * codes make this a little bloated.
8326 */
8327 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8328 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8329 pbBuf,
8330 cbFirst,
8331 PGMACCESSORIGIN_IEM);
8332 if (rcStrict == VINF_SUCCESS)
8333 {
8334 if (cbSecond)
8335 {
8336 rcStrict = PGMPhysWrite(pVM,
8337 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8338 pbBuf + cbFirst,
8339 cbSecond,
8340 PGMACCESSORIGIN_IEM);
8341 if (rcStrict == VINF_SUCCESS)
8342 { /* nothing */ }
8343 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8344 {
8345 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8346 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8348 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8349 }
8350#ifndef IN_RING3
8351 else if (fPostponeFail)
8352 {
8353 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8356 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8357 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8358 return iemSetPassUpStatus(pVCpu, rcStrict);
8359 }
8360#endif
8361 else
8362 {
8363 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8364 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8366 return rcStrict;
8367 }
8368 }
8369 }
8370 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8371 {
8372 if (!cbSecond)
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8376 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8377 }
8378 else
8379 {
8380 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8382 pbBuf + cbFirst,
8383 cbSecond,
8384 PGMACCESSORIGIN_IEM);
8385 if (rcStrict2 == VINF_SUCCESS)
8386 {
8387 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8388 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8389 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8390 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8391 }
8392 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8393 {
8394 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8396 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8397 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8398 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8399 }
8400#ifndef IN_RING3
8401 else if (fPostponeFail)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8406 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8407 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8408 return iemSetPassUpStatus(pVCpu, rcStrict);
8409 }
8410#endif
8411 else
8412 {
8413 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8416 return rcStrict2;
8417 }
8418 }
8419 }
8420#ifndef IN_RING3
8421 else if (fPostponeFail)
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8426 if (!cbSecond)
8427 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8428 else
8429 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8430 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8431 return iemSetPassUpStatus(pVCpu, rcStrict);
8432 }
8433#endif
8434 else
8435 {
8436 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8437 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8439 return rcStrict;
8440 }
8441 }
8442 else
8443 {
8444 /*
8445 * No access handlers, much simpler.
8446 */
8447 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8448 if (RT_SUCCESS(rc))
8449 {
8450 if (cbSecond)
8451 {
8452 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8453 if (RT_SUCCESS(rc))
8454 { /* likely */ }
8455 else
8456 {
8457 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8459 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8460 return rc;
8461 }
8462 }
8463 }
8464 else
8465 {
8466 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8469 return rc;
8470 }
8471 }
8472 }
8473
8474#if defined(IEM_LOG_MEMORY_WRITES)
8475 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8476 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8477 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8478 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8479 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8480 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8481
8482 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8483 g_cbIemWrote = cbWrote;
8484 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8485#endif
8486
8487 /*
8488 * Free the mapping entry.
8489 */
8490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8491 Assert(pVCpu->iem.s.cActiveMappings != 0);
8492 pVCpu->iem.s.cActiveMappings--;
8493 return VINF_SUCCESS;
8494}
8495
8496
8497/**
8498 * iemMemMap worker that deals with a request crossing pages.
8499 */
8500IEM_STATIC VBOXSTRICTRC
8501iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8502{
8503 /*
8504 * Do the address translations.
8505 */
8506 RTGCPHYS GCPhysFirst;
8507 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8508 if (rcStrict != VINF_SUCCESS)
8509 return rcStrict;
8510
8511 RTGCPHYS GCPhysSecond;
8512 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8513 fAccess, &GCPhysSecond);
8514 if (rcStrict != VINF_SUCCESS)
8515 return rcStrict;
8516 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
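 /* The translation input was page aligned, so the offset bits should already be zero; mask anyway to be safe. */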
8517
8518 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8519
8520 /*
8521 * Read in the current memory content if it's a read, execute or partial
8522 * write access.
8523 */
8524 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8525 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8526 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8527
8528 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8529 {
8530 if (!pVCpu->iem.s.fBypassHandlers)
8531 {
8532 /*
8533 * Must carefully deal with access handler status codes here;
8534 * that makes the code a bit bloated.
8535 */
8536 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8537 if (rcStrict == VINF_SUCCESS)
8538 {
8539 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8540 if (rcStrict == VINF_SUCCESS)
8541 { /*likely */ }
8542 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8543 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8544 else
8545 {
8546 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8547 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8548 return rcStrict;
8549 }
8550 }
8551 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8552 {
8553 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8554 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8555 {
8556 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8557 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8558 }
8559 else
8560 {
8561 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8562 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8563 return rcStrict2;
8564 }
8565 }
8566 else
8567 {
8568 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8569 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8570 return rcStrict;
8571 }
8572 }
8573 else
8574 {
8575 /*
8576 * No informational status codes here, much more straightforward.
8577 */
8578 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8579 if (RT_SUCCESS(rc))
8580 {
8581 Assert(rc == VINF_SUCCESS);
8582 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8583 if (RT_SUCCESS(rc))
8584 Assert(rc == VINF_SUCCESS);
8585 else
8586 {
8587 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8588 return rc;
8589 }
8590 }
8591 else
8592 {
8593 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8594 return rc;
8595 }
8596 }
8597 }
8598#ifdef VBOX_STRICT
8599 else
8600 memset(pbBuf, 0xcc, cbMem);
8601 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8602 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8603#endif
8604
8605 /*
8606 * Commit the bounce buffer entry.
8607 */
8608 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8609 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8610 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8611 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8612 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8613 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8614 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8615 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8616 pVCpu->iem.s.cActiveMappings++;
8617
8618 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8619 *ppvMem = pbBuf;
8620 return VINF_SUCCESS;
8621}
8622
8623
8624/**
8625 * iemMemMap worker that deals with iemMemPageMap failures.
8626 */
8627IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8628 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8629{
8630 /*
8631 * Filter out conditions we can handle and the ones which shouldn't happen.
8632 */
8633 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8634 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8635 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8636 {
8637 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8638 return rcMap;
8639 }
8640 pVCpu->iem.s.cPotentialExits++;
8641
8642 /*
8643 * Read in the current memory content if it's a read, execute or partial
8644 * write access.
8645 */
8646 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8647 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8648 {
8649 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8650 memset(pbBuf, 0xff, cbMem);
8651 else
8652 {
8653 int rc;
8654 if (!pVCpu->iem.s.fBypassHandlers)
8655 {
8656 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8657 if (rcStrict == VINF_SUCCESS)
8658 { /* nothing */ }
8659 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8660 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8661 else
8662 {
8663 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8664 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8665 return rcStrict;
8666 }
8667 }
8668 else
8669 {
8670 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8671 if (RT_SUCCESS(rc))
8672 { /* likely */ }
8673 else
8674 {
8675 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8676 GCPhysFirst, rc));
8677 return rc;
8678 }
8679 }
8680 }
8681 }
8682#ifdef VBOX_STRICT
8683 else
8684 memset(pbBuf, 0xcc, cbMem);
8687 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8688 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8689#endif
8690
8691 /*
8692 * Commit the bounce buffer entry.
8693 */
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8696 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8697 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8698 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8699 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8700 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8701 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8702 pVCpu->iem.s.cActiveMappings++;
8703
8704 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8705 *ppvMem = pbBuf;
8706 return VINF_SUCCESS;
8707}
8708
8709
8710
8711/**
8712 * Maps the specified guest memory for the given kind of access.
8713 *
8714 * This may be using bounce buffering of the memory if it's crossing a page
8715 * boundary or if there is an access handler installed for any of it. Because
8716 * of lock prefix guarantees, we're in for some extra clutter when this
8717 * happens.
8718 *
8719 * This may raise a \#GP, \#SS, \#PF or \#AC.
8720 *
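 * A typical use, mirroring the iemMemFetchDataUxx workers further down:
 * @code
 *      uint16_t const *pu16Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint16_t const u16Value = *pu16Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *      }
 * @endcode
 *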
8721 * @returns VBox strict status code.
8722 *
8723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8724 * @param ppvMem Where to return the pointer to the mapped
8725 * memory.
8726 * @param cbMem The number of bytes to map. This is usually 1,
8727 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8728 * string operations it can be up to a page.
8729 * @param iSegReg The index of the segment register to use for
8730 * this access. The base and limits are checked.
8731 * Use UINT8_MAX to indicate that no segmentation
8732 * is required (for IDT, GDT and LDT accesses).
8733 * @param GCPtrMem The address of the guest memory.
8734 * @param fAccess How the memory is being accessed. The
8735 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8736 * how to map the memory, while the
8737 * IEM_ACCESS_WHAT_XXX bit is used when raising
8738 * exceptions.
8739 */
8740IEM_STATIC VBOXSTRICTRC
8741iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8742{
8743 /*
8744 * Check the input and figure out which mapping entry to use.
8745 */
8746 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8747 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_PARTIAL_WRITE)));
8748 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8749
8750 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8751 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8752 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8753 {
8754 iMemMap = iemMemMapFindFree(pVCpu);
8755 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8756 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8757 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8758 pVCpu->iem.s.aMemMappings[2].fAccess),
8759 VERR_IEM_IPE_9);
8760 }
8761
8762 /*
8763 * Map the memory, checking that we can actually access it. If something
8764 * slightly complicated happens, fall back on bounce buffering.
8765 */
8766 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8767 if (rcStrict != VINF_SUCCESS)
8768 return rcStrict;
8769
8770 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8771 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8772
8773 RTGCPHYS GCPhysFirst;
8774 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8775 if (rcStrict != VINF_SUCCESS)
8776 return rcStrict;
8777
8778 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8779 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8780 if (fAccess & IEM_ACCESS_TYPE_READ)
8781 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8782
8783 void *pvMem;
8784 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8785 if (rcStrict != VINF_SUCCESS)
8786 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8787
8788 /*
8789 * Fill in the mapping table entry.
8790 */
8791 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8792 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8793 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8794 pVCpu->iem.s.cActiveMappings++;
8795
8796 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8797 *ppvMem = pvMem;
8798
8799 return VINF_SUCCESS;
8800}
8801
8802
8803/**
8804 * Commits the guest memory if bounce buffered and unmaps it.
8805 *
8806 * @returns Strict VBox status code.
8807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8808 * @param pvMem The mapping.
8809 * @param fAccess The kind of access.
8810 */
8811IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8812{
8813 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8814 AssertReturn(iMemMap >= 0, iMemMap);
8815
8816 /* If it's bounce buffered, we may need to write back the buffer. */
8817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8818 {
8819 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8820 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8821 }
8822 /* Otherwise unlock it. */
8823 else
8824 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8825
8826 /* Free the entry. */
8827 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8828 Assert(pVCpu->iem.s.cActiveMappings != 0);
8829 pVCpu->iem.s.cActiveMappings--;
8830 return VINF_SUCCESS;
8831}
8832
8833#ifdef IEM_WITH_SETJMP
8834
8835/**
8836 * Maps the specified guest memory for the given kind of access, longjmp on
8837 * error.
8838 *
8839 * This may be using bounce buffering of the memory if it's crossing a page
8840 * boundary or if there is an access handler installed for any of it. Because
8841 * of lock prefix guarantees, we're in for some extra clutter when this
8842 * happens.
8843 *
8844 * This may raise a \#GP, \#SS, \#PF or \#AC.
8845 *
8846 * @returns Pointer to the mapped memory.
8847 *
8848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8849 * @param cbMem The number of bytes to map. This is usually 1,
8850 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8851 * string operations it can be up to a page.
8852 * @param iSegReg The index of the segment register to use for
8853 * this access. The base and limits are checked.
8854 * Use UINT8_MAX to indicate that no segmentation
8855 * is required (for IDT, GDT and LDT accesses).
8856 * @param GCPtrMem The address of the guest memory.
8857 * @param fAccess How the memory is being accessed. The
8858 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8859 * how to map the memory, while the
8860 * IEM_ACCESS_WHAT_XXX bit is used when raising
8861 * exceptions.
8862 */
8863IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8864{
8865 /*
8866 * Check the input and figure out which mapping entry to use.
8867 */
8868 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8869 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK | IEM_ACCESS_PARTIAL_WRITE)));
8870 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8871
8872 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8873 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8874 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8875 {
8876 iMemMap = iemMemMapFindFree(pVCpu);
8877 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8878 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8879 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8880 pVCpu->iem.s.aMemMappings[2].fAccess),
8881 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8882 }
8883
8884 /*
8885 * Map the memory, checking that we can actually access it. If something
8886 * slightly complicated happens, fall back on bounce buffering.
8887 */
8888 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8889 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8890 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8891
8892 /* Crossing a page boundary? */
8893 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8894 { /* No (likely). */ }
8895 else
8896 {
8897 void *pvMem;
8898 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8899 if (rcStrict == VINF_SUCCESS)
8900 return pvMem;
8901 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8902 }
8903
8904 RTGCPHYS GCPhysFirst;
8905 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8906 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8907 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8908
8909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8910 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8911 if (fAccess & IEM_ACCESS_TYPE_READ)
8912 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8913
8914 void *pvMem;
8915 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8916 if (rcStrict == VINF_SUCCESS)
8917 { /* likely */ }
8918 else
8919 {
8920 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8921 if (rcStrict == VINF_SUCCESS)
8922 return pvMem;
8923 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8924 }
8925
8926 /*
8927 * Fill in the mapping table entry.
8928 */
8929 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8930 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8931 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8932 pVCpu->iem.s.cActiveMappings++;
8933
8934 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8935 return pvMem;
8936}
8937
8938
8939/**
8940 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8941 *
8942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8943 * @param pvMem The mapping.
8944 * @param fAccess The kind of access.
8945 */
8946IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8947{
8948 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8949 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8950
8951 /* If it's bounce buffered, we may need to write back the buffer. */
8952 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8953 {
8954 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8955 {
8956 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8957 if (rcStrict == VINF_SUCCESS)
8958 return;
8959 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8960 }
8961 }
8962 /* Otherwise unlock it. */
8963 else
8964 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8965
8966 /* Free the entry. */
8967 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8968 Assert(pVCpu->iem.s.cActiveMappings != 0);
8969 pVCpu->iem.s.cActiveMappings--;
8970}
8971
8972#endif /* IEM_WITH_SETJMP */
8973
8974#ifndef IN_RING3
8975/**
8976 * Commits the guest memory if bounce buffered and unmaps it. If any bounce
8977 * buffer part shows trouble, the write is postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write state).
8978 *
8979 * Allows the instruction to be completed and retired, while the IEM user will
8980 * return to ring-3 immediately afterwards and do the postponed writes there.
8981 *
8982 * @returns VBox status code (no strict statuses). Caller must check
8983 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8984 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8985 * @param pvMem The mapping.
8986 * @param fAccess The kind of access.
8987 */
8988IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8989{
8990 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8991 AssertReturn(iMemMap >= 0, iMemMap);
8992
8993 /* If it's bounce buffered, we may need to write back the buffer. */
8994 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8995 {
8996 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8997 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8998 }
8999 /* Otherwise unlock it. */
9000 else
9001 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9002
9003 /* Free the entry. */
9004 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9005 Assert(pVCpu->iem.s.cActiveMappings != 0);
9006 pVCpu->iem.s.cActiveMappings--;
9007 return VINF_SUCCESS;
9008}
9009#endif
9010
9011
9012/**
9013 * Rolls back mappings, releasing page locks and such.
9014 *
9015 * The caller shall only call this after checking cActiveMappings.
9016 *
9018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9019 */
9020IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9021{
9022 Assert(pVCpu->iem.s.cActiveMappings > 0);
9023
9024 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9025 while (iMemMap-- > 0)
9026 {
9027 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9028 if (fAccess != IEM_ACCESS_INVALID)
9029 {
9030 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9031 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9032 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9033 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9034 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9035 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9036 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9037 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9038 pVCpu->iem.s.cActiveMappings--;
9039 }
9040 }
9041}
9042
9043
9044/**
9045 * Fetches a data byte.
9046 *
9047 * @returns Strict VBox status code.
9048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9049 * @param pu8Dst Where to return the byte.
9050 * @param iSegReg The index of the segment register to use for
9051 * this access. The base and limits are checked.
9052 * @param GCPtrMem The address of the guest memory.
9053 */
9054IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9055{
9056 /* The lazy approach for now... */
9057 uint8_t const *pu8Src;
9058 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9059 if (rc == VINF_SUCCESS)
9060 {
9061 *pu8Dst = *pu8Src;
9062 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9063 }
9064 return rc;
9065}
9066
9067
9068#ifdef IEM_WITH_SETJMP
9069/**
9070 * Fetches a data byte, longjmp on error.
9071 *
9072 * @returns The byte.
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param iSegReg The index of the segment register to use for
9075 * this access. The base and limits are checked.
9076 * @param GCPtrMem The address of the guest memory.
9077 */
9078DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9079{
9080 /* The lazy approach for now... */
9081 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9082 uint8_t const bRet = *pu8Src;
9083 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9084 return bRet;
9085}
9086#endif /* IEM_WITH_SETJMP */
9087
9088
9089/**
9090 * Fetches a data word.
9091 *
9092 * @returns Strict VBox status code.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9094 * @param pu16Dst Where to return the word.
9095 * @param iSegReg The index of the segment register to use for
9096 * this access. The base and limits are checked.
9097 * @param GCPtrMem The address of the guest memory.
9098 */
9099IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9100{
9101 /* The lazy approach for now... */
9102 uint16_t const *pu16Src;
9103 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9104 if (rc == VINF_SUCCESS)
9105 {
9106 *pu16Dst = *pu16Src;
9107 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9108 }
9109 return rc;
9110}
9111
9112
9113#ifdef IEM_WITH_SETJMP
9114/**
9115 * Fetches a data word, longjmp on error.
9116 *
9117 * @returns The word.
9118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9119 * @param iSegReg The index of the segment register to use for
9120 * this access. The base and limits are checked.
9121 * @param GCPtrMem The address of the guest memory.
9122 */
9123DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9124{
9125 /* The lazy approach for now... */
9126 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9127 uint16_t const u16Ret = *pu16Src;
9128 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9129 return u16Ret;
9130}
9131#endif
9132
9133
9134/**
9135 * Fetches a data dword.
9136 *
9137 * @returns Strict VBox status code.
9138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9139 * @param pu32Dst Where to return the dword.
9140 * @param iSegReg The index of the segment register to use for
9141 * this access. The base and limits are checked.
9142 * @param GCPtrMem The address of the guest memory.
9143 */
9144IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9145{
9146 /* The lazy approach for now... */
9147 uint32_t const *pu32Src;
9148 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9149 if (rc == VINF_SUCCESS)
9150 {
9151 *pu32Dst = *pu32Src;
9152 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9153 }
9154 return rc;
9155}
9156
9157
9158#ifdef IEM_WITH_SETJMP
9159
9160IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9161{
9162 Assert(cbMem >= 1);
9163 Assert(iSegReg < X86_SREG_COUNT);
9164
9165 /*
9166 * 64-bit mode is simpler.
9167 */
9168 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9169 {
9170 if (iSegReg >= X86_SREG_FS)
9171 {
9172 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9173 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9174 GCPtrMem += pSel->u64Base;
9175 }
9176
9177 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9178 return GCPtrMem;
9179 }
9180 /*
9181 * 16-bit and 32-bit segmentation.
9182 */
9183 else
9184 {
9185 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9186 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9187 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9188 == X86DESCATTR_P /* data, expand up */
9189 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9190 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9191 {
9192 /* expand up */
9193 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* inclusive last byte */
9194 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9195 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9196 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9197 }
9198 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9199 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9200 {
9201 /* expand down */
9202 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* inclusive last byte */
9203 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9204 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9205 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9206 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9207 }
9208 else
9209 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9210 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9211 }
9212 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9213}
9214
9215
9216IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9217{
9218 Assert(cbMem >= 1);
9219 Assert(iSegReg < X86_SREG_COUNT);
9220
9221 /*
9222 * 64-bit mode is simpler.
9223 */
9224 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9225 {
9226 if (iSegReg >= X86_SREG_FS)
9227 {
9228 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9229 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9230 GCPtrMem += pSel->u64Base;
9231 }
9232
9233 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9234 return GCPtrMem;
9235 }
9236 /*
9237 * 16-bit and 32-bit segmentation.
9238 */
9239 else
9240 {
9241 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9242 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9243 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9244 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9245 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9246 {
9247 /* expand up */
9247 /* expand up */
9248 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9249 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9250 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9251 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9252 }
9253 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9254 {
9255 /* expand down */
9256 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9257 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9258 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9259 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9260 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9261 }
9262 else
9263 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9264 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9265 }
9266 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9267}
9268
9269
9270/**
9271 * Fetches a data dword, longjmp on error, fallback/safe version.
9272 *
9273 * @returns The dword.
9274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9275 * @param iSegReg The index of the segment register to use for
9276 * this access. The base and limits are checked.
9277 * @param GCPtrMem The address of the guest memory.
9278 */
9279IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9280{
9281 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9282 uint32_t const u32Ret = *pu32Src;
9283 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9284 return u32Ret;
9285}
9286
9287
9288/**
9289 * Fetches a data dword, longjmp on error.
9290 *
9291 * @returns The dword.
9292 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9293 * @param iSegReg The index of the segment register to use for
9294 * this access. The base and limits are checked.
9295 * @param GCPtrMem The address of the guest memory.
9296 */
9297DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9298{
9299# ifdef IEM_WITH_DATA_TLB
9300 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9301 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9302 {
9303 /// @todo more later.
9304 }
9305
9306 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9307# else
9308 /* The lazy approach. */
9309 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9310 uint32_t const u32Ret = *pu32Src;
9311 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9312 return u32Ret;
9313# endif
9314}
9315#endif
9316
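/*
 * For illustration: the fast-path test above only admits accesses that fit
 * within a single guest page, since the data TLB caches translations one
 * page at a time. A standalone model (MY_PAGE_SIZE / MY_PAGE_OFFSET_MASK are
 * stand-ins for X86_PAGE_SIZE / X86_PAGE_OFFSET_MASK):
 *
 * @code
 *    #include <stdint.h>
 *    #include <stdio.h>
 *
 *    #define MY_PAGE_SIZE        UINT32_C(0x1000)
 *    #define MY_PAGE_OFFSET_MASK UINT32_C(0x0fff)
 *
 *    static int fitsInOnePage(uint64_t GCPtr, uint32_t cb)
 *    {
 *        return (GCPtr & MY_PAGE_OFFSET_MASK) <= MY_PAGE_SIZE - cb;
 *    }
 *
 *    int main(void)
 *    {
 *        printf("%d\n", fitsInOnePage(UINT64_C(0x1ffc), 4)); // 1: bytes 0xffc..0xfff
 *        printf("%d\n", fitsInOnePage(UINT64_C(0x1ffd), 4)); // 0: crosses a page boundary
 *        return 0;
 *    }
 * @endcode
 */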
9317
9318#ifdef SOME_UNUSED_FUNCTION
9319/**
9320 * Fetches a data dword and sign extends it to a qword.
9321 *
9322 * @returns Strict VBox status code.
9323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9324 * @param pu64Dst Where to return the sign extended value.
9325 * @param iSegReg The index of the segment register to use for
9326 * this access. The base and limits are checked.
9327 * @param GCPtrMem The address of the guest memory.
9328 */
9329IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9330{
9331 /* The lazy approach for now... */
9332 int32_t const *pi32Src;
9333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9334 if (rc == VINF_SUCCESS)
9335 {
9336 *pu64Dst = *pi32Src;
9337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9338 }
9339#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9340 else
9341 *pu64Dst = 0;
9342#endif
9343 return rc;
9344}
9345#endif
9346
9347
9348/**
9349 * Fetches a data qword.
9350 *
9351 * @returns Strict VBox status code.
9352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9353 * @param pu64Dst Where to return the qword.
9354 * @param iSegReg The index of the segment register to use for
9355 * this access. The base and limits are checked.
9356 * @param GCPtrMem The address of the guest memory.
9357 */
9358IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9359{
9360 /* The lazy approach for now... */
9361 uint64_t const *pu64Src;
9362 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9363 if (rc == VINF_SUCCESS)
9364 {
9365 *pu64Dst = *pu64Src;
9366 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9367 }
9368 return rc;
9369}
9370
9371
9372#ifdef IEM_WITH_SETJMP
9373/**
9374 * Fetches a data qword, longjmp on error.
9375 *
9376 * @returns The qword.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384 /* The lazy approach for now... */
9385 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9386 uint64_t const u64Ret = *pu64Src;
9387 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9388 return u64Ret;
9389}
9390#endif
9391
9392
9393/**
9394 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9395 *
9396 * @returns Strict VBox status code.
9397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9398 * @param pu64Dst Where to return the qword.
9399 * @param iSegReg The index of the segment register to use for
9400 * this access. The base and limits are checked.
9401 * @param GCPtrMem The address of the guest memory.
9402 */
9403IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9404{
9405 /* The lazy approach for now... */
9406 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9407 if (RT_UNLIKELY(GCPtrMem & 15))
9408 return iemRaiseGeneralProtectionFault0(pVCpu);
9409
9410 uint64_t const *pu64Src;
9411 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9412 if (rc == VINF_SUCCESS)
9413 {
9414 *pu64Dst = *pu64Src;
9415 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9416 }
9417 return rc;
9418}
9419
9420
9421#ifdef IEM_WITH_SETJMP
9422/**
9423 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9424 *
9425 * @returns The qword.
9426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9427 * @param iSegReg The index of the segment register to use for
9428 * this access. The base and limits are checked.
9429 * @param GCPtrMem The address of the guest memory.
9430 */
9431DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9432{
9433 /* The lazy approach for now... */
9434 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9435 if (RT_LIKELY(!(GCPtrMem & 15)))
9436 {
9437 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9438 uint64_t const u64Ret = *pu64Src;
9439 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9440 return u64Ret;
9441 }
9442
9443 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9444 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9445}
9446#endif
9447
9448
9449/**
9450 * Fetches a data tword.
9451 *
9452 * @returns Strict VBox status code.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param pr80Dst Where to return the tword.
9455 * @param iSegReg The index of the segment register to use for
9456 * this access. The base and limits are checked.
9457 * @param GCPtrMem The address of the guest memory.
9458 */
9459IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9460{
9461 /* The lazy approach for now... */
9462 PCRTFLOAT80U pr80Src;
9463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9464 if (rc == VINF_SUCCESS)
9465 {
9466 *pr80Dst = *pr80Src;
9467 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9468 }
9469 return rc;
9470}
9471
9472
9473#ifdef IEM_WITH_SETJMP
9474/**
9475 * Fetches a data tword, longjmp on error.
9476 *
9477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9478 * @param pr80Dst Where to return the tword.
9479 * @param iSegReg The index of the segment register to use for
9480 * this access. The base and limits are checked.
9481 * @param GCPtrMem The address of the guest memory.
9482 */
9483DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9484{
9485 /* The lazy approach for now... */
9486 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9487 *pr80Dst = *pr80Src;
9488 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9489}
9490#endif
9491
9492
9493/**
9494 * Fetches a data dqword (double qword), generally SSE related.
9495 *
9496 * @returns Strict VBox status code.
9497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9498 * @param pu128Dst Where to return the dqword.
9499 * @param iSegReg The index of the segment register to use for
9500 * this access. The base and limits are checked.
9501 * @param GCPtrMem The address of the guest memory.
9502 */
9503IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9504{
9505 /* The lazy approach for now... */
9506 PCRTUINT128U pu128Src;
9507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9508 if (rc == VINF_SUCCESS)
9509 {
9510 pu128Dst->au64[0] = pu128Src->au64[0];
9511 pu128Dst->au64[1] = pu128Src->au64[1];
9512 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9513 }
9514 return rc;
9515}
9516
9517
9518#ifdef IEM_WITH_SETJMP
9519/**
9520 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9521 *
9522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9523 * @param pu128Dst Where to return the dqword.
9524 * @param iSegReg The index of the segment register to use for
9525 * this access. The base and limits are checked.
9526 * @param GCPtrMem The address of the guest memory.
9527 */
9528IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9529{
9530 /* The lazy approach for now... */
9531 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9532 pu128Dst->au64[0] = pu128Src->au64[0];
9533 pu128Dst->au64[1] = pu128Src->au64[1];
9534 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9535}
9536#endif
9537
9538
9539/**
9540 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9541 * related.
9542 *
9543 * Raises \#GP(0) if not aligned.
9544 *
9545 * @returns Strict VBox status code.
9546 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9547 * @param pu128Dst Where to return the dqword.
9548 * @param iSegReg The index of the segment register to use for
9549 * this access. The base and limits are checked.
9550 * @param GCPtrMem The address of the guest memory.
9551 */
9552IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9553{
9554 /* The lazy approach for now... */
9555 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9556 if ( (GCPtrMem & 15)
9557 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9558 return iemRaiseGeneralProtectionFault0(pVCpu);
9559
9560 PCRTUINT128U pu128Src;
9561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9562 if (rc == VINF_SUCCESS)
9563 {
9564 pu128Dst->au64[0] = pu128Src->au64[0];
9565 pu128Dst->au64[1] = pu128Src->au64[1];
9566 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9567 }
9568 return rc;
9569}
9570
9571
9572#ifdef IEM_WITH_SETJMP
9573/**
9574 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9575 * related, longjmp on error.
9576 *
9577 * Raises \#GP(0) if not aligned.
9578 *
9579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9580 * @param pu128Dst Where to return the dqword.
9581 * @param iSegReg The index of the segment register to use for
9582 * this access. The base and limits are checked.
9583 * @param GCPtrMem The address of the guest memory.
9584 */
9585DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9586{
9587 /* The lazy approach for now... */
9588 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9589 if ( (GCPtrMem & 15) == 0
9590 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9591 {
9592 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9593 pu128Dst->au64[0] = pu128Src->au64[0];
9594 pu128Dst->au64[1] = pu128Src->au64[1];
9595 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9596 return;
9597 }
9598
9599 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9600 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9601}
9602#endif
9603
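/*
 * For illustration: the alignment gate used by the aligned SSE accessors
 * above, as a standalone predicate. A misaligned 16-byte access normally
 * raises #GP(0), unless MXCSR.MM (bit 17, the AMD misaligned exception mask)
 * is set. MY_MXCSR_MM is a stand-in for X86_MXCSR_MM:
 *
 * @code
 *    #include <stdint.h>
 *    #include <stdbool.h>
 *
 *    #define MY_MXCSR_MM UINT32_C(0x00020000)
 *
 *    static bool sseAlignedAccessOk(uint64_t GCPtrMem, uint32_t fMxCsr)
 *    {
 *        return (GCPtrMem & 15) == 0      // 16-byte aligned, or ...
 *            || (fMxCsr & MY_MXCSR_MM);   // ... misaligned accesses are permitted.
 *    }
 * @endcode
 */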
9604
9605/**
9606 * Fetches a data oword (octo word), generally AVX related.
9607 *
9608 * @returns Strict VBox status code.
9609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9610 * @param pu256Dst Where to return the oword.
9611 * @param iSegReg The index of the segment register to use for
9612 * this access. The base and limits are checked.
9613 * @param GCPtrMem The address of the guest memory.
9614 */
9615IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9616{
9617 /* The lazy approach for now... */
9618 PCRTUINT256U pu256Src;
9619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9620 if (rc == VINF_SUCCESS)
9621 {
9622 pu256Dst->au64[0] = pu256Src->au64[0];
9623 pu256Dst->au64[1] = pu256Src->au64[1];
9624 pu256Dst->au64[2] = pu256Src->au64[2];
9625 pu256Dst->au64[3] = pu256Src->au64[3];
9626 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9627 }
9628 return rc;
9629}
9630
9631
9632#ifdef IEM_WITH_SETJMP
9633/**
9634 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9635 *
9636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9637 * @param pu256Dst Where to return the oword.
9638 * @param iSegReg The index of the segment register to use for
9639 * this access. The base and limits are checked.
9640 * @param GCPtrMem The address of the guest memory.
9641 */
9642IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9643{
9644 /* The lazy approach for now... */
9645 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9646 pu256Dst->au64[0] = pu256Src->au64[0];
9647 pu256Dst->au64[1] = pu256Src->au64[1];
9648 pu256Dst->au64[2] = pu256Src->au64[2];
9649 pu256Dst->au64[3] = pu256Src->au64[3];
9650 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9651}
9652#endif
9653
9654
9655/**
9656 * Fetches a data oword (octo word) at an aligned address, generally AVX
9657 * related.
9658 *
9659 * Raises \#GP(0) if not aligned.
9660 *
9661 * @returns Strict VBox status code.
9662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9663 * @param pu256Dst Where to return the oword.
9664 * @param iSegReg The index of the segment register to use for
9665 * this access. The base and limits are checked.
9666 * @param GCPtrMem The address of the guest memory.
9667 */
9668IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9669{
9670 /* The lazy approach for now... */
9671 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9672 if (GCPtrMem & 31)
9673 return iemRaiseGeneralProtectionFault0(pVCpu);
9674
9675 PCRTUINT256U pu256Src;
9676 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9677 if (rc == VINF_SUCCESS)
9678 {
9679 pu256Dst->au64[0] = pu256Src->au64[0];
9680 pu256Dst->au64[1] = pu256Src->au64[1];
9681 pu256Dst->au64[2] = pu256Src->au64[2];
9682 pu256Dst->au64[3] = pu256Src->au64[3];
9683 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9684 }
9685 return rc;
9686}
9687
9688
9689#ifdef IEM_WITH_SETJMP
9690/**
9691 * Fetches a data oword (octo word) at an aligned address, generally AVX
9692 * related, longjmp on error.
9693 *
9694 * Raises \#GP(0) if not aligned.
9695 *
9696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9697 * @param pu256Dst Where to return the oword.
9698 * @param iSegReg The index of the segment register to use for
9699 * this access. The base and limits are checked.
9700 * @param GCPtrMem The address of the guest memory.
9701 */
9702DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9703{
9704 /* The lazy approach for now... */
9705 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9706 if ((GCPtrMem & 31) == 0)
9707 {
9708 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9709 pu256Dst->au64[0] = pu256Src->au64[0];
9710 pu256Dst->au64[1] = pu256Src->au64[1];
9711 pu256Dst->au64[2] = pu256Src->au64[2];
9712 pu256Dst->au64[3] = pu256Src->au64[3];
9713 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9714 return;
9715 }
9716
9717 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9718 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9719}
9720#endif
9721
9722
9723
9724/**
9725 * Fetches a descriptor register (lgdt, lidt).
9726 *
9727 * @returns Strict VBox status code.
9728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9729 * @param pcbLimit Where to return the limit.
9730 * @param pGCPtrBase Where to return the base.
9731 * @param iSegReg The index of the segment register to use for
9732 * this access. The base and limits are checked.
9733 * @param GCPtrMem The address of the guest memory.
9734 * @param enmOpSize The effective operand size.
9735 */
9736IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9737 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9738{
9739 /*
9740 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9741 * little special:
9742 * - The two reads are done separately.
9743 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9744 * - We suspect the 386 actually commits the limit before the base in
9745 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9746 * don't try to emulate this eccentric behavior, because it's not well
9747 * enough understood and is rather hard to trigger.
9748 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9749 */
9750 VBOXSTRICTRC rcStrict;
9751 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9752 {
9753 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9754 if (rcStrict == VINF_SUCCESS)
9755 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9756 }
9757 else
9758 {
9759 uint32_t uTmp = 0; /* (else Visual C++ may warn that it's used uninitialized) */
9760 if (enmOpSize == IEMMODE_32BIT)
9761 {
9762 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9763 {
9764 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9765 if (rcStrict == VINF_SUCCESS)
9766 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9767 }
9768 else
9769 {
9770 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9771 if (rcStrict == VINF_SUCCESS)
9772 {
9773 *pcbLimit = (uint16_t)uTmp;
9774 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9775 }
9776 }
9777 if (rcStrict == VINF_SUCCESS)
9778 *pGCPtrBase = uTmp;
9779 }
9780 else
9781 {
9782 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9783 if (rcStrict == VINF_SUCCESS)
9784 {
9785 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9786 if (rcStrict == VINF_SUCCESS)
9787 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9788 }
9789 }
9790 }
9791 return rcStrict;
9792}
9793
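/*
 * For illustration: decoding the 6-byte LGDT/LIDT memory operand the way the
 * 16-bit operand-size path above does, with the base truncated to 24 bits.
 * A minimal sketch over a raw little-endian byte buffer (hypothetical helper):
 *
 * @code
 *    #include <stdint.h>
 *    #include <string.h>
 *
 *    static void decodeXdtr16(uint8_t const ab[6], uint16_t *pcbLimit, uint32_t *puBase)
 *    {
 *        uint32_t uTmp;
 *        memcpy(pcbLimit, &ab[0], sizeof(uint16_t));  // first read: the limit word
 *        memcpy(&uTmp,    &ab[2], sizeof(uint32_t));  // second read: dword at offset 2
 *        *puBase = uTmp & UINT32_C(0x00ffffff);       // 16-bit opsize: 24-bit base only
 *    }
 * @endcode
 */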
9794
9795
9796/**
9797 * Stores a data byte.
9798 *
9799 * @returns Strict VBox status code.
9800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9801 * @param iSegReg The index of the segment register to use for
9802 * this access. The base and limits are checked.
9803 * @param GCPtrMem The address of the guest memory.
9804 * @param u8Value The value to store.
9805 */
9806IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9807{
9808 /* The lazy approach for now... */
9809 uint8_t *pu8Dst;
9810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9811 if (rc == VINF_SUCCESS)
9812 {
9813 *pu8Dst = u8Value;
9814 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9815 }
9816 return rc;
9817}
9818
9819
9820#ifdef IEM_WITH_SETJMP
9821/**
9822 * Stores a data byte, longjmp on error.
9823 *
9824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9825 * @param iSegReg The index of the segment register to use for
9826 * this access. The base and limits are checked.
9827 * @param GCPtrMem The address of the guest memory.
9828 * @param u8Value The value to store.
9829 */
9830IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9831{
9832 /* The lazy approach for now... */
9833 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9834 *pu8Dst = u8Value;
9835 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9836}
9837#endif
9838
9839
9840/**
9841 * Stores a data word.
9842 *
9843 * @returns Strict VBox status code.
9844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9845 * @param iSegReg The index of the segment register to use for
9846 * this access. The base and limits are checked.
9847 * @param GCPtrMem The address of the guest memory.
9848 * @param u16Value The value to store.
9849 */
9850IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9851{
9852 /* The lazy approach for now... */
9853 uint16_t *pu16Dst;
9854 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9855 if (rc == VINF_SUCCESS)
9856 {
9857 *pu16Dst = u16Value;
9858 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9859 }
9860 return rc;
9861}
9862
9863
9864#ifdef IEM_WITH_SETJMP
9865/**
9866 * Stores a data word, longjmp on error.
9867 *
9868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9869 * @param iSegReg The index of the segment register to use for
9870 * this access. The base and limits are checked.
9871 * @param GCPtrMem The address of the guest memory.
9872 * @param u16Value The value to store.
9873 */
9874IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9875{
9876 /* The lazy approach for now... */
9877 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9878 *pu16Dst = u16Value;
9879 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9880}
9881#endif
9882
9883
9884/**
9885 * Stores a data dword.
9886 *
9887 * @returns Strict VBox status code.
9888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9889 * @param iSegReg The index of the segment register to use for
9890 * this access. The base and limits are checked.
9891 * @param GCPtrMem The address of the guest memory.
9892 * @param u32Value The value to store.
9893 */
9894IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9895{
9896 /* The lazy approach for now... */
9897 uint32_t *pu32Dst;
9898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9899 if (rc == VINF_SUCCESS)
9900 {
9901 *pu32Dst = u32Value;
9902 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9903 }
9904 return rc;
9905}
9906
9907
9908#ifdef IEM_WITH_SETJMP
9909/**
9910 * Stores a data dword, longjmp on error.
9911 *
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param iSegReg The index of the segment register to use for
9915 * this access. The base and limits are checked.
9916 * @param GCPtrMem The address of the guest memory.
9917 * @param u32Value The value to store.
9918 */
9919IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9920{
9921 /* The lazy approach for now... */
9922 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9923 *pu32Dst = u32Value;
9924 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9925}
9926#endif
9927
9928
9929/**
9930 * Stores a data qword.
9931 *
9932 * @returns Strict VBox status code.
9933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9934 * @param iSegReg The index of the segment register to use for
9935 * this access. The base and limits are checked.
9936 * @param GCPtrMem The address of the guest memory.
9937 * @param u64Value The value to store.
9938 */
9939IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9940{
9941 /* The lazy approach for now... */
9942 uint64_t *pu64Dst;
9943 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9944 if (rc == VINF_SUCCESS)
9945 {
9946 *pu64Dst = u64Value;
9947 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9948 }
9949 return rc;
9950}
9951
9952
9953#ifdef IEM_WITH_SETJMP
9954/**
9955 * Stores a data qword, longjmp on error.
9956 *
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param iSegReg The index of the segment register to use for
9959 * this access. The base and limits are checked.
9960 * @param GCPtrMem The address of the guest memory.
9961 * @param u64Value The value to store.
9962 */
9963IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9964{
9965 /* The lazy approach for now... */
9966 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 *pu64Dst = u64Value;
9968 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9969}
9970#endif
9971
9972
9973/**
9974 * Stores a data dqword.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param iSegReg The index of the segment register to use for
9979 * this access. The base and limits are checked.
9980 * @param GCPtrMem The address of the guest memory.
9981 * @param u128Value The value to store.
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9984{
9985 /* The lazy approach for now... */
9986 PRTUINT128U pu128Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 pu128Dst->au64[0] = u128Value.au64[0];
9991 pu128Dst->au64[1] = u128Value.au64[1];
9992 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9993 }
9994 return rc;
9995}
9996
9997
9998#ifdef IEM_WITH_SETJMP
9999/**
10000 * Stores a data dqword, longjmp on error.
10001 *
10002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10003 * @param iSegReg The index of the segment register to use for
10004 * this access. The base and limits are checked.
10005 * @param GCPtrMem The address of the guest memory.
10006 * @param u128Value The value to store.
10007 */
10008IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10009{
10010 /* The lazy approach for now... */
10011 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10012 pu128Dst->au64[0] = u128Value.au64[0];
10013 pu128Dst->au64[1] = u128Value.au64[1];
10014 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10015}
10016#endif
10017
10018
10019/**
10020 * Stores a data dqword, SSE aligned.
10021 *
10022 * @returns Strict VBox status code.
10023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10024 * @param iSegReg The index of the segment register to use for
10025 * this access. The base and limits are checked.
10026 * @param GCPtrMem The address of the guest memory.
10027 * @param u128Value The value to store.
10028 */
10029IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10030{
10031 /* The lazy approach for now... */
10032 if ( (GCPtrMem & 15)
10033 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10034 return iemRaiseGeneralProtectionFault0(pVCpu);
10035
10036 PRTUINT128U pu128Dst;
10037 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10038 if (rc == VINF_SUCCESS)
10039 {
10040 pu128Dst->au64[0] = u128Value.au64[0];
10041 pu128Dst->au64[1] = u128Value.au64[1];
10042 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10043 }
10044 return rc;
10045}
10046
10047
10048#ifdef IEM_WITH_SETJMP
10049/**
10050 * Stores a data dqword, SSE aligned, longjmp on error.
10051 *
10053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10054 * @param iSegReg The index of the segment register to use for
10055 * this access. The base and limits are checked.
10056 * @param GCPtrMem The address of the guest memory.
10057 * @param u128Value The value to store.
10058 */
10059DECL_NO_INLINE(IEM_STATIC, void)
10060iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10061{
10062 /* The lazy approach for now... */
10063 if ( (GCPtrMem & 15) == 0
10064 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10065 {
10066 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10067 pu128Dst->au64[0] = u128Value.au64[0];
10068 pu128Dst->au64[1] = u128Value.au64[1];
10069 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10070 return;
10071 }
10072
10073 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10074 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10075}
10076#endif
10077
10078
10079/**
10080 * Stores a data oword (octo word).
10081 *
10082 * @returns Strict VBox status code.
10083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10084 * @param iSegReg The index of the segment register to use for
10085 * this access. The base and limits are checked.
10086 * @param GCPtrMem The address of the guest memory.
10087 * @param pu256Value Pointer to the value to store.
10088 */
10089IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10090{
10091 /* The lazy approach for now... */
10092 PRTUINT256U pu256Dst;
10093 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10094 if (rc == VINF_SUCCESS)
10095 {
10096 pu256Dst->au64[0] = pu256Value->au64[0];
10097 pu256Dst->au64[1] = pu256Value->au64[1];
10098 pu256Dst->au64[2] = pu256Value->au64[2];
10099 pu256Dst->au64[3] = pu256Value->au64[3];
10100 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10101 }
10102 return rc;
10103}
10104
10105
10106#ifdef IEM_WITH_SETJMP
10107/**
10108 * Stores a data oword (octo word), longjmp on error.
10109 *
10110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10111 * @param iSegReg The index of the segment register to use for
10112 * this access. The base and limits are checked.
10113 * @param GCPtrMem The address of the guest memory.
10114 * @param pu256Value Pointer to the value to store.
10115 */
10116IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10117{
10118 /* The lazy approach for now... */
10119 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10120 pu256Dst->au64[0] = pu256Value->au64[0];
10121 pu256Dst->au64[1] = pu256Value->au64[1];
10122 pu256Dst->au64[2] = pu256Value->au64[2];
10123 pu256Dst->au64[3] = pu256Value->au64[3];
10124 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10125}
10126#endif
10127
10128
10129/**
10130 * Stores a data oword (octo word), AVX aligned.
10131 *
10132 * @returns Strict VBox status code.
10133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10134 * @param iSegReg The index of the segment register to use for
10135 * this access. The base and limits are checked.
10136 * @param GCPtrMem The address of the guest memory.
10137 * @param pu256Value Pointer to the value to store.
10138 */
10139IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10140{
10141 /* The lazy approach for now... */
10142 if (GCPtrMem & 31)
10143 return iemRaiseGeneralProtectionFault0(pVCpu);
10144
10145 PRTUINT256U pu256Dst;
10146 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10147 if (rc == VINF_SUCCESS)
10148 {
10149 pu256Dst->au64[0] = pu256Value->au64[0];
10150 pu256Dst->au64[1] = pu256Value->au64[1];
10151 pu256Dst->au64[2] = pu256Value->au64[2];
10152 pu256Dst->au64[3] = pu256Value->au64[3];
10153 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10154 }
10155 return rc;
10156}
10157
10158
10159#ifdef IEM_WITH_SETJMP
10160/**
10161 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10162 *
10164 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10165 * @param iSegReg The index of the segment register to use for
10166 * this access. The base and limits are checked.
10167 * @param GCPtrMem The address of the guest memory.
10168 * @param pu256Value Pointer to the value to store.
10169 */
10170DECL_NO_INLINE(IEM_STATIC, void)
10171iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10172{
10173 /* The lazy approach for now... */
10174 if ((GCPtrMem & 31) == 0)
10175 {
10176 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10177 pu256Dst->au64[0] = pu256Value->au64[0];
10178 pu256Dst->au64[1] = pu256Value->au64[1];
10179 pu256Dst->au64[2] = pu256Value->au64[2];
10180 pu256Dst->au64[3] = pu256Value->au64[3];
10181 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10182 return;
10183 }
10184
10185 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10186 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10187}
10188#endif
10189
10190
10191/**
10192 * Stores a descriptor register (sgdt, sidt).
10193 *
10194 * @returns Strict VBox status code.
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param cbLimit The limit.
10197 * @param GCPtrBase The base address.
10198 * @param iSegReg The index of the segment register to use for
10199 * this access. The base and limits are checked.
10200 * @param GCPtrMem The address of the guest memory.
10201 */
10202IEM_STATIC VBOXSTRICTRC
10203iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10204{
10205 /*
10206 * The SIDT and SGDT instructions actually store the data using two
10207 * independent writes. The instructions do not respond to operand size prefixes.
10208 */
10209 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10210 if (rcStrict == VINF_SUCCESS)
10211 {
10212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10213 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10214 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10215 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10216 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10217 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10218 else
10219 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10220 }
10221 return rcStrict;
10222}
10223
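/*
 * For illustration: in 16-bit mode the value stored for the high base byte
 * above depends on the target CPU -- the 286 forces it to 0xFF, while later
 * CPUs store the base as-is. A minimal sketch (hypothetical helper):
 *
 * @code
 *    #include <stdint.h>
 *
 *    static uint32_t xdtrBaseDword16(uint32_t uBase, int fIs286OrOlder)
 *    {
 *        return fIs286OrOlder
 *             ? uBase | UINT32_C(0xff000000)   // 286: high byte forced to 0xFF
 *             : uBase;                         // 386+: 24-bit base, high byte as-is
 *    }
 * @endcode
 */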
10224
10225/**
10226 * Pushes a word onto the stack.
10227 *
10228 * @returns Strict VBox status code.
10229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10230 * @param u16Value The value to push.
10231 */
10232IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10233{
10234 /* Decrement the stack pointer. */
10235 uint64_t uNewRsp;
10236 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10237
10238 /* Write the word the lazy way. */
10239 uint16_t *pu16Dst;
10240 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10241 if (rc == VINF_SUCCESS)
10242 {
10243 *pu16Dst = u16Value;
10244 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10245 }
10246
10247 /* Commit the new RSP value unless an access handler made trouble. */
10248 if (rc == VINF_SUCCESS)
10249 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10250
10251 return rc;
10252}
10253
10254
10255/**
10256 * Pushes a dword onto the stack.
10257 *
10258 * @returns Strict VBox status code.
10259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10260 * @param u32Value The value to push.
10261 */
10262IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10263{
10264 /* Decrement the stack pointer. */
10265 uint64_t uNewRsp;
10266 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10267
10268 /* Write the dword the lazy way. */
10269 uint32_t *pu32Dst;
10270 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10271 if (rc == VINF_SUCCESS)
10272 {
10273 *pu32Dst = u32Value;
10274 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10275 }
10276
10277 /* Commit the new RSP value unless an access handler made trouble. */
10278 if (rc == VINF_SUCCESS)
10279 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10280
10281 return rc;
10282}
10283
10284
10285/**
10286 * Pushes a dword segment register value onto the stack.
10287 *
10288 * @returns Strict VBox status code.
10289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10290 * @param u32Value The value to push.
10291 */
10292IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10293{
10294 /* Decrement the stack pointer. */
10295 uint64_t uNewRsp;
10296 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10297
10298 /* The Intel docs talk about zero extending the selector register
10299 value. The actual Intel CPU at hand might be zero extending the value,
10300 but it still only writes the lower word... */
10301 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10302 * happens when crossing a page boundary: is the high word checked
10303 * for write accessibility or not? Probably it is. What about segment limits?
10304 * It appears this behavior is also shared with trap error codes.
10305 *
10306 * Docs indicate the behavior changed maybe with the Pentium or Pentium Pro.
10307 * Check on ancient hardware to see when it actually changed. */
10308 uint16_t *pu16Dst;
10309 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10310 if (rc == VINF_SUCCESS)
10311 {
10312 *pu16Dst = (uint16_t)u32Value;
10313 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10314 }
10315
10316 /* Commit the new RSP value unless an access handler made trouble. */
10317 if (rc == VINF_SUCCESS)
10318 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10319
10320 return rc;
10321}
10322
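/*
 * For illustration: the effect of the word-sized store above is that RSP
 * moves by four while only the low word of the selector lands in memory,
 * leaving bytes 2 and 3 of the stack slot untouched. A standalone model:
 *
 * @code
 *    #include <stdint.h>
 *    #include <stdio.h>
 *    #include <string.h>
 *
 *    int main(void)
 *    {
 *        uint8_t  abStack[8];
 *        memset(abStack, 0xaa, sizeof(abStack));  // pre-existing stack contents
 *        uint32_t uEsp = 8;                       // toy stack, ESP at the top
 *
 *        uEsp -= 4;                               // the push reserves a dword slot,
 *        uint16_t const uSel = 0x0023;
 *        memcpy(&abStack[uEsp], &uSel, 2);        // ... but only a word is written.
 *
 *        printf("%02x %02x %02x %02x\n",          // 23 00 aa aa: high bytes untouched
 *               abStack[4], abStack[5], abStack[6], abStack[7]);
 *        return 0;
 *    }
 * @endcode
 */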
10323
10324/**
10325 * Pushes a qword onto the stack.
10326 *
10327 * @returns Strict VBox status code.
10328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10329 * @param u64Value The value to push.
10330 */
10331IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10332{
10333 /* Decrement the stack pointer. */
10334 uint64_t uNewRsp;
10335 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10336
10337 /* Write the qword the lazy way. */
10338 uint64_t *pu64Dst;
10339 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10340 if (rc == VINF_SUCCESS)
10341 {
10342 *pu64Dst = u64Value;
10343 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10344 }
10345
10346 /* Commit the new RSP value unless an access handler made trouble. */
10347 if (rc == VINF_SUCCESS)
10348 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10349
10350 return rc;
10351}
10352
10353
10354/**
10355 * Pops a word from the stack.
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10359 * @param pu16Value Where to store the popped value.
10360 */
10361IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10362{
10363 /* Increment the stack pointer. */
10364 uint64_t uNewRsp;
10365 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10366
10367 /* Fetch the word the lazy way. */
10368 uint16_t const *pu16Src;
10369 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10370 if (rc == VINF_SUCCESS)
10371 {
10372 *pu16Value = *pu16Src;
10373 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10374
10375 /* Commit the new RSP value. */
10376 if (rc == VINF_SUCCESS)
10377 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10378 }
10379
10380 return rc;
10381}
10382
10383
10384/**
10385 * Pops a dword from the stack.
10386 *
10387 * @returns Strict VBox status code.
10388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10389 * @param pu32Value Where to store the popped value.
10390 */
10391IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10392{
10393 /* Increment the stack pointer. */
10394 uint64_t uNewRsp;
10395 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10396
10397 /* Fetch the dword the lazy way. */
10398 uint32_t const *pu32Src;
10399 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10400 if (rc == VINF_SUCCESS)
10401 {
10402 *pu32Value = *pu32Src;
10403 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10404
10405 /* Commit the new RSP value. */
10406 if (rc == VINF_SUCCESS)
10407 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10408 }
10409
10410 return rc;
10411}
10412
10413
10414/**
10415 * Pops a qword from the stack.
10416 *
10417 * @returns Strict VBox status code.
10418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10419 * @param pu64Value Where to store the popped value.
10420 */
10421IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10422{
10423 /* Increment the stack pointer. */
10424 uint64_t uNewRsp;
10425 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10426
10427 /* Fetch the qword the lazy way. */
10428 uint64_t const *pu64Src;
10429 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10430 if (rc == VINF_SUCCESS)
10431 {
10432 *pu64Value = *pu64Src;
10433 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10434
10435 /* Commit the new RSP value. */
10436 if (rc == VINF_SUCCESS)
10437 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10438 }
10439
10440 return rc;
10441}
10442
10443
10444/**
10445 * Pushes a word onto the stack, using a temporary stack pointer.
10446 *
10447 * @returns Strict VBox status code.
10448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10449 * @param u16Value The value to push.
10450 * @param pTmpRsp Pointer to the temporary stack pointer.
10451 */
10452IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10453{
10454 /* Decrement the stack pointer. */
10455 RTUINT64U NewRsp = *pTmpRsp;
10456 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10457
10458 /* Write the word the lazy way. */
10459 uint16_t *pu16Dst;
10460 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10461 if (rc == VINF_SUCCESS)
10462 {
10463 *pu16Dst = u16Value;
10464 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10465 }
10466
10467 /* Commit the new RSP value unless an access handler made trouble. */
10468 if (rc == VINF_SUCCESS)
10469 *pTmpRsp = NewRsp;
10470
10471 return rc;
10472}
10473
10474
10475/**
10476 * Pushes a dword onto the stack, using a temporary stack pointer.
10477 *
10478 * @returns Strict VBox status code.
10479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10480 * @param u32Value The value to push.
10481 * @param pTmpRsp Pointer to the temporary stack pointer.
10482 */
10483IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10484{
10485 /* Decrement the stack pointer. */
10486 RTUINT64U NewRsp = *pTmpRsp;
10487 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10488
10489 /* Write the dword the lazy way. */
10490 uint32_t *pu32Dst;
10491 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10492 if (rc == VINF_SUCCESS)
10493 {
10494 *pu32Dst = u32Value;
10495 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10496 }
10497
10498 /* Commit the new RSP value unless an access handler made trouble. */
10499 if (rc == VINF_SUCCESS)
10500 *pTmpRsp = NewRsp;
10501
10502 return rc;
10503}
10504
10505
10506/**
10507 * Pushes a qword onto the stack, using a temporary stack pointer.
10508 *
10509 * @returns Strict VBox status code.
10510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10511 * @param u64Value The value to push.
10512 * @param pTmpRsp Pointer to the temporary stack pointer.
10513 */
10514IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10515{
10516 /* Decrement the stack pointer. */
10517 RTUINT64U NewRsp = *pTmpRsp;
10518 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10519
10520 /* Write the qword the lazy way. */
10521 uint64_t *pu64Dst;
10522 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10523 if (rc == VINF_SUCCESS)
10524 {
10525 *pu64Dst = u64Value;
10526 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10527 }
10528
10529 /* Commit the new RSP value unless an access handler made trouble. */
10530 if (rc == VINF_SUCCESS)
10531 *pTmpRsp = NewRsp;
10532
10533 return rc;
10534}
10535
10536
10537/**
10538 * Pops a word from the stack, using a temporary stack pointer.
10539 *
10540 * @returns Strict VBox status code.
10541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10542 * @param pu16Value Where to store the popped value.
10543 * @param pTmpRsp Pointer to the temporary stack pointer.
10544 */
10545IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10546{
10547 /* Increment the stack pointer. */
10548 RTUINT64U NewRsp = *pTmpRsp;
10549 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10550
10551 /* Fetch the word the lazy way. */
10552 uint16_t const *pu16Src;
10553 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10554 if (rc == VINF_SUCCESS)
10555 {
10556 *pu16Value = *pu16Src;
10557 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10558
10559 /* Commit the new RSP value. */
10560 if (rc == VINF_SUCCESS)
10561 *pTmpRsp = NewRsp;
10562 }
10563
10564 return rc;
10565}
10566
10567
10568/**
10569 * Pops a dword from the stack, using a temporary stack pointer.
10570 *
10571 * @returns Strict VBox status code.
10572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10573 * @param pu32Value Where to store the popped value.
10574 * @param pTmpRsp Pointer to the temporary stack pointer.
10575 */
10576IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10577{
10578 /* Increment the stack pointer. */
10579 RTUINT64U NewRsp = *pTmpRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10581
10582 /* Fetch the dword the lazy way. */
10583 uint32_t const *pu32Src;
10584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10585 if (rc == VINF_SUCCESS)
10586 {
10587 *pu32Value = *pu32Src;
10588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10589
10590 /* Commit the new RSP value. */
10591 if (rc == VINF_SUCCESS)
10592 *pTmpRsp = NewRsp;
10593 }
10594
10595 return rc;
10596}
10597
10598
10599/**
10600 * Pops a qword from the stack, using a temporary stack pointer.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param pu64Value Where to store the popped value.
10605 * @param pTmpRsp Pointer to the temporary stack pointer.
10606 */
10607IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10608{
10609 /* Increment the stack pointer. */
10610 RTUINT64U NewRsp = *pTmpRsp;
10611 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10612
10613 /* Fetch the qword the lazy way. */
10614 uint64_t const *pu64Src;
10615 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10616 if (rcStrict == VINF_SUCCESS)
10617 {
10618 *pu64Value = *pu64Src;
10619 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10620
10621 /* Commit the new RSP value. */
10622 if (rcStrict == VINF_SUCCESS)
10623 *pTmpRsp = NewRsp;
10624 }
10625
10626 return rcStrict;
10627}
10628
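/*
 * For illustration: the Ex variants above work on a caller-provided RSP copy
 * so multi-step operations can back out cleanly; the real RSP is committed
 * only once every access has succeeded. A usage sketch with hypothetical
 * values uValue1/uValue2:
 *
 * @code
 *    RTUINT64U TmpRsp;
 *    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;                // work on a copy of RSP
 *    VBOXSTRICTRC rcStrict = iemMemStackPushU16Ex(pVCpu, uValue1, &TmpRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *        rcStrict = iemMemStackPushU16Ex(pVCpu, uValue2, &TmpRsp);
 *    if (rcStrict == VINF_SUCCESS)
 *        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;            // commit only on full success
 *    return rcStrict;
 * @endcode
 */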
10629
10630/**
10631 * Begins a special stack push (used by interrupts, exceptions and such).
10632 *
10633 * This will raise \#SS or \#PF if appropriate.
10634 *
10635 * @returns Strict VBox status code.
10636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10637 * @param cbMem The number of bytes to push onto the stack.
10638 * @param ppvMem Where to return the pointer to the stack memory.
10639 * As with the other memory functions this could be
10640 * direct access or bounce buffered access, so
10641 * don't commit register until the commit call
10642 * don't commit register state until the commit call
10643 * @param puNewRsp Where to return the new RSP value. This must be
10644 * passed unchanged to
10645 * iemMemStackPushCommitSpecial().
10646 */
10647IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10648{
10649 Assert(cbMem < UINT8_MAX);
10650 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10651 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10652}
10653
10654
10655/**
10656 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10657 *
10658 * This will update the rSP.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10662 * @param pvMem The pointer returned by
10663 * iemMemStackPushBeginSpecial().
10664 * @param uNewRsp The new RSP value returned by
10665 * iemMemStackPushBeginSpecial().
10666 */
10667IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10668{
10669 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10670 if (rcStrict == VINF_SUCCESS)
10671 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10672 return rcStrict;
10673}
10674
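/*
 * For illustration: a sketch of the begin/commit pairing, modelled on how a
 * real-mode exception frame might be pushed (hypothetical frame layout and
 * variable names; no register state is committed before the commit succeeds):
 *
 * @code
 *    uint16_t    *pu16Frame;
 *    uint64_t     uNewRsp;
 *    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *    if (rcStrict != VINF_SUCCESS)
 *        return rcStrict;
 *    pu16Frame[2] = uFlags;   // FLAGS
 *    pu16Frame[1] = uSelCs;   // CS
 *    pu16Frame[0] = uIp;      // IP
 *    rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp); // updates RSP
 * @endcode
 */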
10675
10676/**
10677 * Begins a special stack pop (used by iret, retf and such).
10678 *
10679 * This will raise \#SS or \#PF if appropriate.
10680 *
10681 * @returns Strict VBox status code.
10682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10683 * @param cbMem The number of bytes to pop from the stack.
10684 * @param ppvMem Where to return the pointer to the stack memory.
10685 * @param puNewRsp Where to return the new RSP value. This must be
10686 * assigned to CPUMCTX::rsp manually some time
10687 * after iemMemStackPopDoneSpecial() has been
10688 * called.
10689 */
10690IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10691{
10692 Assert(cbMem < UINT8_MAX);
10693 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10694 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10695}
10696
10697
10698/**
10699 * Continues a special stack pop (used by iret and retf).
10700 *
10701 * This will raise \#SS or \#PF if appropriate.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param cbMem The number of bytes to pop from the stack.
10706 * @param ppvMem Where to return the pointer to the stack memory.
10707 * @param puNewRsp Where to return the new RSP value. This must be
10708 * assigned to CPUMCTX::rsp manually some time
10709 * after iemMemStackPopDoneSpecial() has been
10710 * called.
10711 */
10712IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10713{
10714 Assert(cbMem < UINT8_MAX);
10715 RTUINT64U NewRsp;
10716 NewRsp.u = *puNewRsp;
10717 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10718 *puNewRsp = NewRsp.u;
10719 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10720}
10721
10722
10723/**
10724 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10725 * iemMemStackPopContinueSpecial).
10726 *
10727 * The caller will manually commit the rSP.
10728 *
10729 * @returns Strict VBox status code.
10730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10731 * @param pvMem The pointer returned by
10732 * iemMemStackPopBeginSpecial() or
10733 * iemMemStackPopContinueSpecial().
10734 */
10735IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10736{
10737 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10738}
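/* Example (minimal sketch, not built): how an iret-style implementation could
   string the pop API together.  All locals here are hypothetical. */
#if 0
    uint16_t const *pau16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const uRetIp    = pau16Frame[0];
    uint16_t const uRetCs    = pau16Frame[1];
    uint16_t const uRetFlags = pau16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau16Frame);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* ... validate uRetCs, merge uRetFlags, load the new CS, etc. ... */
    pVCpu->cpum.GstCtx.rsp = uNewRsp; /* committed manually, as documented above */
#endif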
10739
10740
10741/**
10742 * Fetches a system table byte.
10743 *
10744 * @returns Strict VBox status code.
10745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10746 * @param pbDst Where to return the byte.
10747 * @param iSegReg The index of the segment register to use for
10748 * this access. The base and limits are checked.
10749 * @param GCPtrMem The address of the guest memory.
10750 */
10751IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10752{
10753 /* The lazy approach for now... */
10754 uint8_t const *pbSrc;
10755 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10756 if (rc == VINF_SUCCESS)
10757 {
10758 *pbDst = *pbSrc;
10759 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10760 }
10761 return rc;
10762}
10763
10764
10765/**
10766 * Fetches a system table word.
10767 *
10768 * @returns Strict VBox status code.
10769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10770 * @param pu16Dst Where to return the word.
10771 * @param iSegReg The index of the segment register to use for
10772 * this access. The base and limits are checked.
10773 * @param GCPtrMem The address of the guest memory.
10774 */
10775IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10776{
10777 /* The lazy approach for now... */
10778 uint16_t const *pu16Src;
10779 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10780 if (rc == VINF_SUCCESS)
10781 {
10782 *pu16Dst = *pu16Src;
10783 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10784 }
10785 return rc;
10786}
10787
10788
10789/**
10790 * Fetches a system table dword.
10791 *
10792 * @returns Strict VBox status code.
10793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10794 * @param pu32Dst Where to return the dword.
10795 * @param iSegReg The index of the segment register to use for
10796 * this access. The base and limits are checked.
10797 * @param GCPtrMem The address of the guest memory.
10798 */
10799IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10800{
10801 /* The lazy approach for now... */
10802 uint32_t const *pu32Src;
10803 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10804 if (rc == VINF_SUCCESS)
10805 {
10806 *pu32Dst = *pu32Src;
10807 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10808 }
10809 return rc;
10810}
10811
10812
10813/**
10814 * Fetches a system table qword.
10815 *
10816 * @returns Strict VBox status code.
10817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10818 * @param pu64Dst Where to return the qword.
10819 * @param iSegReg The index of the segment register to use for
10820 * this access. The base and limits are checked.
10821 * @param GCPtrMem The address of the guest memory.
10822 */
10823IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10824{
10825 /* The lazy approach for now... */
10826 uint64_t const *pu64Src;
10827 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10828 if (rc == VINF_SUCCESS)
10829 {
10830 *pu64Dst = *pu64Src;
10831 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10832 }
10833 return rc;
10834}
10835
10836
10837/**
10838 * Fetches a descriptor table entry with caller specified error code.
10839 *
10840 * @returns Strict VBox status code.
10841 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10842 * @param pDesc Where to return the descriptor table entry.
10843 * @param uSel The selector which table entry to fetch.
10844 * @param uXcpt The exception to raise on table lookup error.
10845 * @param uErrorCode The error code associated with the exception.
10846 */
10847IEM_STATIC VBOXSTRICTRC
10848iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10849{
10850 AssertPtr(pDesc);
10851 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10852
10853 /** @todo did the 286 require all 8 bytes to be accessible? */
10854 /*
10855 * Get the selector table base and check bounds.
10856 */
10857 RTGCPTR GCPtrBase;
10858 if (uSel & X86_SEL_LDT)
10859 {
10860 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10861 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10862 {
10863 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10864 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10865 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10866 uErrorCode, 0);
10867 }
10868
10869 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10870 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10871 }
10872 else
10873 {
10874 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10875 {
10876 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10877 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10878 uErrorCode, 0);
10879 }
10880 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10881 }
10882
10883 /*
10884 * Read the legacy descriptor and maybe the long mode extensions if
10885 * required.
10886 */
10887 VBOXSTRICTRC rcStrict;
10888 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10889 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10890 else
10891 {
10892 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10893 if (rcStrict == VINF_SUCCESS)
10894 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10895 if (rcStrict == VINF_SUCCESS)
10896 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10897 if (rcStrict == VINF_SUCCESS)
10898 pDesc->Legacy.au16[3] = 0;
10899 else
10900 return rcStrict;
10901 }
10902
10903 if (rcStrict == VINF_SUCCESS)
10904 {
10905 if ( !IEM_IS_LONG_MODE(pVCpu)
10906 || pDesc->Legacy.Gen.u1DescType)
10907 pDesc->Long.au64[1] = 0;
10908 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
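            /* Note: (uSel | X86_SEL_RPL_LDT) + 1 == (uSel & X86_SEL_MASK) + 8, i.e. the high 8 bytes. */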
10909 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10910 else
10911 {
10912 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10913 /** @todo is this the right exception? */
10914 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10915 }
10916 }
10917 return rcStrict;
10918}
10919
10920
10921/**
10922 * Fetches a descriptor table entry.
10923 *
10924 * @returns Strict VBox status code.
10925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10926 * @param pDesc Where to return the descriptor table entry.
10927 * @param uSel The selector which table entry to fetch.
10928 * @param uXcpt The exception to raise on table lookup error.
10929 */
10930IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10931{
10932 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10933}
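/* Example (minimal sketch, not built): the typical selector load sequence
   built on the helper above together with iemMemMarkSelDescAccessed() below.
   uNewCs is a hypothetical selector value. */
#if 0
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewCs, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    if (!Desc.Legacy.Gen.u1Present)
        return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewCs);
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCs);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }
#endif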
10934
10935
10936/**
10937 * Fakes a long mode stack selector for SS = 0.
10938 *
10939 * @param pDescSs Where to return the fake stack descriptor.
10940 * @param uDpl The DPL we want.
10941 */
10942IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10943{
10944 pDescSs->Long.au64[0] = 0;
10945 pDescSs->Long.au64[1] = 0;
10946 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10947 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10948 pDescSs->Long.Gen.u2Dpl = uDpl;
10949 pDescSs->Long.Gen.u1Present = 1;
10950 pDescSs->Long.Gen.u1Long = 1;
10951}
10952
10953
10954/**
10955 * Marks the selector descriptor as accessed (only non-system descriptors).
10956 *
10957 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10958 * will therefore skip the limit checks.
10959 *
10960 * @returns Strict VBox status code.
10961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10962 * @param uSel The selector.
10963 */
10964IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
10965{
10966 /*
10967 * Get the selector table base and calculate the entry address.
10968 */
10969 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10970 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10971 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10972 GCPtr += uSel & X86_SEL_MASK;
10973
10974 /*
10975 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10976 * ugly stuff to avoid this. This will make sure the access is atomic and
10977 * more or less removes any question about 8-bit vs 32-bit accesses.
10978 */
10979 VBOXSTRICTRC rcStrict;
10980 uint32_t volatile *pu32;
10981 if ((GCPtr & 3) == 0)
10982 {
10983 /* The normal case: map the 32 bits around the accessed bit (bit 40 of the descriptor). */
10984 GCPtr += 2 + 2;
10985 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10986 if (rcStrict != VINF_SUCCESS)
10987 return rcStrict;
10988 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10989 }
10990 else
10991 {
10992 /* The misaligned GDT/LDT case, map the whole thing. */
10993 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10994 if (rcStrict != VINF_SUCCESS)
10995 return rcStrict;
10996 switch ((uintptr_t)pu32 & 3)
10997 {
10998 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10999 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11000 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11001 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11002 }
11003 }
11004
11005 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11006}
11007
11008/** @} */
11009
11010
11011/*
11012 * Include the C/C++ implementation of instructions.
11013 */
11014#include "IEMAllCImpl.cpp.h"
11015
11016
11017
11018/** @name "Microcode" macros.
11019 *
11020 * The idea is that we should be able to use the same code to interpret
11021 * instructions as well as to recompile them. Thus this obfuscation.
11022 *
11023 * @{
11024 */
11025#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11026#define IEM_MC_END() }
11027#define IEM_MC_PAUSE() do {} while (0)
11028#define IEM_MC_CONTINUE() do {} while (0)
11029
11030/** Internal macro. */
11031#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11032 do \
11033 { \
11034 VBOXSTRICTRC rcStrict2 = a_Expr; \
11035 if (rcStrict2 != VINF_SUCCESS) \
11036 return rcStrict2; \
11037 } while (0)
11038
11039
11040#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11041#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11042#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11043#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11044#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11045#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11046#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11047#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11048#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11049 do { \
11050 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11051 return iemRaiseDeviceNotAvailable(pVCpu); \
11052 } while (0)
11053#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11054 do { \
11055 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11056 return iemRaiseDeviceNotAvailable(pVCpu); \
11057 } while (0)
11058#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11059 do { \
11060 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11061 return iemRaiseMathFault(pVCpu); \
11062 } while (0)
11063#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11064 do { \
11065 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11066 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11067 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11068 return iemRaiseUndefinedOpcode(pVCpu); \
11069 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11070 return iemRaiseDeviceNotAvailable(pVCpu); \
11071 } while (0)
11072#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11073 do { \
11074 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11075 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11076 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11077 return iemRaiseUndefinedOpcode(pVCpu); \
11078 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11079 return iemRaiseDeviceNotAvailable(pVCpu); \
11080 } while (0)
11081#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11082 do { \
11083 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11084 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11085 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11086 return iemRaiseUndefinedOpcode(pVCpu); \
11087 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11088 return iemRaiseDeviceNotAvailable(pVCpu); \
11089 } while (0)
11090#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11091 do { \
11092 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11093 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11094 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11095 return iemRaiseUndefinedOpcode(pVCpu); \
11096 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11097 return iemRaiseDeviceNotAvailable(pVCpu); \
11098 } while (0)
11099#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11100 do { \
11101 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11102 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11103 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11104 return iemRaiseUndefinedOpcode(pVCpu); \
11105 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11106 return iemRaiseDeviceNotAvailable(pVCpu); \
11107 } while (0)
11108#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11109 do { \
11110 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11111 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11112 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11113 return iemRaiseUndefinedOpcode(pVCpu); \
11114 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11115 return iemRaiseDeviceNotAvailable(pVCpu); \
11116 } while (0)
11117#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11118 do { \
11119 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11120 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11121 return iemRaiseUndefinedOpcode(pVCpu); \
11122 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11123 return iemRaiseDeviceNotAvailable(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11126 do { \
11127 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11128 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11129 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11130 return iemRaiseUndefinedOpcode(pVCpu); \
11131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11135 do { \
11136 if (pVCpu->iem.s.uCpl != 0) \
11137 return iemRaiseGeneralProtectionFault0(pVCpu); \
11138 } while (0)
11139#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11140 do { \
11141 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11142 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11143 } while (0)
11144#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11145 do { \
11146 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11148 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11149 return iemRaiseUndefinedOpcode(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11152 do { \
11153 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11154 return iemRaiseGeneralProtectionFault0(pVCpu); \
11155 } while (0)
11156
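/* Example (minimal sketch, not built): instruction bodies place these raise
   checks at the very top of the microcode block, before touching any state,
   so the #UD/#NM priority matches the real CPU: */
#if 0
    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
    /* ... the actual SSE2 operation would go here ... */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif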
11157
11158#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11159#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11160#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11161#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11162#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11163#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11164#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11165 uint32_t a_Name; \
11166 uint32_t *a_pName = &a_Name
11167#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11168 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11169
11170#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11171#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11172
11173#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11174#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11175#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11176#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11190#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11191 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11192 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11193 } while (0)
11194#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11195 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11196 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11197 } while (0)
11198#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11199 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11200 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11201 } while (0)
11202/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11203#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11204 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11205 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11206 } while (0)
11207#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11208 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11209 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11210 } while (0)
11211/** @note Not for IOPL or IF testing or modification. */
11212#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11213#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11214#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11215#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11216
11217#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11218#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11219#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11220#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11221#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11222#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11223#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11224#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11225#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11226#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11227/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11228#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11229 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11230 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11231 } while (0)
11232#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11233 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11234 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11235 } while (0)
11236#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11237 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11238
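/* Example (minimal sketch, not built): how a decoder function composes the
   microcode macros above into a complete instruction body, here a 16-bit
   register-to-register move.  The function name and the hard-coded register
   indices are illustrative; real instances live in the instruction tables. */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleOp_mov_ax_cx(PVMCPUCC pVCpu)
{
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
#endif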
11239
11240#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11241#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11242/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11243 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11244#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11245#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11246/** @note Not for IOPL or IF testing or modification. */
11247#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11248
11249#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11250#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11251#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11252 do { \
11253 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11254 *pu32Reg += (a_u32Value); \
11255 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11256 } while (0)
11257#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11258
11259#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11260#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11261#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11262 do { \
11263 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11264 *pu32Reg -= (a_u32Value); \
11265 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11266 } while (0)
11267#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11268#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= (a_u16Const); } while (0)
11269
11270#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11271#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11272#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11273#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11274#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11275#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11276#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11277
11278#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11279#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11280#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11281#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11282
11283#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11284#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11285#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11286
11287#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11288#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11289#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11290
11291#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11292#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11293#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11294
11295#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11296#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11297#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11298
11299#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11300
11301#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11302
11303#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11304#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11305#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11306 do { \
11307 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11308 *pu32Reg &= (a_u32Value); \
11309 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11310 } while (0)
11311#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11312
11313#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11314#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11315#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11316 do { \
11317 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11318 *pu32Reg |= (a_u32Value); \
11319 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11320 } while (0)
11321#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11322
11323
11324/** @note Not for IOPL or IF modification. */
11325#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11326/** @note Not for IOPL or IF modification. */
11327#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11328/** @note Not for IOPL or IF modification. */
11329#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11330
11331#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11332
11333/** Switches the FPU state to MMX mode (FSW.TOS=0, architectural FTW=0, stored as abridged FTW=0xff, i.e. all registers valid) if necessary. */
11334#define IEM_MC_FPU_TO_MMX_MODE() do { \
11335 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11336 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11337 } while (0)
11338
11339/** Switches the FPU state from MMX mode (architectural FTW=0xffff, stored as abridged FTW=0, i.e. all registers empty). */
11340#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11341 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11342 } while (0)
11343
11344#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11345 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11346#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11347 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
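/* Note: a write to an MMX register sets bits 79:64 of the overlaid FPU
   register (the sign/exponent field) to all ones, which is why the stores
   below put 0xffff into au32[2]. */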
11348#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11349 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11350 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11351 } while (0)
11352#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11353 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11354 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11355 } while (0)
11356#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11357 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11358#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11359 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11360#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11361 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11362
11363#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11364 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11365 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11366 } while (0)
11367#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11368 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11369#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11370 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11371#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11372 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11373#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11374 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11375 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11376 } while (0)
11377#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11378 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11379#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11380 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11381 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11382 } while (0)
11383#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11384 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11385#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11386 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11387 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11388 } while (0)
11389#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11390 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11391#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11392 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11393#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11394 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11395#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11396 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11397#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11398 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11399 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11400 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11401 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11402 } while (0)
11403
11404#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11405 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11406 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11407 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11408 } while (0)
11409#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11410 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11411 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11412 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11413 } while (0)
11414#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11415 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11416 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11417 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11418 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11419 } while (0)
11420#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11421 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11422 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11423 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11424 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11425 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11426 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11427 } while (0)
11428
11429#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11430#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11431 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11432 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11433 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11434 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11435 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11436 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11437 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11438 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11439 } while (0)
11440#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11441 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11442 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11443 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11444 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11445 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11446 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11447 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11448 } while (0)
11449#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11450 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11451 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11452 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11453 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11454 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11455 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11456 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11457 } while (0)
11458#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11459 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11460 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11461 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11462 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11463 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11464 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11465 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11466 } while (0)
11467
11468#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11469 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11470#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11471 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11472#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11473 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11474#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11475 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11476 uintptr_t const iYRegTmp = (a_iYReg); \
11477 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11478 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11479 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11480 } while (0)
11481
11482#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11483 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11484 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11485 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11486 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11487 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11490 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11491 } while (0)
11492#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11493 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11494 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11495 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11496 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11498 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11499 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11500 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11501 } while (0)
11502#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11503 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11504 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11505 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11506 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11507 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11508 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11509 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11510 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11511 } while (0)
11512
11513#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11514 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11515 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11516 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11517 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11519 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11520 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11522 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11523 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11524 } while (0)
11525#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11526 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11527 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11529 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11535 } while (0)
11536#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11537 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11538 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11539 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11540 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11541 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11543 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11545 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11546 } while (0)
11547#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11548 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11549 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11550 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11551 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11552 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11554 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11555 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11556 } while (0)
11557
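/* The memory access macros below come in two build flavors: without
   IEM_WITH_SETJMP the worker returns a VBOXSTRICTRC which the macro checks
   and propagates via IEM_MC_RETURN_ON_FAILURE, while with IEM_WITH_SETJMP
   the *Jmp workers report failures by longjmp'ing out of the instruction,
   so the macros reduce to plain assignments/calls. */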
11558#ifndef IEM_WITH_SETJMP
11559# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11561# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11563# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11564 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11565#else
11566# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11567 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11568# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11569 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11570# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11571 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11572#endif
11573
11574#ifndef IEM_WITH_SETJMP
11575# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11577# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11579# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11581#else
11582# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11583 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11584# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11585 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11586# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11587 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11588#endif
11589
11590#ifndef IEM_WITH_SETJMP
11591# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11593# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11595# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11597#else
11598# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11599 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11600# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11601 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11602# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11603 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11604#endif
11605
11606#ifdef SOME_UNUSED_FUNCTION
11607# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11609#endif
11610
11611#ifndef IEM_WITH_SETJMP
11612# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11613 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11614# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11615 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11616# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11618# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11620#else
11621# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11622 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11623# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11624 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11625# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11628 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11629#endif
11630
11631#ifndef IEM_WITH_SETJMP
11632# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11633 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11634# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11636# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11638#else
11639# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11640 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11642 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11643# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11644 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11645#endif
11646
11647#ifndef IEM_WITH_SETJMP
11648# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11652#else
11653# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11654 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11655# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11656 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11657#endif
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11664#else
11665# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11666 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11667# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11668 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11669#endif
11670
11671
11672
11673#ifndef IEM_WITH_SETJMP
11674# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11675 do { \
11676 uint8_t u8Tmp; \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11678 (a_u16Dst) = u8Tmp; \
11679 } while (0)
11680# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11681 do { \
11682 uint8_t u8Tmp; \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11684 (a_u32Dst) = u8Tmp; \
11685 } while (0)
11686# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11687 do { \
11688 uint8_t u8Tmp; \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11690 (a_u64Dst) = u8Tmp; \
11691 } while (0)
11692# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11693 do { \
11694 uint16_t u16Tmp; \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11696 (a_u32Dst) = u16Tmp; \
11697 } while (0)
11698# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11699 do { \
11700 uint16_t u16Tmp; \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11702 (a_u64Dst) = u16Tmp; \
11703 } while (0)
11704# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11705 do { \
11706 uint32_t u32Tmp; \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11708 (a_u64Dst) = u32Tmp; \
11709 } while (0)
11710#else /* IEM_WITH_SETJMP */
11711# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11712 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11713# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11714 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11722 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11723#endif /* IEM_WITH_SETJMP */
11724
11725#ifndef IEM_WITH_SETJMP
11726# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11727 do { \
11728 uint8_t u8Tmp; \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11730 (a_u16Dst) = (int8_t)u8Tmp; \
11731 } while (0)
11732# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11733 do { \
11734 uint8_t u8Tmp; \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11736 (a_u32Dst) = (int8_t)u8Tmp; \
11737 } while (0)
11738# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11739 do { \
11740 uint8_t u8Tmp; \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11742 (a_u64Dst) = (int8_t)u8Tmp; \
11743 } while (0)
11744# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 do { \
11746 uint16_t u16Tmp; \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11748 (a_u32Dst) = (int16_t)u16Tmp; \
11749 } while (0)
11750# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 do { \
11752 uint16_t u16Tmp; \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11754 (a_u64Dst) = (int16_t)u16Tmp; \
11755 } while (0)
11756# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11757 do { \
11758 uint32_t u32Tmp; \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11760 (a_u64Dst) = (int32_t)u32Tmp; \
11761 } while (0)
11762#else /* IEM_WITH_SETJMP */
11763# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11766 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11768 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11770 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11771# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11772 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11773# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11774 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11775#endif /* IEM_WITH_SETJMP */
11776
11777#ifndef IEM_WITH_SETJMP
11778# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11780# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11782# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11784# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11786#else
11787# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11788 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11789# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11790 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11791# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11792 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11793# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11794 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11795#endif
11796
11797#ifndef IEM_WITH_SETJMP
11798# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11800# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11802# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11803 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11804# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11806#else
11807# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11808 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11809# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11810 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11811# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11812 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11813# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11814 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11815#endif
11816
11817#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11818#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11819#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11820#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11821#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11822#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11823#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11824 do { \
11825 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11826 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11827 } while (0)
11828
11829#ifndef IEM_WITH_SETJMP
11830# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11832# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11833 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11834#else
11835# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11836 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11837# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11838 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11839#endif
11840
11841#ifndef IEM_WITH_SETJMP
11842# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11844# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11846#else
11847# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11848 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11849# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11850 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11851#endif
11852
11853
11854#define IEM_MC_PUSH_U16(a_u16Value) \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11856#define IEM_MC_PUSH_U32(a_u32Value) \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11858#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11860#define IEM_MC_PUSH_U64(a_u64Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11862
11863#define IEM_MC_POP_U16(a_pu16Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11865#define IEM_MC_POP_U32(a_pu32Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11867#define IEM_MC_POP_U64(a_pu64Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11869
11870/** Maps guest memory for direct or bounce buffered access.
11871 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11872 * @remarks May return.
11873 */
11874#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11876
11877/** Maps guest memory for direct or bounce buffered access.
11878 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11879 * @remarks May return.
11880 */
11881#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11882 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11883
11884/** Commits the memory and unmaps the guest memory.
11885 * @remarks May return.
11886 */
11887#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11889
11890/** Commits the memory and unmaps the guest memory, unless the FPU status word
11891 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11892 * that would cause the FPU store not to take place.
11893 *
11894 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11895 * store, while \#P will not.
11896 *
11897 * @remarks May in theory return - for now.
11898 */
11899#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11900 do { \
11901 if ( !(a_u16FSW & X86_FSW_ES) \
11902 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11903 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11905 } while (0)
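
/*
 * Illustrative sketch of the map / modify / commit pattern the mapping macros
 * above exist for, modelled on the read-modify-write arithmetic decoders (a
 * simplification; iemAImpl_add_u16 is a real assembly worker, the rest of the
 * body is an assumption).  The final 0 passed to IEM_MC_MEM_MAP is the a_iArg
 * operand index:
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint16_t *,      pu16Dst,          0);
 *      IEM_MC_ARG(uint16_t,        u16Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,  2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u16, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */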
11906
11907/** Calculate effective address from R/M. */
11908#ifndef IEM_WITH_SETJMP
11909# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11910 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11911#else
11912# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11913 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11914#endif
11915
11916#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11917#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11918#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11919#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11920#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11921#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11922#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11923
11924/**
11925 * Defers the rest of the instruction emulation to a C implementation routine
11926 * and returns, only taking the standard parameters.
11927 *
11928 * @param a_pfnCImpl The pointer to the C routine.
11929 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11930 */
11931#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11932
11933/**
11934 * Defers the rest of instruction emulation to a C implementation routine and
11935 * returns, taking one argument in addition to the standard ones.
11936 *
11937 * @param a_pfnCImpl The pointer to the C routine.
11938 * @param a0 The argument.
11939 */
11940#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11941
11942/**
11943 * Defers the rest of the instruction emulation to a C implementation routine
11944 * and returns, taking two arguments in addition to the standard ones.
11945 *
11946 * @param a_pfnCImpl The pointer to the C routine.
11947 * @param a0 The first extra argument.
11948 * @param a1 The second extra argument.
11949 */
11950#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11951
11952/**
11953 * Defers the rest of the instruction emulation to a C implementation routine
11954 * and returns, taking three arguments in addition to the standard ones.
11955 *
11956 * @param a_pfnCImpl The pointer to the C routine.
11957 * @param a0 The first extra argument.
11958 * @param a1 The second extra argument.
11959 * @param a2 The third extra argument.
11960 */
11961#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11962
11963/**
11964 * Defers the rest of the instruction emulation to a C implementation routine
11965 * and returns, taking four arguments in addition to the standard ones.
11966 *
11967 * @param a_pfnCImpl The pointer to the C routine.
11968 * @param a0 The first extra argument.
11969 * @param a1 The second extra argument.
11970 * @param a2 The third extra argument.
11971 * @param a3 The fourth extra argument.
11972 */
11973#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11974
11975/**
11976 * Defers the rest of the instruction emulation to a C implementation routine
11977 * and returns, taking five arguments in addition to the standard ones.
11978 *
11979 * @param a_pfnCImpl The pointer to the C routine.
11980 * @param a0 The first extra argument.
11981 * @param a1 The second extra argument.
11982 * @param a2 The third extra argument.
11983 * @param a3 The fourth extra argument.
11984 * @param a4 The fifth extra argument.
11985 */
11986#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11987
11988/**
11989 * Defers the entire instruction emulation to a C implementation routine and
11990 * returns, only taking the standard parameters.
11991 *
11992 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11993 *
11994 * @param a_pfnCImpl The pointer to the C routine.
11995 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11996 */
11997#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11998
11999/**
12000 * Defers the entire instruction emulation to a C implementation routine and
12001 * returns, taking one argument in addition to the standard ones.
12002 *
12003 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12004 *
12005 * @param a_pfnCImpl The pointer to the C routine.
12006 * @param a0 The argument.
12007 */
12008#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12009
12010/**
12011 * Defers the entire instruction emulation to a C implementation routine and
12012 * returns, taking two arguments in addition to the standard ones.
12013 *
12014 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12015 *
12016 * @param a_pfnCImpl The pointer to the C routine.
12017 * @param a0 The first extra argument.
12018 * @param a1 The second extra argument.
12019 */
12020#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12021
12022/**
12023 * Defers the entire instruction emulation to a C implementation routine and
12024 * returns, taking three arguments in addition to the standard ones.
12025 *
12026 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12027 *
12028 * @param a_pfnCImpl The pointer to the C routine.
12029 * @param a0 The first extra argument.
12030 * @param a1 The second extra argument.
12031 * @param a2 The third extra argument.
12032 */
12033#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12034
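/*
 * Illustrative sketch contrasting the two CIMPL flavours: IEM_MC_CALL_CIMPL_*
 * returns from within a microcode block, while IEM_MC_DEFER_TO_CIMPL_* forms
 * the entire decoder body, as in the HLT decoder (sketch under that
 * assumption; the real decoder may differ in detail):
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */
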
12035/**
12036 * Calls an FPU assembly implementation taking one visible argument.
12037 *
12038 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12039 * @param a0 The first extra argument.
12040 */
12041#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12042 do { \
12043 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12044 } while (0)
12045
12046/**
12047 * Calls an FPU assembly implementation taking two visible arguments.
12048 *
12049 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12050 * @param a0 The first extra argument.
12051 * @param a1 The second extra argument.
12052 */
12053#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12054 do { \
12055 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12056 } while (0)
12057
12058/**
12059 * Calls an FPU assembly implementation taking three visible arguments.
12060 *
12061 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12062 * @param a0 The first extra argument.
12063 * @param a1 The second extra argument.
12064 * @param a2 The third extra argument.
12065 */
12066#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12067 do { \
12068 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12069 } while (0)
12070
12071#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12072 do { \
12073 (a_FpuData).FSW = (a_FSW); \
12074 (a_FpuData).r80Result = *(a_pr80Value); \
12075 } while (0)
12076
12077/** Pushes FPU result onto the stack. */
12078#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12079 iemFpuPushResult(pVCpu, &a_FpuData)
12080/** Pushes FPU result onto the stack and sets the FPUDP. */
12081#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12082 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12083
12084/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12085#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12086 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12087
12088/** Stores FPU result in a stack register. */
12089#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12090 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12091/** Stores FPU result in a stack register and pops the stack. */
12092#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12093 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12094/** Stores FPU result in a stack register and sets the FPUDP. */
12095#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12096 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12097/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12098 * stack. */
12099#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12100 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12101
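/*
 * Illustrative sketch of how the FPU call and result macros combine for an
 * ST(0) = ST(0) op m32fp style instruction (simplified; raise-exception and
 * fetch steps elided, and while iemAImpl_fadd_r80_by_r32 follows the worker
 * naming scheme, this exact body is an assumption):
 *
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,  FpuRes,  0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Val1,          1);
 *      IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2, r32Val2, 2);
 *      ...
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Val1, 0)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r32, pFpuRes, pr80Val1, pr32Val2);
 *          IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_ENDIF();
 */
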
12102/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12103#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12104 iemFpuUpdateOpcodeAndIp(pVCpu)
12105/** Free a stack register (for FFREE and FFREEP). */
12106#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12107 iemFpuStackFree(pVCpu, a_iStReg)
12108/** Increment the FPU stack pointer. */
12109#define IEM_MC_FPU_STACK_INC_TOP() \
12110 iemFpuStackIncTop(pVCpu)
12111/** Decrement the FPU stack pointer. */
12112#define IEM_MC_FPU_STACK_DEC_TOP() \
12113 iemFpuStackDecTop(pVCpu)
12114
12115/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12116#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12117 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12118/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12119#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12120 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12121/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12122#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12123 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12124/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12125#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12126 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12127/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12128 * stack. */
12129#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12130 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12131/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12132#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12133 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12134
12135/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12136#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12137 iemFpuStackUnderflow(pVCpu, a_iStDst)
12138/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12139 * stack. */
12140#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12141 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12142/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12143 * FPUDS. */
12144#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12145 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12146/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12147 * FPUDS. Pops stack. */
12148#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12149 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12150/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12151 * stack twice. */
12152#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12153 iemFpuStackUnderflowThenPopPop(pVCpu)
12154/** Raises an FPU stack underflow exception for an instruction pushing a result
12155 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12156#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12157 iemFpuStackPushUnderflow(pVCpu)
12158/** Raises an FPU stack underflow exception for an instruction pushing a result
12159 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12160#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12161 iemFpuStackPushUnderflowTwo(pVCpu)
12162
12163/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12164 * FPUIP, FPUCS and FOP. */
12165#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12166 iemFpuStackPushOverflow(pVCpu)
12167/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12168 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12169#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12170 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12171/** Prepares for using the FPU state.
12172 * Ensures that we can use the host FPU in the current context (RC+R0).
12173 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12174#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12175/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12176#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12177/** Actualizes the guest FPU state so it can be accessed and modified. */
12178#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12179
12180/** Prepares for using the SSE state.
12181 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12182 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12183#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12184/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12185#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12186/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12187#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12188
12189/** Prepares for using the AVX state.
12190 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12191 * Ensures the guest AVX state in the CPUMCTX is up to date.
12192 * @note This will include the AVX512 state too when support for it is added
12193 * due to the zero extending feature of VEX instructions. */
12194#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12195/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12196#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12197/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12198#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12199
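/*
 * Illustrative sketch of the actualize-for-read usage (an assumption, not a
 * verbatim decoder): a plain XMM register read only needs the guest state
 * brought up to date, not the full host-unit preparation:
 *
 *      IEM_MC_LOCAL(RTUINT128U, uSrc);
 *      IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *      IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *      IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *
 * Instructions that let the host FPU/SSE/AVX unit do the actual work use the
 * corresponding IEM_MC_PREPARE_*_USAGE() instead.
 */
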
12200/**
12201 * Calls an MMX assembly implementation taking two visible arguments.
12202 *
12203 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12204 * @param a0 The first extra argument.
12205 * @param a1 The second extra argument.
12206 */
12207#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12208 do { \
12209 IEM_MC_PREPARE_FPU_USAGE(); \
12210 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12211 } while (0)
12212
12213/**
12214 * Calls an MMX assembly implementation taking three visible arguments.
12215 *
12216 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12217 * @param a0 The first extra argument.
12218 * @param a1 The second extra argument.
12219 * @param a2 The third extra argument.
12220 */
12221#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12222 do { \
12223 IEM_MC_PREPARE_FPU_USAGE(); \
12224 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12225 } while (0)
12226
12227
12228/**
12229 * Calls an SSE assembly implementation taking two visible arguments.
12230 *
12231 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12232 * @param a0 The first extra argument.
12233 * @param a1 The second extra argument.
12234 */
12235#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12236 do { \
12237 IEM_MC_PREPARE_SSE_USAGE(); \
12238 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12239 } while (0)
12240
12241/**
12242 * Calls an SSE assembly implementation taking three visible arguments.
12243 *
12244 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12245 * @param a0 The first extra argument.
12246 * @param a1 The second extra argument.
12247 * @param a2 The third extra argument.
12248 */
12249#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12250 do { \
12251 IEM_MC_PREPARE_SSE_USAGE(); \
12252 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12253 } while (0)
12254
12255
12256/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12257 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12258#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12259 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12260
12261/**
12262 * Calls an AVX assembly implementation taking two visible arguments.
12263 *
12264 * There is one implicit zero'th argument, a pointer to the extended state.
12265 *
12266 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12267 * @param a1 The first extra argument.
12268 * @param a2 The second extra argument.
12269 */
12270#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12271 do { \
12272 IEM_MC_PREPARE_AVX_USAGE(); \
12273 a_pfnAImpl(pXState, (a1), (a2)); \
12274 } while (0)
12275
12276/**
12277 * Calls an AVX assembly implementation taking three visible arguments.
12278 *
12279 * There is one implicit zero'th argument, a pointer to the extended state.
12280 *
12281 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12282 * @param a1 The first extra argument.
12283 * @param a2 The second extra argument.
12284 * @param a3 The third extra argument.
12285 */
12286#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12287 do { \
12288 IEM_MC_PREPARE_AVX_USAGE(); \
12289 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12290 } while (0)
12291
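/*
 * Illustrative sketch of the implicit zero'th argument wiring (the worker
 * name iemAImpl_vSomeOp_u256 is hypothetical):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();   // declares pXState as argument 0
 *      IEM_MC_ARG_CONST(uint8_t, iYRegDst, 0, 1);
 *      IEM_MC_ARG_CONST(uint8_t, iYRegSrc, 1, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_vSomeOp_u256, iYRegDst, iYRegSrc);
 *
 * Note that IEM_MC_CALL_AVX_AIMPL_2/3 invoke IEM_MC_PREPARE_AVX_USAGE()
 * themselves, so the body need not repeat it.
 */
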
12292/** @note Not for IOPL or IF testing. */
12293#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12294/** @note Not for IOPL or IF testing. */
12295#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12296/** @note Not for IOPL or IF testing. */
12297#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12298/** @note Not for IOPL or IF testing. */
12299#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12300/** @note Not for IOPL or IF testing. */
12301#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12302 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12303 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12304/** @note Not for IOPL or IF testing. */
12305#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12306 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12307 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12308/** @note Not for IOPL or IF testing. */
12309#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12310 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12311 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12312 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12313/** @note Not for IOPL or IF testing. */
12314#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12315 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12316 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12317 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12318#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12319#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12320#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12321/** @note Not for IOPL or IF testing. */
12322#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12323 if ( pVCpu->cpum.GstCtx.cx != 0 \
12324 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12325/** @note Not for IOPL or IF testing. */
12326#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12327 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12328 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12329/** @note Not for IOPL or IF testing. */
12330#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12331 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12332 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12333/** @note Not for IOPL or IF testing. */
12334#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12335 if ( pVCpu->cpum.GstCtx.cx != 0 \
12336 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12337/** @note Not for IOPL or IF testing. */
12338#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12339 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12340 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12341/** @note Not for IOPL or IF testing. */
12342#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12343 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12344 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12345#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12346#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12347
12348#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12349 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12350#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12351 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12352#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12353 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12354#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12355 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12356#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12357 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12358#define IEM_MC_IF_FCW_IM() \
12359 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12360
12361#define IEM_MC_ELSE() } else {
12362#define IEM_MC_ENDIF() } do {} while (0)
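
/*
 * Illustrative sketch of the conditional microcode blocks, in the style of a
 * SETcc register-form decoder body (a simplification, not a verbatim copy):
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1);
 *      IEM_MC_ELSE()
 *          IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * The IF macros open a brace that IEM_MC_ELSE/IEM_MC_ENDIF close, so they
 * must always be properly paired.
 */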
12363
12364/** @} */
12365
12366
12367/** @name Opcode Debug Helpers.
12368 * @{
12369 */
12370#ifdef VBOX_WITH_STATISTICS
12371# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12372#else
12373# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12374#endif
12375
12376#ifdef DEBUG
12377# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12378 do { \
12379 IEMOP_INC_STATS(a_Stats); \
12380 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12381 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12382 } while (0)
12383
12384# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12385 do { \
12386 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12387 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12388 (void)RT_CONCAT(OP_,a_Upper); \
12389 (void)(a_fDisHints); \
12390 (void)(a_fIemHints); \
12391 } while (0)
12392
12393# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12394 do { \
12395 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12396 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12397 (void)RT_CONCAT(OP_,a_Upper); \
12398 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12399 (void)(a_fDisHints); \
12400 (void)(a_fIemHints); \
12401 } while (0)
12402
12403# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12404 do { \
12405 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12406 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12407 (void)RT_CONCAT(OP_,a_Upper); \
12408 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12409 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12410 (void)(a_fDisHints); \
12411 (void)(a_fIemHints); \
12412 } while (0)
12413
12414# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12415 do { \
12416 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12417 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12418 (void)RT_CONCAT(OP_,a_Upper); \
12419 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12420 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12421 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12422 (void)(a_fDisHints); \
12423 (void)(a_fIemHints); \
12424 } while (0)
12425
12426# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12427 do { \
12428 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12429 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12430 (void)RT_CONCAT(OP_,a_Upper); \
12431 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12432 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12433 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12434 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12435 (void)(a_fDisHints); \
12436 (void)(a_fIemHints); \
12437 } while (0)
12438
12439#else
12440# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12441
12442# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12443 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12444# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12445 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12446# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12447 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12448# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12449 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12450# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12451 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12452
12453#endif
12454
12455#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12456 IEMOP_MNEMONIC0EX(a_Lower, \
12457 #a_Lower, \
12458 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12459#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12460 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12461 #a_Lower " " #a_Op1, \
12462 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12463#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12464 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12465 #a_Lower " " #a_Op1 "," #a_Op2, \
12466 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12467#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12468 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12469 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12470 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12471#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12472 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12473 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12474 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
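
/*
 * Illustrative sketch (a representative invocation, not quoted from a
 * specific decoder): a declaration like
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 *
 * bumps the statistics member add_Eb_Gb, logs the mnemonic as "add Eb,Gb" at
 * level 4, and merely compile-time checks that the form and operand tokens
 * (IEMOPFORM_MR, OP_ADD, OP_PARM_Eb, OP_PARM_Gb) exist.
 */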
12475
12476/** @} */
12477
12478
12479/** @name Opcode Helpers.
12480 * @{
12481 */
12482
12483#ifdef IN_RING3
12484# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12485 do { \
12486 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12487 else \
12488 { \
12489 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12490 return IEMOP_RAISE_INVALID_OPCODE(); \
12491 } \
12492 } while (0)
12493#else
12494# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12495 do { \
12496 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12497 else return IEMOP_RAISE_INVALID_OPCODE(); \
12498 } while (0)
12499#endif
12500
12501/** The instruction requires a 186 or later. */
12502#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12503# define IEMOP_HLP_MIN_186() do { } while (0)
12504#else
12505# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12506#endif
12507
12508/** The instruction requires a 286 or later. */
12509#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12510# define IEMOP_HLP_MIN_286() do { } while (0)
12511#else
12512# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12513#endif
12514
12515/** The instruction requires a 386 or later. */
12516#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12517# define IEMOP_HLP_MIN_386() do { } while (0)
12518#else
12519# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12520#endif
12521
12522/** The instruction requires a 386 or later if the given expression is true. */
12523#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12524# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12525#else
12526# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12527#endif
12528
12529/** The instruction requires a 486 or later. */
12530#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12531# define IEMOP_HLP_MIN_486() do { } while (0)
12532#else
12533# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12534#endif
12535
12536/** The instruction requires a Pentium (586) or later. */
12537#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12538# define IEMOP_HLP_MIN_586() do { } while (0)
12539#else
12540# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12541#endif
12542
12543/** The instruction requires a PentiumPro (686) or later. */
12544#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12545# define IEMOP_HLP_MIN_686() do { } while (0)
12546#else
12547# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12548#endif
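
/*
 * Illustrative sketch of the minimum CPU checks in use (BOUND really is a
 * 186+ instruction, but the decoder body shown is an abbreviated assumption):
 *
 *      FNIEMOP_DEF(iemOp_bound)
 *      {
 *          IEMOP_MNEMONIC(bound_Gv_Ma, "bound Gv,Ma");
 *          IEMOP_HLP_MIN_186();
 *          ...
 *      }
 *
 * When IEM_CFG_TARGET_CPU already guarantees the minimum, the check compiles
 * to nothing.
 */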
12549
12550
12551/** The instruction raises an \#UD in real and V8086 mode. */
12552#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12553 do \
12554 { \
12555 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12556 else return IEMOP_RAISE_INVALID_OPCODE(); \
12557 } while (0)
12558
12559#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12560/** The instruction raises an \#UD in real and V8086 mode, or when in long
12561 * mode without using a 64-bit code segment (applicable to all VMX
12562 * instructions except VMCALL).
12563 */
12564#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12565 do \
12566 { \
12567 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12568 && ( !IEM_IS_LONG_MODE(pVCpu) \
12569 || IEM_IS_64BIT_CODE(pVCpu))) \
12570 { /* likely */ } \
12571 else \
12572 { \
12573 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12574 { \
12575 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12576 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12577 return IEMOP_RAISE_INVALID_OPCODE(); \
12578 } \
12579 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12580 { \
12581 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12582 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12583 return IEMOP_RAISE_INVALID_OPCODE(); \
12584 } \
12585 } \
12586 } while (0)
12587
12588/** The instruction can only be executed in VMX operation (VMX root mode and
12589 * non-root mode).
12590 *
12591 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12592 */
12593# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12594 do \
12595 { \
12596 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12597 else \
12598 { \
12599 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12600 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12601 return IEMOP_RAISE_INVALID_OPCODE(); \
12602 } \
12603 } while (0)
12604#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12605
12606/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12607 * 64-bit mode. */
12608#define IEMOP_HLP_NO_64BIT() \
12609 do \
12610 { \
12611 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12612 return IEMOP_RAISE_INVALID_OPCODE(); \
12613 } while (0)
12614
12615/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12616 * 64-bit mode. */
12617#define IEMOP_HLP_ONLY_64BIT() \
12618 do \
12619 { \
12620 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12621 return IEMOP_RAISE_INVALID_OPCODE(); \
12622 } while (0)
12623
12624/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12625#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12626 do \
12627 { \
12628 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12629 iemRecalEffOpSize64Default(pVCpu); \
12630 } while (0)
12631
12632/** The instruction has 64-bit operand size if 64-bit mode. */
12633#define IEMOP_HLP_64BIT_OP_SIZE() \
12634 do \
12635 { \
12636 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12637 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12638 } while (0)
12639
12640/** Only a REX prefix immediately preceding the first opcode byte takes
12641 * effect. This macro helps ensure this as well as logging bad guest code. */
12642#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12643 do \
12644 { \
12645 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12646 { \
12647 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12648 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12649 pVCpu->iem.s.uRexB = 0; \
12650 pVCpu->iem.s.uRexIndex = 0; \
12651 pVCpu->iem.s.uRexReg = 0; \
12652 iemRecalEffOpSize(pVCpu); \
12653 } \
12654 } while (0)
12655
12656/**
12657 * Done decoding.
12658 */
12659#define IEMOP_HLP_DONE_DECODING() \
12660 do \
12661 { \
12662 /*nothing for now, maybe later... */ \
12663 } while (0)
12664
12665/**
12666 * Done decoding, raise \#UD exception if lock prefix present.
12667 */
12668#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12669 do \
12670 { \
12671 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12672 { /* likely */ } \
12673 else \
12674 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12675 } while (0)
12676
12677
12678/**
12679 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12680 * repnz or size prefixes are present, or if in real or v8086 mode.
12681 */
12682#define IEMOP_HLP_DONE_VEX_DECODING() \
12683 do \
12684 { \
12685 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12686 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12687 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12688 { /* likely */ } \
12689 else \
12690 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12691 } while (0)
12692
12693/**
12694 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12695 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12696 */
12697#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12698 do \
12699 { \
12700 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12701 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12702 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12703 && pVCpu->iem.s.uVexLength == 0)) \
12704 { /* likely */ } \
12705 else \
12706 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12707 } while (0)
12708
12709
12710/**
12711 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12712 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12713 * register 0, or if in real or v8086 mode.
12714 */
12715#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12716 do \
12717 { \
12718 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12719 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12720 && !pVCpu->iem.s.uVex3rdReg \
12721 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12722 { /* likely */ } \
12723 else \
12724 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12725 } while (0)
12726
12727/**
12728 * Done decoding VEX, no V, L=0.
12729 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12730 * we're in real or v8086 mode, if VEX.VVVV doesn't indicate register 0, or if VEX.L!=0.
12731 */
12732#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12733 do \
12734 { \
12735 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12736 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12737 && pVCpu->iem.s.uVexLength == 0 \
12738 && pVCpu->iem.s.uVex3rdReg == 0 \
12739 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12740 { /* likely */ } \
12741 else \
12742 return IEMOP_RAISE_INVALID_OPCODE(); \
12743 } while (0)
12744
12745#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12746 do \
12747 { \
12748 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12749 { /* likely */ } \
12750 else \
12751 { \
12752 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12753 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12754 } \
12755 } while (0)
12756#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12757 do \
12758 { \
12759 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12760 { /* likely */ } \
12761 else \
12762 { \
12763 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12764 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12765 } \
12766 } while (0)
12767
12768/**
12769 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12770 * are present.
12771 */
12772#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12773 do \
12774 { \
12775 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12776 { /* likely */ } \
12777 else \
12778 return IEMOP_RAISE_INVALID_OPCODE(); \
12779 } while (0)
12780
12781/**
12782 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12783 * prefixes are present.
12784 */
12785#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12786 do \
12787 { \
12788 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12789 { /* likely */ } \
12790 else \
12791 return IEMOP_RAISE_INVALID_OPCODE(); \
12792 } while (0)
12793
12794
12795/**
12796 * Calculates the effective address of a ModR/M memory operand.
12797 *
12798 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12799 *
12800 * @return Strict VBox status code.
12801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12802 * @param bRm The ModRM byte.
12803 * @param cbImm The size of any immediate following the
12804 * effective address opcode bytes. Important for
12805 * RIP relative addressing.
12806 * @param pGCPtrEff Where to return the effective address.
12807 */
12808IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12809{
12810 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12811# define SET_SS_DEF() \
12812 do \
12813 { \
12814 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12815 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12816 } while (0)
12817
12818 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12819 {
12820/** @todo Check the effective address size crap! */
12821 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12822 {
12823 uint16_t u16EffAddr;
12824
12825 /* Handle the disp16 form with no registers first. */
12826 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12827 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12828 else
12829 {
12830            /* Get the displacement. */
12831 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12832 {
12833 case 0: u16EffAddr = 0; break;
12834 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12835 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12836 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12837 }
12838
12839 /* Add the base and index registers to the disp. */
12840 switch (bRm & X86_MODRM_RM_MASK)
12841 {
12842 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12843 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12844 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12845 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12846 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12847 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12848 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12849 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12850 }
12851 }
12852
12853 *pGCPtrEff = u16EffAddr;
12854 }
12855 else
12856 {
12857 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12858 uint32_t u32EffAddr;
12859
12860 /* Handle the disp32 form with no registers first. */
12861 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12862 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12863 else
12864 {
12865 /* Get the register (or SIB) value. */
12866 switch ((bRm & X86_MODRM_RM_MASK))
12867 {
12868 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12869 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12870 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12871 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12872 case 4: /* SIB */
12873 {
12874 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12875
12876 /* Get the index and scale it. */
12877 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12878 {
12879 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12880 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12881 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12882 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12883 case 4: u32EffAddr = 0; /*none */ break;
12884 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12885 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12886 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12887 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12888 }
12889 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12890
12891 /* add base */
12892 switch (bSib & X86_SIB_BASE_MASK)
12893 {
12894 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12895 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12896 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12897 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12898 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12899 case 5:
12900 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12901 {
12902 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12903 SET_SS_DEF();
12904 }
12905 else
12906 {
12907 uint32_t u32Disp;
12908 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12909 u32EffAddr += u32Disp;
12910 }
12911 break;
12912 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12913 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12914 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12915 }
12916 break;
12917 }
12918 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12919 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12920 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12921 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12922 }
12923
12924 /* Get and add the displacement. */
12925 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12926 {
12927 case 0:
12928 break;
12929 case 1:
12930 {
12931 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12932 u32EffAddr += i8Disp;
12933 break;
12934 }
12935 case 2:
12936 {
12937 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12938 u32EffAddr += u32Disp;
12939 break;
12940 }
12941 default:
12942 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12943 }
12944
12945 }
12946 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12947 *pGCPtrEff = u32EffAddr;
12948 else
12949 {
12950 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12951 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12952 }
12953 }
12954 }
12955 else
12956 {
12957 uint64_t u64EffAddr;
12958
12959 /* Handle the rip+disp32 form with no registers first. */
12960 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12961 {
12962 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12963 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12964 }
12965 else
12966 {
12967 /* Get the register (or SIB) value. */
12968 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12969 {
12970 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12971 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12972 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12973 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12974 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12975 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12976 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12977 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12978 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12979 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12980 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12981 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12982 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12983 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12984 /* SIB */
12985 case 4:
12986 case 12:
12987 {
12988 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12989
12990 /* Get the index and scale it. */
12991 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12992 {
12993 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12994 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12995 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12996 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12997 case 4: u64EffAddr = 0; /*none */ break;
12998 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12999 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13000 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13001 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13002 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13003 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13004 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13005 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13006 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13007 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13008 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13009 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13010 }
13011 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13012
13013 /* add base */
13014 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13015 {
13016 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13017 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13018 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13019 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13020 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13021 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13022 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13023 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13024 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13025 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13026 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13027 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13028 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13029 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13030 /* complicated encodings */
13031 case 5:
13032 case 13:
13033 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13034 {
13035 if (!pVCpu->iem.s.uRexB)
13036 {
13037 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13038 SET_SS_DEF();
13039 }
13040 else
13041 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13042 }
13043 else
13044 {
13045 uint32_t u32Disp;
13046 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13047 u64EffAddr += (int32_t)u32Disp;
13048 }
13049 break;
13050 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13051 }
13052 break;
13053 }
13054 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13055 }
13056
13057 /* Get and add the displacement. */
13058 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13059 {
13060 case 0:
13061 break;
13062 case 1:
13063 {
13064 int8_t i8Disp;
13065 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13066 u64EffAddr += i8Disp;
13067 break;
13068 }
13069 case 2:
13070 {
13071 uint32_t u32Disp;
13072 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13073 u64EffAddr += (int32_t)u32Disp;
13074 break;
13075 }
13076 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13077 }
13078
13079 }
13080
13081 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13082 *pGCPtrEff = u64EffAddr;
13083 else
13084 {
13085 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13086 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13087 }
13088 }
13089
13090 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13091 return VINF_SUCCESS;
13092}
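
/*
 * Worked examples for the decoder above (informal, for orientation):
 *      - 16-bit addressing, bRm=0x46 (mod=1, r/m=6) with disp8=0x10:
 *        EffAddr = BP + 0x10 and SS becomes the default segment.
 *      - 64-bit mode, bRm=0x05 (mod=0, r/m=5) with disp32=0x100:
 *        EffAddr = RIP of the *next* instruction + 0x100, which is why the
 *        cbImm parameter (immediate bytes still to be fetched) is needed.
 */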
13093
13094
13095/**
13096 * Calculates the effective address of a ModR/M memory operand.
13097 *
13098 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13099 *
13100 * @return Strict VBox status code.
13101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13102 * @param bRm The ModRM byte.
13103 * @param cbImm The size of any immediate following the
13104 * effective address opcode bytes. Important for
13105 * RIP relative addressing.
13106 * @param pGCPtrEff Where to return the effective address.
13107 * @param offRsp RSP displacement.
13108 */
13109IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13110{
13111    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13112# define SET_SS_DEF() \
13113 do \
13114 { \
13115 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13116 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13117 } while (0)
13118
13119 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13120 {
13121/** @todo Check the effective address size crap! */
13122 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13123 {
13124 uint16_t u16EffAddr;
13125
13126 /* Handle the disp16 form with no registers first. */
13127 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13128 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13129 else
13130 {
13131 /* Get the displacement. */
13132 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13133 {
13134 case 0: u16EffAddr = 0; break;
13135 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13136 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13137 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13138 }
13139
13140 /* Add the base and index registers to the disp. */
13141 switch (bRm & X86_MODRM_RM_MASK)
13142 {
13143 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13144 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13145 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13146 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13147 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13148 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13149 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13150 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13151 }
13152 }
13153
13154 *pGCPtrEff = u16EffAddr;
13155 }
13156 else
13157 {
13158 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13159 uint32_t u32EffAddr;
13160
13161 /* Handle the disp32 form with no registers first. */
13162 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13163 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13164 else
13165 {
13166 /* Get the register (or SIB) value. */
13167 switch ((bRm & X86_MODRM_RM_MASK))
13168 {
13169 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13170 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13171 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13172 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13173 case 4: /* SIB */
13174 {
13175 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13176
13177 /* Get the index and scale it. */
13178 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13179 {
13180 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13181 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13182 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13183 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13184 case 4: u32EffAddr = 0; /*none */ break;
13185 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13186 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13187 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13188 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13189 }
13190 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13191
13192 /* add base */
13193 switch (bSib & X86_SIB_BASE_MASK)
13194 {
13195 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13196 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13197 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13198 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13199 case 4:
13200 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13201 SET_SS_DEF();
13202 break;
13203 case 5:
13204 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13205 {
13206 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13207 SET_SS_DEF();
13208 }
13209 else
13210 {
13211 uint32_t u32Disp;
13212 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13213 u32EffAddr += u32Disp;
13214 }
13215 break;
13216 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13217 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13218 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13219 }
13220 break;
13221 }
13222 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13223 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13224 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13225 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13226 }
13227
13228 /* Get and add the displacement. */
13229 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13230 {
13231 case 0:
13232 break;
13233 case 1:
13234 {
13235 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13236 u32EffAddr += i8Disp;
13237 break;
13238 }
13239 case 2:
13240 {
13241 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13242 u32EffAddr += u32Disp;
13243 break;
13244 }
13245 default:
13246 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13247 }
13248
13249 }
13250 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13251 *pGCPtrEff = u32EffAddr;
13252 else
13253 {
13254 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13255 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13256 }
13257 }
13258 }
13259 else
13260 {
13261 uint64_t u64EffAddr;
13262
13263 /* Handle the rip+disp32 form with no registers first. */
13264 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13265 {
13266 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13267 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13268 }
13269 else
13270 {
13271 /* Get the register (or SIB) value. */
13272 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13273 {
13274 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13275 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13276 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13277 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13278 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13279 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13280 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13281 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13282 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13283 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13284 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13285 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13286 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13287 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13288 /* SIB */
13289 case 4:
13290 case 12:
13291 {
13292 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13293
13294 /* Get the index and scale it. */
13295 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13296 {
13297 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13298 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13299 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13300 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13301 case 4: u64EffAddr = 0; /*none */ break;
13302 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13303 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13304 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13305 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13306 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13307 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13308 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13309 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13310 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13311 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13312 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13313 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13314 }
13315 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13316
13317 /* add base */
13318 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13319 {
13320 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13321 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13322 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13323 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13324 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13325 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13326 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13327 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13328 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13329 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13330 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13331 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13332 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13333 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13334 /* complicated encodings */
13335 case 5:
13336 case 13:
13337 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13338 {
13339 if (!pVCpu->iem.s.uRexB)
13340 {
13341 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13342 SET_SS_DEF();
13343 }
13344 else
13345 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13346 }
13347 else
13348 {
13349 uint32_t u32Disp;
13350 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13351 u64EffAddr += (int32_t)u32Disp;
13352 }
13353 break;
13354 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13355 }
13356 break;
13357 }
13358 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13359 }
13360
13361 /* Get and add the displacement. */
13362 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13363 {
13364 case 0:
13365 break;
13366 case 1:
13367 {
13368 int8_t i8Disp;
13369 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13370 u64EffAddr += i8Disp;
13371 break;
13372 }
13373 case 2:
13374 {
13375 uint32_t u32Disp;
13376 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13377 u64EffAddr += (int32_t)u32Disp;
13378 break;
13379 }
13380 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13381 }
13382
13383 }
13384
13385 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13386 *pGCPtrEff = u64EffAddr;
13387 else
13388 {
13389 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13390 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13391 }
13392 }
13393
13394 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13395 return VINF_SUCCESS;
13396}
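
/*
 * The only behavioral difference from iemOpHlpCalcRmEffAddr is the offRsp
 * bias applied to the RSP/ESP-based encodings above.  Sketch of a
 * hypothetical caller needing addresses relative to a biased stack pointer:
 *
 *      RTGCPTR GCPtrEff;
 *      VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, cbImm, &GCPtrEff, 8);
 *
 * With a SIB base of RSP this yields rsp+8-relative effective addresses.
 */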
13397
13398
13399#ifdef IEM_WITH_SETJMP
13400/**
13401 * Calculates the effective address of a ModR/M memory operand.
13402 *
13403 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13404 *
13405 * May longjmp on internal error.
13406 *
13407 * @return The effective address.
13408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13409 * @param bRm The ModRM byte.
13410 * @param cbImm The size of any immediate following the
13411 * effective address opcode bytes. Important for
13412 * RIP relative addressing.
13413 */
13414IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13415{
13416 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13417# define SET_SS_DEF() \
13418 do \
13419 { \
13420 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13421 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13422 } while (0)
13423
13424 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13425 {
13426/** @todo Check the effective address size crap! */
13427 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13428 {
13429 uint16_t u16EffAddr;
13430
13431 /* Handle the disp16 form with no registers first. */
13432 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13433 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13434 else
13435 {
13436 /* Get the displacement. */
13437 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13438 {
13439 case 0: u16EffAddr = 0; break;
13440 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13441 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13442 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13443 }
13444
13445 /* Add the base and index registers to the disp. */
13446 switch (bRm & X86_MODRM_RM_MASK)
13447 {
13448 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13449 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13450 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13451 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13452 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13453 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13454 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13455 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13456 }
13457 }
13458
13459 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13460 return u16EffAddr;
13461 }
13462
13463 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13464 uint32_t u32EffAddr;
13465
13466 /* Handle the disp32 form with no registers first. */
13467 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13468 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13469 else
13470 {
13471 /* Get the register (or SIB) value. */
13472 switch ((bRm & X86_MODRM_RM_MASK))
13473 {
13474 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13475 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13476 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13477 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13478 case 4: /* SIB */
13479 {
13480 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13481
13482 /* Get the index and scale it. */
13483 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13484 {
13485 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13486 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13487 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13488 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13489 case 4: u32EffAddr = 0; /*none */ break;
13490 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13491 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13492 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13493 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13494 }
13495 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13496
13497 /* add base */
13498 switch (bSib & X86_SIB_BASE_MASK)
13499 {
13500 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13501 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13502 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13503 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13504 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13505 case 5:
13506 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13507 {
13508 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13509 SET_SS_DEF();
13510 }
13511 else
13512 {
13513 uint32_t u32Disp;
13514 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13515 u32EffAddr += u32Disp;
13516 }
13517 break;
13518 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13519 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13520 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13521 }
13522 break;
13523 }
13524 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13525 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13526 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13527 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13528 }
13529
13530 /* Get and add the displacement. */
13531 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13532 {
13533 case 0:
13534 break;
13535 case 1:
13536 {
13537 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13538 u32EffAddr += i8Disp;
13539 break;
13540 }
13541 case 2:
13542 {
13543 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13544 u32EffAddr += u32Disp;
13545 break;
13546 }
13547 default:
13548 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13549 }
13550 }
13551
13552 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13553 {
13554 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13555 return u32EffAddr;
13556 }
13557 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13558 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13559 return u32EffAddr & UINT16_MAX;
13560 }
13561
13562 uint64_t u64EffAddr;
13563
13564 /* Handle the rip+disp32 form with no registers first. */
13565 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13566 {
13567 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13568 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13569 }
13570 else
13571 {
13572 /* Get the register (or SIB) value. */
13573 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13574 {
13575 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13576 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13577 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13578 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13579 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13580 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13581 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13582 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13583 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13584 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13585 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13586 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13587 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13588 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13589 /* SIB */
13590 case 4:
13591 case 12:
13592 {
13593 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13594
13595 /* Get the index and scale it. */
13596 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13597 {
13598 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13599 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13600 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13601 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13602 case 4: u64EffAddr = 0; /*none */ break;
13603 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13604 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13605 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13606 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13607 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13608 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13609 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13610 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13611 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13612 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13613 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13615 }
13616 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13617
13618 /* add base */
13619 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13620 {
13621 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13622 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13623 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13624 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13625 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13626 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13627 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13628 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13629 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13630 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13631 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13632 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13633 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13634 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13635 /* complicated encodings */
13636 case 5:
13637 case 13:
13638 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13639 {
13640 if (!pVCpu->iem.s.uRexB)
13641 {
13642 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13643 SET_SS_DEF();
13644 }
13645 else
13646 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13647 }
13648 else
13649 {
13650 uint32_t u32Disp;
13651 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13652 u64EffAddr += (int32_t)u32Disp;
13653 }
13654 break;
13655 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13656 }
13657 break;
13658 }
13659 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13660 }
13661
13662 /* Get and add the displacement. */
13663 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13664 {
13665 case 0:
13666 break;
13667 case 1:
13668 {
13669 int8_t i8Disp;
13670 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13671 u64EffAddr += i8Disp;
13672 break;
13673 }
13674 case 2:
13675 {
13676 uint32_t u32Disp;
13677 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13678 u64EffAddr += (int32_t)u32Disp;
13679 break;
13680 }
13681 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13682 }
13683
13684 }
13685
13686 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13687 {
13688 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13689 return u64EffAddr;
13690 }
13691 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13692 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13693 return u64EffAddr & UINT32_MAX;
13694}
13695#endif /* IEM_WITH_SETJMP */
13696
13697/** @} */
13698
13699
13700
13701/*
13702 * Include the instructions
13703 */
13704#include "IEMAllInstructions.cpp.h"
13705
13706
13707
13708#ifdef LOG_ENABLED
13709/**
13710 * Logs the current instruction.
13711 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13712 * @param fSameCtx Set if we have the same context information as the VMM,
13713 * clear if we may have already executed an instruction in
13714 * our debug context. When clear, we assume IEMCPU holds
13715 * valid CPU mode info.
13716 *
13717 * The @a fSameCtx parameter is now misleading and obsolete.
13718 * @param pszFunction The IEM function doing the execution.
13719 */
13720IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13721{
13722# ifdef IN_RING3
13723 if (LogIs2Enabled())
13724 {
13725 char szInstr[256];
13726 uint32_t cbInstr = 0;
13727 if (fSameCtx)
13728 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13729 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13730 szInstr, sizeof(szInstr), &cbInstr);
13731 else
13732 {
13733 uint32_t fFlags = 0;
13734 switch (pVCpu->iem.s.enmCpuMode)
13735 {
13736 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13737 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13738 case IEMMODE_16BIT:
13739 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13740 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13741 else
13742 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13743 break;
13744 }
13745 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13746 szInstr, sizeof(szInstr), &cbInstr);
13747 }
13748
13749 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13750 Log2(("**** %s\n"
13751 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13752 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13753 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13754 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13755 " %s\n"
13756 , pszFunction,
13757 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13758 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13759 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13760 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13761 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13762 szInstr));
13763
13764 if (LogIs3Enabled())
13765 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13766 }
13767 else
13768# endif
13769 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13770 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13771 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13772}
13773#endif /* LOG_ENABLED */
13774
13775
13776#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13777/**
13778 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13779 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13780 *
13781 * @returns Modified rcStrict.
13782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13783 * @param rcStrict The instruction execution status.
13784 */
13785static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13786{
13787 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13788 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13789 {
13790 /* VMX preemption timer takes priority over NMI-window exits. */
13791 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13792 {
13793 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13794 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13795 }
13796 /*
13797 * Check remaining intercepts.
13798 *
13799 * NMI-window and Interrupt-window VM-exits.
13800 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13801 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13802 *
13803 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13804 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13805 */
13806 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13807 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13808 && !TRPMHasTrap(pVCpu))
13809 {
13810 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13811 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13812 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13813 {
13814 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13815 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13816 }
13817 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13818 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13819 {
13820 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13821 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13822 }
13823 }
13824 }
13825 /* TPR-below threshold/APIC write has the highest priority. */
13826 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13827 {
13828 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13829 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13830 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13831 }
13832 /* MTF takes priority over VMX-preemption timer. */
13833 else
13834 {
13835 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13836 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13837 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13838 }
13839 return rcStrict;
13840}
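
/*
 * Reading iemHandleNestedInstructionBoundraryFFs top to bottom, the priority
 * order it implements is (highest first):
 *      1. TPR-below-threshold / APIC-write emulation (VMCPU_FF_VMX_APIC_WRITE)
 *      2. Monitor-trap-flag (MTF) VM-exit            (VMCPU_FF_VMX_MTF)
 *      3. VMX-preemption timer VM-exit               (VMCPU_FF_VMX_PREEMPT_TIMER)
 *      4. NMI-window VM-exit                         (VMCPU_FF_VMX_NMI_WINDOW)
 *      5. Interrupt-window VM-exit                   (VMCPU_FF_VMX_INT_WINDOW)
 */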
13841#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13842
13843
13844/**
13845 * Makes status code adjustments (pass up from I/O and access handlers)
13846 * as well as maintaining statistics.
13847 *
13848 * @returns Strict VBox status code to pass up.
13849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13850 * @param rcStrict The status from executing an instruction.
13851 */
13852DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13853{
13854 if (rcStrict != VINF_SUCCESS)
13855 {
13856 if (RT_SUCCESS(rcStrict))
13857 {
13858 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13859 || rcStrict == VINF_IOM_R3_IOPORT_READ
13860 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13861 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13862 || rcStrict == VINF_IOM_R3_MMIO_READ
13863 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13864 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13865 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13866 || rcStrict == VINF_CPUM_R3_MSR_READ
13867 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13868 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13869 || rcStrict == VINF_EM_RAW_TO_R3
13870 || rcStrict == VINF_EM_TRIPLE_FAULT
13871 || rcStrict == VINF_GIM_R3_HYPERCALL
13872 /* raw-mode / virt handlers only: */
13873 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13874 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13875 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13876 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13877 || rcStrict == VINF_SELM_SYNC_GDT
13878 || rcStrict == VINF_CSAM_PENDING_ACTION
13879 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13880 /* nested hw.virt codes: */
13881 || rcStrict == VINF_VMX_VMEXIT
13882 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13883 || rcStrict == VINF_SVM_VMEXIT
13884 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13885/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13886 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13887#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13888 if ( rcStrict == VINF_VMX_VMEXIT
13889 && rcPassUp == VINF_SUCCESS)
13890 rcStrict = VINF_SUCCESS;
13891 else
13892#endif
13893#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13894 if ( rcStrict == VINF_SVM_VMEXIT
13895 && rcPassUp == VINF_SUCCESS)
13896 rcStrict = VINF_SUCCESS;
13897 else
13898#endif
13899 if (rcPassUp == VINF_SUCCESS)
13900 pVCpu->iem.s.cRetInfStatuses++;
13901 else if ( rcPassUp < VINF_EM_FIRST
13902 || rcPassUp > VINF_EM_LAST
13903 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13904 {
13905 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13906 pVCpu->iem.s.cRetPassUpStatus++;
13907 rcStrict = rcPassUp;
13908 }
13909 else
13910 {
13911 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13912 pVCpu->iem.s.cRetInfStatuses++;
13913 }
13914 }
13915 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13916 pVCpu->iem.s.cRetAspectNotImplemented++;
13917 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13918 pVCpu->iem.s.cRetInstrNotImplemented++;
13919 else
13920 pVCpu->iem.s.cRetErrStatuses++;
13921 }
13922 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13923 {
13924 pVCpu->iem.s.cRetPassUpStatus++;
13925 rcStrict = pVCpu->iem.s.rcPassUp;
13926 }
13927
13928 return rcStrict;
13929}
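
/*
 * Pass-up merge rule illustrated (derived from the code above): a non-zero
 * pVCpu->iem.s.rcPassUp replaces an informational rcStrict when it is not an
 * EM status at all, or when it is a higher priority (numerically lower) EM
 * status.  E.g. with rcStrict = VINF_IOM_R3_IOPORT_READ and rcPassUp =
 * VINF_EM_RAW_TO_R3, the pass-up status wins and VINF_EM_RAW_TO_R3 is
 * returned; the statistics counters are bumped either way.
 */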
13930
13931
13932/**
13933 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13934 * IEMExecOneWithPrefetchedByPC.
13935 *
13936 * Similar code is found in IEMExecLots.
13937 *
13938 * @return Strict VBox status code.
13939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13940 * @param fExecuteInhibit If set, execute the instruction following CLI,
13941 * POP SS and MOV SS,GR.
13942 * @param pszFunction The calling function name.
13943 */
13944DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13945{
13946 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13947 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13948 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13949 RT_NOREF_PV(pszFunction);
13950
13951#ifdef IEM_WITH_SETJMP
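    /*
     * Longjmp-based error propagation: save the caller's jump buffer, install
     * our own, and let deep failure paths (e.g. opcode fetching) longjmp back
     * here with the status code instead of threading it through every return.
     */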
13952 VBOXSTRICTRC rcStrict;
13953 jmp_buf JmpBuf;
13954 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13955 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13956 if ((rcStrict = setjmp(JmpBuf)) == 0)
13957 {
13958 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13959 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13960 }
13961 else
13962 pVCpu->iem.s.cLongJumps++;
13963 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13964#else
13965 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13966 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13967#endif
13968 if (rcStrict == VINF_SUCCESS)
13969 pVCpu->iem.s.cInstructions++;
13970 if (pVCpu->iem.s.cActiveMappings > 0)
13971 {
13972 Assert(rcStrict != VINF_SUCCESS);
13973 iemMemRollback(pVCpu);
13974 }
13975 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13976 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13977 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13978
13979//#ifdef DEBUG
13980// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13981//#endif
13982
13983#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13984 /*
13985 * Perform any VMX nested-guest instruction boundary actions.
13986 *
13987 * If any of these causes a VM-exit, we must skip executing the next
13988 * instruction (would run into stale page tables). A VM-exit makes sure
13989 * there is no interrupt-inhibition, which should ensure we don't go on
13990 * to execute the next instruction. Clearing fExecuteInhibit is
13991 * problematic because of the setjmp/longjmp clobbering above.
13992 */
13993 if ( rcStrict == VINF_SUCCESS
13994 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
13995 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
13996 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
13997#endif
13998
13999 /* Execute the next instruction as well if a cli, pop ss or
14000 mov ss, Gr has just completed successfully. */
14001 if ( fExecuteInhibit
14002 && rcStrict == VINF_SUCCESS
14003 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14004 && EMIsInhibitInterruptsActive(pVCpu))
14005 {
14006 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14007 if (rcStrict == VINF_SUCCESS)
14008 {
14009#ifdef LOG_ENABLED
14010 iemLogCurInstr(pVCpu, false, pszFunction);
14011#endif
14012#ifdef IEM_WITH_SETJMP
14013 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14014 if ((rcStrict = setjmp(JmpBuf)) == 0)
14015 {
14016 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14017 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14018 }
14019 else
14020 pVCpu->iem.s.cLongJumps++;
14021 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14022#else
14023 IEM_OPCODE_GET_NEXT_U8(&b);
14024 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14025#endif
14026 if (rcStrict == VINF_SUCCESS)
14027 pVCpu->iem.s.cInstructions++;
14028 if (pVCpu->iem.s.cActiveMappings > 0)
14029 {
14030 Assert(rcStrict != VINF_SUCCESS);
14031 iemMemRollback(pVCpu);
14032 }
14033 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14034 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14035 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14036 }
14037 else if (pVCpu->iem.s.cActiveMappings > 0)
14038 iemMemRollback(pVCpu);
14039 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14040 }
14041
14042 /*
14043 * Return value fiddling, statistics and sanity assertions.
14044 */
14045 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14046
14047 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14048 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14049 return rcStrict;
14050}
14051
14052
14053/**
14054 * Execute one instruction.
14055 *
14056 * @return Strict VBox status code.
14057 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14058 */
14059VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14060{
14061#ifdef LOG_ENABLED
14062 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14063#endif
14064
14065 /*
14066 * Do the decoding and emulation.
14067 */
14068 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14069 if (rcStrict == VINF_SUCCESS)
14070 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14071 else if (pVCpu->iem.s.cActiveMappings > 0)
14072 iemMemRollback(pVCpu);
14073
14074 if (rcStrict != VINF_SUCCESS)
14075 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14076 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14077 return rcStrict;
14078}
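
/*
 * Usage sketch (illustrative; assumes the call is made on the EMT with a
 * valid guest context):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict == VINF_SUCCESS)
 *          ...                 RIP has advanced past exactly one instruction.
 *      else
 *          ...                 Handle/pass up (see iemExecStatusCodeFiddling).
 */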
14079
14080
14081VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14082{
14083 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14084
14085 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14086 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14087 if (rcStrict == VINF_SUCCESS)
14088 {
14089 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14090 if (pcbWritten)
14091 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14092 }
14093 else if (pVCpu->iem.s.cActiveMappings > 0)
14094 iemMemRollback(pVCpu);
14095
14096 return rcStrict;
14097}
14098
14099
14100VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14101 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14102{
14103 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14104
14105 VBOXSTRICTRC rcStrict;
14106 if ( cbOpcodeBytes
14107 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14108 {
14109 iemInitDecoder(pVCpu, false);
14110#ifdef IEM_WITH_CODE_TLB
14111 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14112 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14113 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14114 pVCpu->iem.s.offCurInstrStart = 0;
14115 pVCpu->iem.s.offInstrNextByte = 0;
14116#else
14117 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14118 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14119#endif
14120 rcStrict = VINF_SUCCESS;
14121 }
14122 else
14123 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14124 if (rcStrict == VINF_SUCCESS)
14125 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14126 else if (pVCpu->iem.s.cActiveMappings > 0)
14127 iemMemRollback(pVCpu);
14128
14129 return rcStrict;
14130}
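
/*
 * Usage sketch for the prefetched variant (illustrative; abOpcodes is a
 * hypothetical buffer the caller already fetched at the current RIP):
 *
 *      uint8_t abOpcodes[16];
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu,
 *                                  CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                  pVCpu->cpum.GstCtx.rip, abOpcodes,
 *                                  sizeof(abOpcodes));
 *
 * If OpcodeBytesPC does not match the current RIP, the bytes are ignored and
 * a normal prefetch is performed instead (see above).
 */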
14131
14132
14133VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14134{
14135 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14136
14137 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14138 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14139 if (rcStrict == VINF_SUCCESS)
14140 {
14141 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14142 if (pcbWritten)
14143 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14144 }
14145 else if (pVCpu->iem.s.cActiveMappings > 0)
14146 iemMemRollback(pVCpu);
14147
14148 return rcStrict;
14149}
14150
14151
14152VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14153 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14154{
14155 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14156
14157 VBOXSTRICTRC rcStrict;
14158 if ( cbOpcodeBytes
14159 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14160 {
14161 iemInitDecoder(pVCpu, true);
14162#ifdef IEM_WITH_CODE_TLB
14163 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14164 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14165 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14166 pVCpu->iem.s.offCurInstrStart = 0;
14167 pVCpu->iem.s.offInstrNextByte = 0;
14168#else
14169 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14170 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14171#endif
14172 rcStrict = VINF_SUCCESS;
14173 }
14174 else
14175 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14176 if (rcStrict == VINF_SUCCESS)
14177 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14178 else if (pVCpu->iem.s.cActiveMappings > 0)
14179 iemMemRollback(pVCpu);
14180
14181 return rcStrict;
14182}
14183
14184
14185/**
14186 * For debugging DISGetParamSize; may come in handy.
14187 *
14188 * @returns Strict VBox status code.
14189 * @param pVCpu The cross context virtual CPU structure of the
14190 * calling EMT.
14191 * @param pCtxCore The context core structure.
14192 * @param OpcodeBytesPC The PC of the opcode bytes.
14193 * @param pvOpcodeBytes Prefetched opcode bytes.
14194 * @param cbOpcodeBytes Number of prefetched bytes.
14195 * @param pcbWritten Where to return the number of bytes written.
14196 * Optional.
14197 */
14198VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14199 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14200 uint32_t *pcbWritten)
14201{
14202 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14203
14204 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14205 VBOXSTRICTRC rcStrict;
14206 if ( cbOpcodeBytes
14207 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14208 {
14209 iemInitDecoder(pVCpu, true);
14210#ifdef IEM_WITH_CODE_TLB
14211 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14212 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14213 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14214 pVCpu->iem.s.offCurInstrStart = 0;
14215 pVCpu->iem.s.offInstrNextByte = 0;
14216#else
14217 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14218 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14219#endif
14220 rcStrict = VINF_SUCCESS;
14221 }
14222 else
14223 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14224 if (rcStrict == VINF_SUCCESS)
14225 {
14226 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14227 if (pcbWritten)
14228 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14229 }
14230 else if (pVCpu->iem.s.cActiveMappings > 0)
14231 iemMemRollback(pVCpu);
14232
14233 return rcStrict;
14234}
14235
14236
14237VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14238{
14239 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14240 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
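    /*
     * Note: cPollRate is used as a mask below (hence the power-of-two-minus-one
     * requirement).  E.g. cPollRate=0x3f means TMTimerPollBool is consulted
     * roughly every 64th instruction.
     */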
14241
14242 /*
14243 * See if there is an interrupt pending in TRPM, inject it if we can.
14244 */
14245 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14246#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14247 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14248 if (fIntrEnabled)
14249 {
14250 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14251 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14252 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14253 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14254 else
14255 {
14256 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14257 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14258 }
14259 }
14260#else
14261 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14262#endif
14263
14264 /** @todo What if we are injecting an exception and not an interrupt? Is that
14265 * possible here? For now we assert it is indeed only an interrupt. */
14266 if ( fIntrEnabled
14267 && TRPMHasTrap(pVCpu)
14268 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14269 {
14270 uint8_t u8TrapNo;
14271 TRPMEVENT enmType;
14272 uint32_t uErrCode;
14273 RTGCPTR uCr2;
14274 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14275 AssertRC(rc2);
14276 Assert(enmType == TRPM_HARDWARE_INT);
14277 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14278 TRPMResetTrap(pVCpu);
14279#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14280 /* Injecting an event may cause a VM-exit. */
14281 if ( rcStrict != VINF_SUCCESS
14282 && rcStrict != VINF_IEM_RAISED_XCPT)
14283 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14284#else
14285 NOREF(rcStrict);
14286#endif
14287 }
14288
14289 /*
14290 * Initial decoder init w/ prefetch, then set up setjmp.
14291 */
14292 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14293 if (rcStrict == VINF_SUCCESS)
14294 {
14295#ifdef IEM_WITH_SETJMP
14296 jmp_buf JmpBuf;
14297 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14298 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14299 pVCpu->iem.s.cActiveMappings = 0;
14300 if ((rcStrict = setjmp(JmpBuf)) == 0)
14301#endif
14302 {
14303 /*
14304 * The run loop. We limit ourselves to the caller-specified instruction count.
14305 */
14306 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14307 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14308 for (;;)
14309 {
14310 /*
14311 * Log the state.
14312 */
14313#ifdef LOG_ENABLED
14314 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14315#endif
14316
14317 /*
14318 * Do the decoding and emulation.
14319 */
14320 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14321 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14322 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14323 {
14324 Assert(pVCpu->iem.s.cActiveMappings == 0);
14325 pVCpu->iem.s.cInstructions++;
14326 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14327 {
14328 uint64_t fCpu = pVCpu->fLocalForcedActions
14329 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14330 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14331 | VMCPU_FF_TLB_FLUSH
14332 | VMCPU_FF_INHIBIT_INTERRUPTS
14333 | VMCPU_FF_BLOCK_NMIS
14334 | VMCPU_FF_UNHALT ));
14335
14336 if (RT_LIKELY( ( !fCpu
14337 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14338 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14339 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14340 {
14341 if (cMaxInstructionsGccStupidity-- > 0)
14342 {
14343 /* Poll timers every now and then according to the caller's specs. */
14344 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14345 || !TMTimerPollBool(pVM, pVCpu))
14346 {
14347 Assert(pVCpu->iem.s.cActiveMappings == 0);
14348 iemReInitDecoder(pVCpu);
14349 continue;
14350 }
14351 }
14352 }
14353 }
14354 Assert(pVCpu->iem.s.cActiveMappings == 0);
14355 }
14356 else if (pVCpu->iem.s.cActiveMappings > 0)
14357 iemMemRollback(pVCpu);
14358 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14359 break;
14360 }
14361 }
14362#ifdef IEM_WITH_SETJMP
14363 else
14364 {
14365 if (pVCpu->iem.s.cActiveMappings > 0)
14366 iemMemRollback(pVCpu);
14367# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14368 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14369# endif
14370 pVCpu->iem.s.cLongJumps++;
14371 }
14372 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14373#endif
14374
14375 /*
14376 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14377 */
14378 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14379 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14380 }
14381 else
14382 {
14383 if (pVCpu->iem.s.cActiveMappings > 0)
14384 iemMemRollback(pVCpu);
14385
14386#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14387 /*
14388 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14389 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14390 */
14391 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14392#endif
14393 }
14394
14395 /*
14396 * Maybe re-enter raw-mode and log.
14397 */
14398 if (rcStrict != VINF_SUCCESS)
14399 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14400 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14401 if (pcInstructions)
14402 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14403 return rcStrict;
14404}
14405
14406
14407/**
14408 * Interface used by EMExecuteExec; handles exit statistics and limits.
14409 *
14410 * @returns Strict VBox status code.
14411 * @param pVCpu The cross context virtual CPU structure.
14412 * @param fWillExit To be defined.
14413 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14414 * @param cMaxInstructions Maximum number of instructions to execute.
14415 * @param cMaxInstructionsWithoutExits
14416 * The max number of instructions without exits.
14417 * @param pStats Where to return statistics.
14418 */
14419VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14420 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14421{
14422 NOREF(fWillExit); /** @todo define flexible exit criteria */
14423
14424 /*
14425 * Initialize return stats.
14426 */
14427 pStats->cInstructions = 0;
14428 pStats->cExits = 0;
14429 pStats->cMaxExitDistance = 0;
14430 pStats->cReserved = 0;
14431
14432 /*
14433 * Initial decoder init w/ prefetch, then set up setjmp.
14434 */
14435 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14436 if (rcStrict == VINF_SUCCESS)
14437 {
14438#ifdef IEM_WITH_SETJMP
14439 jmp_buf JmpBuf;
14440 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14441 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14442 pVCpu->iem.s.cActiveMappings = 0;
14443 if ((rcStrict = setjmp(JmpBuf)) == 0)
14444#endif
14445 {
14446#ifdef IN_RING0
14447 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14448#endif
14449 uint32_t cInstructionSinceLastExit = 0;
14450
14451 /*
14452 * The run loop. We limit ourselves to the caller-specified instruction count.
14453 */
14454 PVM pVM = pVCpu->CTX_SUFF(pVM);
14455 for (;;)
14456 {
14457 /*
14458 * Log the state.
14459 */
14460#ifdef LOG_ENABLED
14461 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14462#endif
14463
14464 /*
14465 * Do the decoding and emulation.
14466 */
14467 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14468
14469 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14470 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14471
14472 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14473 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14474 {
14475 pStats->cExits += 1;
14476 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14477 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14478 cInstructionSinceLastExit = 0;
14479 }
14480
14481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14482 {
14483 Assert(pVCpu->iem.s.cActiveMappings == 0);
14484 pVCpu->iem.s.cInstructions++;
14485 pStats->cInstructions++;
14486 cInstructionSinceLastExit++;
14487 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14488 {
14489 uint64_t fCpu = pVCpu->fLocalForcedActions
14490 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14491 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14492 | VMCPU_FF_TLB_FLUSH
14493 | VMCPU_FF_INHIBIT_INTERRUPTS
14494 | VMCPU_FF_BLOCK_NMIS
14495 | VMCPU_FF_UNHALT ));
14496
14497 if (RT_LIKELY( ( ( !fCpu
14498 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14499 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14500 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14501 || pStats->cInstructions < cMinInstructions))
14502 {
14503 if (pStats->cInstructions < cMaxInstructions)
14504 {
14505 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14506 {
14507#ifdef IN_RING0
14508 if ( !fCheckPreemptionPending
14509 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14510#endif
14511 {
14512 Assert(pVCpu->iem.s.cActiveMappings == 0);
14513 iemReInitDecoder(pVCpu);
14514 continue;
14515 }
14516#ifdef IN_RING0
14517 rcStrict = VINF_EM_RAW_INTERRUPT;
14518 break;
14519#endif
14520 }
14521 }
14522 }
14523 Assert(!(fCpu & VMCPU_FF_IEM));
14524 }
14525 Assert(pVCpu->iem.s.cActiveMappings == 0);
14526 }
14527 else if (pVCpu->iem.s.cActiveMappings > 0)
14528 iemMemRollback(pVCpu);
14529 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14530 break;
14531 }
14532 }
14533#ifdef IEM_WITH_SETJMP
14534 else
14535 {
14536 if (pVCpu->iem.s.cActiveMappings > 0)
14537 iemMemRollback(pVCpu);
14538 pVCpu->iem.s.cLongJumps++;
14539 }
14540 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14541#endif
14542
14543 /*
14544 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14545 */
14546 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14547 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14548 }
14549 else
14550 {
14551 if (pVCpu->iem.s.cActiveMappings > 0)
14552 iemMemRollback(pVCpu);
14553
14554#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14555 /*
14556 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14557 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14558 */
14559 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14560#endif
14561 }
14562
14563 /*
14564 * Maybe re-enter raw-mode and log.
14565 */
14566 if (rcStrict != VINF_SUCCESS)
14567 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14568 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14569 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14570 return rcStrict;
14571}
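
/*
 * Usage sketch (illustrative; the limit values are made-up examples):
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 512, &Stats);
 *      LogRel(("insns=%u exits=%u maxdist=%u\n",
 *              Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance));
 */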
14572
14573
14574/**
14575 * Injects a trap, fault, abort, software interrupt or external interrupt.
14576 *
14577 * The parameter list matches TRPMQueryTrapAll pretty closely.
14578 *
14579 * @returns Strict VBox status code.
14580 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14581 * @param u8TrapNo The trap number.
14582 * @param enmType What type is it (trap/fault/abort), software
14583 * interrupt or hardware interrupt.
14584 * @param uErrCode The error code if applicable.
14585 * @param uCr2 The CR2 value if applicable.
14586 * @param cbInstr The instruction length (only relevant for
14587 * software interrupts).
14588 */
14589VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14590 uint8_t cbInstr)
14591{
14592 iemInitDecoder(pVCpu, false);
14593#ifdef DBGFTRACE_ENABLED
14594 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14595 u8TrapNo, enmType, uErrCode, uCr2);
14596#endif
14597
14598 uint32_t fFlags;
14599 switch (enmType)
14600 {
14601 case TRPM_HARDWARE_INT:
14602 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14603 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14604 uErrCode = uCr2 = 0;
14605 break;
14606
14607 case TRPM_SOFTWARE_INT:
14608 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14609 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14610 uErrCode = uCr2 = 0;
14611 break;
14612
14613 case TRPM_TRAP:
14614 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14615 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14616 if (u8TrapNo == X86_XCPT_PF)
14617 fFlags |= IEM_XCPT_FLAGS_CR2;
14618 switch (u8TrapNo)
14619 {
14620 case X86_XCPT_DF:
14621 case X86_XCPT_TS:
14622 case X86_XCPT_NP:
14623 case X86_XCPT_SS:
14624 case X86_XCPT_PF:
14625 case X86_XCPT_AC:
14626 fFlags |= IEM_XCPT_FLAGS_ERR;
14627 break;
14628 }
14629 break;
14630
14631 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14632 }
14633
14634 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14635
14636 if (pVCpu->iem.s.cActiveMappings > 0)
14637 iemMemRollback(pVCpu);
14638
14639 return rcStrict;
14640}
14641
14642
14643/**
14644 * Injects the active TRPM event.
14645 *
14646 * @returns Strict VBox status code.
14647 * @param pVCpu The cross context virtual CPU structure.
14648 */
14649VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14650{
14651#ifndef IEM_IMPLEMENTS_TASKSWITCH
14652 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14653#else
14654 uint8_t u8TrapNo;
14655 TRPMEVENT enmType;
14656 uint32_t uErrCode;
14657 RTGCUINTPTR uCr2;
14658 uint8_t cbInstr;
14659 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14660 if (RT_FAILURE(rc))
14661 return rc;
14662
14663 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14664 * ICEBP \#DB injection as a special case. */
14665 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14666#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14667 if (rcStrict == VINF_SVM_VMEXIT)
14668 rcStrict = VINF_SUCCESS;
14669#endif
14670#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14671 if (rcStrict == VINF_VMX_VMEXIT)
14672 rcStrict = VINF_SUCCESS;
14673#endif
14674 /** @todo Are there any other codes that imply the event was successfully
14675 * delivered to the guest? See @bugref{6607}. */
14676 if ( rcStrict == VINF_SUCCESS
14677 || rcStrict == VINF_IEM_RAISED_XCPT)
14678 TRPMResetTrap(pVCpu);
14679
14680 return rcStrict;
14681#endif
14682}
14683
14684
14685VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14686{
14687 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14688 return VERR_NOT_IMPLEMENTED;
14689}
14690
14691
14692VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14693{
14694 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14695 return VERR_NOT_IMPLEMENTED;
14696}
14697
14698
14699#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14700/**
14701 * Executes an IRET instruction with the default operand size.
14702 *
14703 * This is for PATM.
14704 *
14705 * @returns VBox status code.
14706 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14707 * @param pCtxCore The register frame.
14708 */
14709VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14710{
14711 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14712
14713 iemCtxCoreToCtx(pCtx, pCtxCore);
14714 iemInitDecoder(pVCpu);
14715 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14716 if (rcStrict == VINF_SUCCESS)
14717 iemCtxToCtxCore(pCtxCore, pCtx);
14718 else
14719 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14720 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14721 return rcStrict;
14722}
14723#endif
14724
14725
14726/**
14727 * Macro used by the IEMExec* method to check the given instruction length.
14728 *
14729 * Will return on failure!
14730 *
14731 * @param a_cbInstr The given instruction length.
14732 * @param a_cbMin The minimum length.
14733 */
14734#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14735 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14736 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14737
14738
14739/**
14740 * Calls iemUninitExec and iemExecStatusCodeFiddling.
14741 *
14742 * The "MaybeReenter" in the name is a leftover from the raw-mode days, when this also called iemRCRawMaybeReenter.
14743 *
14744 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14746 * @param rcStrict The status code to fiddle.
14747 */
14748DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14749{
14750 iemUninitExec(pVCpu);
14751 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14752}
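/*
 * Note: the IEMExec* interfaces below all follow the same shape: assert the
 * decoded instruction length, assert/sync the required guest state, call
 * iemInitExec, dispatch to the C implementation (iemCImpl_*), and return
 * through iemUninitExecAndFiddleStatusAndMaybeReenter so the caller always
 * gets a fiddled strict status code.
 */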
14753
14754
14755/**
14756 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14757 *
14758 * This API ASSUMES that the caller has already verified that the guest code is
14759 * allowed to access the I/O port. (The I/O port is in the DX register in the
14760 * guest state.)
14761 *
14762 * @returns Strict VBox status code.
14763 * @param pVCpu The cross context virtual CPU structure.
14764 * @param cbValue The size of the I/O port access (1, 2, or 4).
14765 * @param enmAddrMode The addressing mode.
14766 * @param fRepPrefix Indicates whether a repeat prefix is used
14767 * (doesn't matter which for this instruction).
14768 * @param cbInstr The instruction length in bytes.
14769 * @param iEffSeg The effective segment address.
14770 * @param fIoChecked Whether the access to the I/O port has been
14771 * checked or not. It's typically checked in the
14772 * HM scenario.
14773 */
14774VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14775 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14776{
14777 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14778 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14779
14780 /*
14781 * State init.
14782 */
14783 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14784
14785 /*
14786 * Switch orgy for getting to the right handler.
14787 */
14788 VBOXSTRICTRC rcStrict;
14789 if (fRepPrefix)
14790 {
14791 switch (enmAddrMode)
14792 {
14793 case IEMMODE_16BIT:
14794 switch (cbValue)
14795 {
14796 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14797 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14798 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14799 default:
14800 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14801 }
14802 break;
14803
14804 case IEMMODE_32BIT:
14805 switch (cbValue)
14806 {
14807 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14808 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14809 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14810 default:
14811 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14812 }
14813 break;
14814
14815 case IEMMODE_64BIT:
14816 switch (cbValue)
14817 {
14818 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14819 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14820 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14821 default:
14822 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14823 }
14824 break;
14825
14826 default:
14827 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14828 }
14829 }
14830 else
14831 {
14832 switch (enmAddrMode)
14833 {
14834 case IEMMODE_16BIT:
14835 switch (cbValue)
14836 {
14837 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14838 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14839 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14840 default:
14841 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14842 }
14843 break;
14844
14845 case IEMMODE_32BIT:
14846 switch (cbValue)
14847 {
14848 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14849 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14850 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14851 default:
14852 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14853 }
14854 break;
14855
14856 case IEMMODE_64BIT:
14857 switch (cbValue)
14858 {
14859 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14860 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14861 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14862 default:
14863 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14864 }
14865 break;
14866
14867 default:
14868 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14869 }
14870 }
14871
14872 if (pVCpu->iem.s.cActiveMappings)
14873 iemMemRollback(pVCpu);
14874
14875 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14876}
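/*
 * An illustrative caller sketch (not part of the original file), assuming an
 * HM exit handler that has already checked I/O permissions for a REP OUTSB
 * with 32-bit addressing; cbInstrHyp is a hypothetical decoded length.
 * IEMExecStringIoRead below is used the same way, minus the segment.
 */
#if 0
static VBOXSTRICTRC exampleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstrHyp)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstrHyp, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif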
14877
14878
14879/**
14880 * Interface for HM and EM for executing string I/O IN (read) instructions.
14881 *
14882 * This API ASSUMES that the caller has already verified that the guest code is
14883 * allowed to access the I/O port. (The I/O port is in the DX register in the
14884 * guest state.)
14885 *
14886 * @returns Strict VBox status code.
14887 * @param pVCpu The cross context virtual CPU structure.
14888 * @param cbValue The size of the I/O port access (1, 2, or 4).
14889 * @param enmAddrMode The addressing mode.
14890 * @param fRepPrefix Indicates whether a repeat prefix is used
14891 * (doesn't matter which for this instruction).
14892 * @param cbInstr The instruction length in bytes.
14893 * @param fIoChecked Whether the access to the I/O port has been
14894 * checked or not. It's typically checked in the
14895 * HM scenario.
14896 */
14897VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14898 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14899{
14900 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14901
14902 /*
14903 * State init.
14904 */
14905 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14906
14907 /*
14908 * Switch orgy for getting to the right handler.
14909 */
14910 VBOXSTRICTRC rcStrict;
14911 if (fRepPrefix)
14912 {
14913 switch (enmAddrMode)
14914 {
14915 case IEMMODE_16BIT:
14916 switch (cbValue)
14917 {
14918 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14919 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14920 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14921 default:
14922 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14923 }
14924 break;
14925
14926 case IEMMODE_32BIT:
14927 switch (cbValue)
14928 {
14929 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14930 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14931 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14932 default:
14933 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14934 }
14935 break;
14936
14937 case IEMMODE_64BIT:
14938 switch (cbValue)
14939 {
14940 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14941 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14942 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14943 default:
14944 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14945 }
14946 break;
14947
14948 default:
14949 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14950 }
14951 }
14952 else
14953 {
14954 switch (enmAddrMode)
14955 {
14956 case IEMMODE_16BIT:
14957 switch (cbValue)
14958 {
14959 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14960 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14961 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14962 default:
14963 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14964 }
14965 break;
14966
14967 case IEMMODE_32BIT:
14968 switch (cbValue)
14969 {
14970 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14971 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14972 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14973 default:
14974 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14975 }
14976 break;
14977
14978 case IEMMODE_64BIT:
14979 switch (cbValue)
14980 {
14981 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14982 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14983 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14984 default:
14985 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14986 }
14987 break;
14988
14989 default:
14990 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14991 }
14992 }
14993
14994 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14996}
14997
14998
14999/**
15000 * Interface for raw-mode to execute an OUT instruction.
15001 *
15002 * @returns Strict VBox status code.
15003 * @param pVCpu The cross context virtual CPU structure.
15004 * @param cbInstr The instruction length in bytes.
15005 * @param u16Port The port to write to.
15006 * @param fImm Whether the port is specified using an immediate operand or
15007 * using the implicit DX register.
15008 * @param cbReg The register size.
15009 *
15010 * @remarks In ring-0 not all of the state needs to be synced in.
15011 */
15012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15013{
15014 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15015 Assert(cbReg <= 4 && cbReg != 3);
15016
15017 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15018 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15019 Assert(!pVCpu->iem.s.cActiveMappings);
15020 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15021}
15022
15023
15024/**
15025 * Interface for raw-mode to execute an IN instruction.
15026 *
15027 * @returns Strict VBox status code.
15028 * @param pVCpu The cross context virtual CPU structure.
15029 * @param cbInstr The instruction length in bytes.
15030 * @param u16Port The port to read.
15031 * @param fImm Whether the port is specified using an immediate operand or
15032 * using the implicit DX register.
15033 * @param cbReg The register size.
15034 */
15035VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15036{
15037 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15038 Assert(cbReg <= 4 && cbReg != 3);
15039
15040 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15041 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15042 Assert(!pVCpu->iem.s.cActiveMappings);
15043 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15044}
15045
15046
15047/**
15048 * Interface for HM and EM to write to a CRx register.
15049 *
15050 * @returns Strict VBox status code.
15051 * @param pVCpu The cross context virtual CPU structure.
15052 * @param cbInstr The instruction length in bytes.
15053 * @param iCrReg The control register number (destination).
15054 * @param iGReg The general purpose register number (source).
15055 *
15056 * @remarks In ring-0 not all of the state needs to be synced in.
15057 */
15058VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15059{
15060 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15061 Assert(iCrReg < 16);
15062 Assert(iGReg < 16);
15063
15064 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15065 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15066 Assert(!pVCpu->iem.s.cActiveMappings);
15067 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15068}
15069
15070
15071/**
15072 * Interface for HM and EM to read from a CRx register.
15073 *
15074 * @returns Strict VBox status code.
15075 * @param pVCpu The cross context virtual CPU structure.
15076 * @param cbInstr The instruction length in bytes.
15077 * @param iGReg The general purpose register number (destination).
15078 * @param iCrReg The control register number (source).
15079 *
15080 * @remarks In ring-0 not all of the state needs to be synced in.
15081 */
15082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15083{
15084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15085 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15086 | CPUMCTX_EXTRN_APIC_TPR);
15087 Assert(iCrReg < 16);
15088 Assert(iGReg < 16);
15089
15090 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15091 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15092 Assert(!pVCpu->iem.s.cActiveMappings);
15093 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15094}
15095
15096
15097/**
15098 * Interface for HM and EM to clear the CR0[TS] bit.
15099 *
15100 * @returns Strict VBox status code.
15101 * @param pVCpu The cross context virtual CPU structure.
15102 * @param cbInstr The instruction length in bytes.
15103 *
15104 * @remarks In ring-0 not all of the state needs to be synced in.
15105 */
15106VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15107{
15108 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15109
15110 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15111 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15112 Assert(!pVCpu->iem.s.cActiveMappings);
15113 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15114}
15115
15116
15117/**
15118 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15119 *
15120 * @returns Strict VBox status code.
15121 * @param pVCpu The cross context virtual CPU structure.
15122 * @param cbInstr The instruction length in bytes.
15123 * @param uValue The value to load into CR0.
15124 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15125 * memory operand. Otherwise pass NIL_RTGCPTR.
15126 *
15127 * @remarks In ring-0 not all of the state needs to be synced in.
15128 */
15129VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15130{
15131 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15132
15133 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15134 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15135 Assert(!pVCpu->iem.s.cActiveMappings);
15136 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15137}
15138
15139
15140/**
15141 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15142 *
15143 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15144 *
15145 * @returns Strict VBox status code.
15146 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15147 * @param cbInstr The instruction length in bytes.
15148 * @remarks In ring-0 not all of the state needs to be synced in.
15149 * @thread EMT(pVCpu)
15150 */
15151VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15152{
15153 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15154
15155 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15156 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15157 Assert(!pVCpu->iem.s.cActiveMappings);
15158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15159}
15160
15161
15162/**
15163 * Interface for HM and EM to emulate the WBINVD instruction.
15164 *
15165 * @returns Strict VBox status code.
15166 * @param pVCpu The cross context virtual CPU structure.
15167 * @param cbInstr The instruction length in bytes.
15168 *
15169 * @remarks In ring-0 not all of the state needs to be synced in.
15170 */
15171VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15172{
15173 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15174
15175 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15176 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15177 Assert(!pVCpu->iem.s.cActiveMappings);
15178 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15179}
15180
15181
15182/**
15183 * Interface for HM and EM to emulate the INVD instruction.
15184 *
15185 * @returns Strict VBox status code.
15186 * @param pVCpu The cross context virtual CPU structure.
15187 * @param cbInstr The instruction length in bytes.
15188 *
15189 * @remarks In ring-0 not all of the state needs to be synced in.
15190 */
15191VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15192{
15193 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15194
15195 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15196 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15197 Assert(!pVCpu->iem.s.cActiveMappings);
15198 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15199}
15200
15201
15202/**
15203 * Interface for HM and EM to emulate the INVLPG instruction.
15204 *
15205 * @returns Strict VBox status code.
15206 * @retval VINF_PGM_SYNC_CR3
15207 *
15208 * @param pVCpu The cross context virtual CPU structure.
15209 * @param cbInstr The instruction length in bytes.
15210 * @param GCPtrPage The effective address of the page to invalidate.
15211 *
15212 * @remarks In ring-0 not all of the state needs to be synced in.
15213 */
15214VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15215{
15216 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15217
15218 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15219 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15220 Assert(!pVCpu->iem.s.cActiveMappings);
15221 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15222}
15223
15224
15225/**
15226 * Interface for HM and EM to emulate the INVPCID instruction.
15227 *
15228 * @returns Strict VBox status code.
15229 * @retval VINF_PGM_SYNC_CR3
15230 *
15231 * @param pVCpu The cross context virtual CPU structure.
15232 * @param cbInstr The instruction length in bytes.
15233 * @param iEffSeg The effective segment register.
15234 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15235 * @param uType The invalidation type.
15236 *
15237 * @remarks In ring-0 not all of the state needs to be synced in.
15238 */
15239VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15240 uint64_t uType)
15241{
15242 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15243
15244 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15245 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15246 Assert(!pVCpu->iem.s.cActiveMappings);
15247 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15248}
15249
15250
15251/**
15252 * Interface for HM and EM to emulate the CPUID instruction.
15253 *
15254 * @returns Strict VBox status code.
15255 *
15256 * @param pVCpu The cross context virtual CPU structure.
15257 * @param cbInstr The instruction length in bytes.
15258 *
15259 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15260 */
15261VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15262{
15263 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15264 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15265
15266 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15267 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15268 Assert(!pVCpu->iem.s.cActiveMappings);
15269 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15270}
15271
15272
15273/**
15274 * Interface for HM and EM to emulate the RDPMC instruction.
15275 *
15276 * @returns Strict VBox status code.
15277 *
15278 * @param pVCpu The cross context virtual CPU structure.
15279 * @param cbInstr The instruction length in bytes.
15280 *
15281 * @remarks Not all of the state needs to be synced in.
15282 */
15283VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15284{
15285 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15286 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15287
15288 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15290 Assert(!pVCpu->iem.s.cActiveMappings);
15291 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15292}
15293
15294
15295/**
15296 * Interface for HM and EM to emulate the RDTSC instruction.
15297 *
15298 * @returns Strict VBox status code.
15299 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15300 *
15301 * @param pVCpu The cross context virtual CPU structure.
15302 * @param cbInstr The instruction length in bytes.
15303 *
15304 * @remarks Not all of the state needs to be synced in.
15305 */
15306VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15307{
15308 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15309 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15310
15311 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15312 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15313 Assert(!pVCpu->iem.s.cActiveMappings);
15314 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15315}
15316
15317
15318/**
15319 * Interface for HM and EM to emulate the RDTSCP instruction.
15320 *
15321 * @returns Strict VBox status code.
15322 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15323 *
15324 * @param pVCpu The cross context virtual CPU structure.
15325 * @param cbInstr The instruction length in bytes.
15326 *
15327 * @remarks Not all of the state needs to be synced in.  It is recommended
15328 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15329 */
15330VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15331{
15332 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15333 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15334
15335 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15336 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15337 Assert(!pVCpu->iem.s.cActiveMappings);
15338 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15339}
15340
15341
15342/**
15343 * Interface for HM and EM to emulate the RDMSR instruction.
15344 *
15345 * @returns Strict VBox status code.
15346 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15347 *
15348 * @param pVCpu The cross context virtual CPU structure.
15349 * @param cbInstr The instruction length in bytes.
15350 *
15351 * @remarks Not all of the state needs to be synced in. Requires RCX and
15352 * (currently) all MSRs.
15353 */
15354VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15355{
15356 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15357 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15358
15359 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15360 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15361 Assert(!pVCpu->iem.s.cActiveMappings);
15362 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15363}
15364
15365
15366/**
15367 * Interface for HM and EM to emulate the WRMSR instruction.
15368 *
15369 * @returns Strict VBox status code.
15370 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15371 *
15372 * @param pVCpu The cross context virtual CPU structure.
15373 * @param cbInstr The instruction length in bytes.
15374 *
15375 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15376 * and (currently) all MSRs.
15377 */
15378VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15379{
15380 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15381 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15382 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15383
15384 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15385 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15386 Assert(!pVCpu->iem.s.cActiveMappings);
15387 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15388}
15389
15390
15391/**
15392 * Interface for HM and EM to emulate the MONITOR instruction.
15393 *
15394 * @returns Strict VBox status code.
15395 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15396 *
15397 * @param pVCpu The cross context virtual CPU structure.
15398 * @param cbInstr The instruction length in bytes.
15399 *
15400 * @remarks Not all of the state needs to be synced in.
15401 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15402 * are used.
15403 */
15404VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15405{
15406 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15407 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15408
15409 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15410 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15411 Assert(!pVCpu->iem.s.cActiveMappings);
15412 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15413}
15414
15415
15416/**
15417 * Interface for HM and EM to emulate the MWAIT instruction.
15418 *
15419 * @returns Strict VBox status code.
15420 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15421 *
15422 * @param pVCpu The cross context virtual CPU structure.
15423 * @param cbInstr The instruction length in bytes.
15424 *
15425 * @remarks Not all of the state needs to be synced in.
15426 */
15427VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15428{
15429 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15430 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15431
15432 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15434 Assert(!pVCpu->iem.s.cActiveMappings);
15435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15436}
15437
15438
15439/**
15440 * Interface for HM and EM to emulate the HLT instruction.
15441 *
15442 * @returns Strict VBox status code.
15443 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15444 *
15445 * @param pVCpu The cross context virtual CPU structure.
15446 * @param cbInstr The instruction length in bytes.
15447 *
15448 * @remarks Not all of the state needs to be synced in.
15449 */
15450VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15451{
15452 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15453
15454 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15455 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15456 Assert(!pVCpu->iem.s.cActiveMappings);
15457 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15458}
15459
15460
15461/**
15462 * Checks if IEM is in the process of delivering an event (interrupt or
15463 * exception).
15464 *
15465 * @returns true if we're in the process of raising an interrupt or exception,
15466 * false otherwise.
15467 * @param pVCpu The cross context virtual CPU structure.
15468 * @param puVector Where to store the vector associated with the
15469 * currently delivered event, optional.
15470 * @param pfFlags Where to store the event delivery flags (see
15471 * IEM_XCPT_FLAGS_XXX), optional.
15472 * @param puErr Where to store the error code associated with the
15473 * event, optional.
15474 * @param puCr2 Where to store the CR2 associated with the event,
15475 * optional.
15476 * @remarks The caller should check the flags to determine if the error code and
15477 * CR2 are valid for the event.
15478 */
15479VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15480{
15481 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15482 if (fRaisingXcpt)
15483 {
15484 if (puVector)
15485 *puVector = pVCpu->iem.s.uCurXcpt;
15486 if (pfFlags)
15487 *pfFlags = pVCpu->iem.s.fCurXcpt;
15488 if (puErr)
15489 *puErr = pVCpu->iem.s.uCurXcptErr;
15490 if (puCr2)
15491 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15492 }
15493 return fRaisingXcpt;
15494}
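/*
 * An illustrative query sketch (not part of the original file): only trust
 * the error code and CR2 when the returned flags say they are valid.
 */
#if 0
uint8_t  uVector;
uint32_t fFlags, uErr;
uint64_t uCr2;
if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
{
    bool const fHasErr = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
    bool const fHasCr2 = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
    /* ... */
}
#endif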
15495
15496#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15497
15498/**
15499 * Interface for HM and EM to emulate the CLGI instruction.
15500 *
15501 * @returns Strict VBox status code.
15502 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15503 * @param cbInstr The instruction length in bytes.
15504 * @thread EMT(pVCpu)
15505 */
15506VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15507{
15508 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15509
15510 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15511 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15512 Assert(!pVCpu->iem.s.cActiveMappings);
15513 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15514}
15515
15516
15517/**
15518 * Interface for HM and EM to emulate the STGI instruction.
15519 *
15520 * @returns Strict VBox status code.
15521 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15522 * @param cbInstr The instruction length in bytes.
15523 * @thread EMT(pVCpu)
15524 */
15525VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15526{
15527 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15528
15529 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15530 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15531 Assert(!pVCpu->iem.s.cActiveMappings);
15532 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15533}
15534
15535
15536/**
15537 * Interface for HM and EM to emulate the VMLOAD instruction.
15538 *
15539 * @returns Strict VBox status code.
15540 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15541 * @param cbInstr The instruction length in bytes.
15542 * @thread EMT(pVCpu)
15543 */
15544VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15545{
15546 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15547
15548 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15549 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15550 Assert(!pVCpu->iem.s.cActiveMappings);
15551 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15552}
15553
15554
15555/**
15556 * Interface for HM and EM to emulate the VMSAVE instruction.
15557 *
15558 * @returns Strict VBox status code.
15559 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15560 * @param cbInstr The instruction length in bytes.
15561 * @thread EMT(pVCpu)
15562 */
15563VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15564{
15565 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15566
15567 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15568 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15569 Assert(!pVCpu->iem.s.cActiveMappings);
15570 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15571}
15572
15573
15574/**
15575 * Interface for HM and EM to emulate the INVLPGA instruction.
15576 *
15577 * @returns Strict VBox status code.
15578 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15579 * @param cbInstr The instruction length in bytes.
15580 * @thread EMT(pVCpu)
15581 */
15582VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15583{
15584 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15585
15586 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15588 Assert(!pVCpu->iem.s.cActiveMappings);
15589 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15590}
15591
15592
15593/**
15594 * Interface for HM and EM to emulate the VMRUN instruction.
15595 *
15596 * @returns Strict VBox status code.
15597 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15598 * @param cbInstr The instruction length in bytes.
15599 * @thread EMT(pVCpu)
15600 */
15601VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15602{
15603 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15604 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15605
15606 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15607 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15608 Assert(!pVCpu->iem.s.cActiveMappings);
15609 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15610}
15611
15612
15613/**
15614 * Interface for HM and EM to emulate \#VMEXIT.
15615 *
15616 * @returns Strict VBox status code.
15617 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15618 * @param uExitCode The exit code.
15619 * @param uExitInfo1 The exit info. 1 field.
15620 * @param uExitInfo2 The exit info. 2 field.
15621 * @thread EMT(pVCpu)
15622 */
15623VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15624{
15625 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15626 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15627 if (pVCpu->iem.s.cActiveMappings)
15628 iemMemRollback(pVCpu);
15629 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15630}
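/*
 * Illustrative only (not part of the original file): an HM handler could
 * forward a physical interrupt intercept to the nested-guest like this,
 * passing zero for both exit information fields:
 *     rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
 */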
15631
15632#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15633
15634#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15635
15636/**
15637 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15638 *
15639 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15640 * are performed. Bounds checking is done in strict builds only.
15641 *
15642 * @param pVmcs Pointer to the virtual VMCS.
15643 * @param u64VmcsField The VMCS field.
15644 * @param pu64Dst Where to store the VMCS value.
15645 *
15646 * @remarks May be called with interrupts disabled.
15647 * @todo This should probably be moved to CPUM someday.
15648 */
15649VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15650{
15651 AssertPtr(pVmcs);
15652 AssertPtr(pu64Dst);
15653 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15654}
15655
15656
15657/**
15658 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15659 *
15660 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15661 * are performed. Bounds checking is done in strict builds only.
15662 *
15663 * @param pVmcs Pointer to the virtual VMCS.
15664 * @param u64VmcsField The VMCS field.
15665 * @param u64Val The value to write.
15666 *
15667 * @remarks May be called with interrupts disabled.
15668 * @todo This should probably be moved to CPUM someday.
15669 */
15670VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15671{
15672 AssertPtr(pVmcs);
15673 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15674}
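/*
 * An illustrative sketch of the read/write pair (not part of the original
 * file), assuming the caller holds a valid virtual-VMCS pointer; the
 * VMX_VMCS_GUEST_RIP encoding is used purely as an example field.
 */
#if 0
uint64_t u64GuestRip;
IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);
IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, u64GuestRip + cbInstr);
#endif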
15675
15676
15677/**
15678 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15679 *
15680 * @returns Strict VBox status code.
15681 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15682 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15683 * the x2APIC device.
15684 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15685 *
15686 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15687 * @param idMsr The MSR being read or written.
15688 * @param pu64Value Pointer to the value being written or where to store the
15689 * value being read.
15690 * @param fWrite Whether this is an MSR write or read access.
15691 * @thread EMT(pVCpu)
15692 */
15693VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15694{
15695 Assert(pu64Value);
15696
15697 VBOXSTRICTRC rcStrict;
15698 if (fWrite)
15699 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15700 else
15701 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15702 Assert(!pVCpu->iem.s.cActiveMappings);
15703 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15705}
15706
15707
15708/**
15709 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15710 *
15711 * @returns Strict VBox status code.
15712 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15713 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15714 *
15715 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15716 * @param pExitInfo Pointer to the VM-exit information.
15717 * @param pExitEventInfo Pointer to the VM-exit event information.
15718 * @thread EMT(pVCpu)
15719 */
15720VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15721{
15722 Assert(pExitInfo);
15723 Assert(pExitEventInfo);
15724 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15725 Assert(!pVCpu->iem.s.cActiveMappings);
15726 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15728}
15729
15730
15731/**
15732 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15733 * VM-exit.
15734 *
15735 * @returns Strict VBox status code.
15736 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15737 * @thread EMT(pVCpu)
15738 */
15739VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15740{
15741 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15742 Assert(!pVCpu->iem.s.cActiveMappings);
15743 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15744}
15745
15746
15747/**
15748 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15749 *
15750 * @returns Strict VBox status code.
15751 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15752 * @thread EMT(pVCpu)
15753 */
15754VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15755{
15756 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15757 Assert(!pVCpu->iem.s.cActiveMappings);
15758 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15759}
15760
15761
15762/**
15763 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15764 *
15765 * @returns Strict VBox status code.
15766 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15767 * @param uVector The external interrupt vector (pass 0 if the external
15768 * interrupt is still pending).
15769 * @param fIntPending Whether the external interrupt is pending or
15770 * acknowledged in the interrupt controller.
15771 * @thread EMT(pVCpu)
15772 */
15773VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15774{
15775 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15776 Assert(!pVCpu->iem.s.cActiveMappings);
15777 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15778}
15779
15780
15781/**
15782 * Interface for HM and EM to emulate VM-exit due to exceptions.
15783 *
15784 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15785 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15786 *
15787 * @returns Strict VBox status code.
15788 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15789 * @param pExitInfo Pointer to the VM-exit information.
15790 * @param pExitEventInfo Pointer to the VM-exit event information.
15791 * @thread EMT(pVCpu)
15792 */
15793VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15794{
15795 Assert(pExitInfo);
15796 Assert(pExitEventInfo);
15797 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15798 Assert(!pVCpu->iem.s.cActiveMappings);
15799 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15800}
15801
15802
15803/**
15804 * Interface for HM and EM to emulate VM-exit due to NMIs.
15805 *
15806 * @returns Strict VBox status code.
15807 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15808 * @thread EMT(pVCpu)
15809 */
15810VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15811{
15812 VMXVEXITINFO ExitInfo;
15813 RT_ZERO(ExitInfo);
15814 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15815
15816 VMXVEXITEVENTINFO ExitEventInfo;
15817 RT_ZERO(ExitEventInfo);
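    /* Compose the VM-exit interruption-information field: valid bit set,
       event type NMI, vector X86_XCPT_NMI (2). */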
15818 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15819 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15820 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15821
15822 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15823 Assert(!pVCpu->iem.s.cActiveMappings);
15824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15825}
15826
15827
15828/**
15829 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15830 *
15831 * @returns Strict VBox status code.
15832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15833 * @thread EMT(pVCpu)
15834 */
15835VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15836{
15837 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15838 Assert(!pVCpu->iem.s.cActiveMappings);
15839 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15840}
15841
15842
15843/**
15844 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15845 *
15846 * @returns Strict VBox status code.
15847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15848 * @param uVector The SIPI vector.
15849 * @thread EMT(pVCpu)
15850 */
15851VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15852{
15853 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15854 Assert(!pVCpu->iem.s.cActiveMappings);
15855 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15856}
15857
15858
15859/**
15860 * Interface for HM and EM to emulate a VM-exit.
15861 *
15862 * If a specialized version of a VM-exit handler exists, that must be used instead.
15863 *
15864 * @returns Strict VBox status code.
15865 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15866 * @param uExitReason The VM-exit reason.
15867 * @param u64ExitQual The Exit qualification.
15868 * @thread EMT(pVCpu)
15869 */
15870VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15871{
15872 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15873 Assert(!pVCpu->iem.s.cActiveMappings);
15874 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15875}
15876
15877
15878/**
15879 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15880 *
15881 * This is meant to be used for those instructions that VMX provides additional
15882 * decoding information beyond just the instruction length!
15883 *
15884 * @returns Strict VBox status code.
15885 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15886 * @param pExitInfo Pointer to the VM-exit information.
15887 * @thread EMT(pVCpu)
15888 */
15889VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15890{
15891 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15892 Assert(!pVCpu->iem.s.cActiveMappings);
15893 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15894}
15895
15896
15897/**
15898 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15899 *
15900 * This is meant to be used for those instructions that VMX provides only the
15901 * instruction length.
15902 *
15903 * @returns Strict VBox status code.
15904 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15905 * @param uExitReason The VM-exit reason.
15906 * @param cbInstr The instruction length in bytes.
15907 * @thread EMT(pVCpu)
15908 */
15909VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15910{
15911 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15912 Assert(!pVCpu->iem.s.cActiveMappings);
15913 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15914}
15915
15916
15917/**
15918 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15919 * Virtualized-EOI, TPR-below threshold).
15920 *
15921 * @returns Strict VBox status code.
15922 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15923 * @param pExitInfo Pointer to the VM-exit information.
15924 * @thread EMT(pVCpu)
15925 */
15926VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15927{
15928 Assert(pExitInfo);
15929 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
15930 Assert(!pVCpu->iem.s.cActiveMappings);
15931 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15932}
15933
15934
15935/**
15936 * Interface for HM and EM to emulate a VM-exit due to a task switch.
15937 *
15938 * @returns Strict VBox status code.
15939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15940 * @param pExitInfo Pointer to the VM-exit information.
15941 * @param pExitEventInfo Pointer to the VM-exit event information.
15942 * @thread EMT(pVCpu)
15943 */
15944VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15945{
15946 Assert(pExitInfo);
15947 Assert(pExitEventInfo);
15948 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
15949 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15950 Assert(!pVCpu->iem.s.cActiveMappings);
15951 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15952}
15953
15954
15955/**
15956 * Interface for HM and EM to emulate the VMREAD instruction.
15957 *
15958 * @returns Strict VBox status code.
15959 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15960 * @param pExitInfo Pointer to the VM-exit information.
15961 * @thread EMT(pVCpu)
15962 */
15963VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15964{
15965 Assert(pExitInfo); /* validate the pointer before the length macro dereferences it */
15966 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15967 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15968
15969 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15970
15971 VBOXSTRICTRC rcStrict;
15972 uint8_t const cbInstr = pExitInfo->cbInstr;
15973 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
15974 uint64_t const u64FieldEnc = fIs64BitMode
15975 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
15976 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15977 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15978 {
15979 if (fIs64BitMode)
15980 {
15981 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15982 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
15983 }
15984 else
15985 {
15986 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15987 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
15988 }
15989 }
15990 else
15991 {
15992 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
15993 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15994 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
15995 }
15996 Assert(!pVCpu->iem.s.cActiveMappings);
15997 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15998}
15999
16000
16001/**
16002 * Interface for HM and EM to emulate the VMWRITE instruction.
16003 *
16004 * @returns Strict VBox status code.
16005 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16006 * @param pExitInfo Pointer to the VM-exit information.
16007 * @thread EMT(pVCpu)
16008 */
16009VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16010{
16011 Assert(pExitInfo); /* validate the pointer before the length macro dereferences it */
16012 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16013 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16014
16015 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16016
16017 uint64_t u64Val;
16018 uint8_t iEffSeg;
16019 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16020 {
16021 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16022 iEffSeg = UINT8_MAX;
16023 }
16024 else
16025 {
16026 u64Val = pExitInfo->GCPtrEffAddr;
16027 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16028 }
16029 uint8_t const cbInstr = pExitInfo->cbInstr;
16030 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16031 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16032 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16033 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16034 Assert(!pVCpu->iem.s.cActiveMappings);
16035 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16036}
16037
16038
16039/**
16040 * Interface for HM and EM to emulate the VMPTRLD instruction.
16041 *
16042 * @returns Strict VBox status code.
16043 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16044 * @param pExitInfo Pointer to the VM-exit information.
16045 * @thread EMT(pVCpu)
16046 */
16047VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16048{
16049 Assert(pExitInfo);
16050 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16051 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16052
16053 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16054
16055 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16056 uint8_t const cbInstr = pExitInfo->cbInstr;
16057 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16058 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16059 Assert(!pVCpu->iem.s.cActiveMappings);
16060 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16061}


/**
 * Interface for HM and EM to emulate the VMPTRST instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMCLEAR instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}

/**
 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instructions.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uInstrId    The instruction ID (VMXINSTRID_VMLAUNCH or
 *                      VMXINSTRID_VMRESUME).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
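
/*
 * Usage sketch (illustrative only): unlike the memory-operand wrappers above,
 * VMLAUNCH/VMRESUME emulation needs no exit information beyond the
 * instruction length and an instruction ID, e.g.:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3 /- cbInstr -/, VMXINSTRID_VMLAUNCH);
 */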


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVVPID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo); /* Validate before IEMEXEC_ASSERT_INSTR_LEN_RETURN dereferences it, like the other wrappers. */
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF3(pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}
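
/*
 * Sketch of how a PGM-side caller consumes the status contract above
 * (simplified and hypothetical, not the actual PGM dispatch code): for a
 * write access, VINF_PGM_HANDLER_DO_DEFAULT means the handler bowed out and
 * the access must be replayed against ordinary memory, while VINF_SUCCESS
 * means the access was fully virtualized and must not touch the page:
 *
 *     VBOXSTRICTRC rcStrict = iemVmxApicAccessPageHandler(pVM, pVCpu, GCPhysFault, NULL /- pvPhys -/,
 *                                                         pvBuf, cbBuf, PGMACCESSTYPE_WRITE,
 *                                                         PGMACCESSORIGIN_IEM, NULL /- pvUser -/);
 *     if (rcStrict == VINF_PGM_HANDLER_DO_DEFAULT)
 *         rcStrict = PGMPhysWrite(pVM, GCPhysFault, pvBuf, cbBuf, PGMACCESSORIGIN_IEM);
 */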

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
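
/*
 * Worked example (illustrative): when both inputs are EM scheduling status
 * codes in the [VINF_EM_FIRST, VINF_EM_LAST] range, the numerically lower
 * code has the higher priority and wins the merge, i.e. on that range the
 * function behaves like RT_MIN():
 *
 *     VBOXSTRICTRC rcMerged = iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_EM_HALT, 0 /- iMemMap -/, pVCpu);
 *     Assert(rcMerged == RT_MIN(VINF_EM_RESCHEDULE, VINF_EM_HALT));
 */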


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const  cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const  cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const  *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
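
/*
 * Usage sketch (illustrative, simplified): ring-3 callers react to the force
 * flag along these lines when returning from ring-0/raw-mode execution:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */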

#endif /* IN_RING3 */