VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@94163

Last change on this file was revision 94163, checked in by vboxsync, 3 years ago

VMM/IEM: Try deal with basic Intel/AMD EFLAGS difference for shifts (intel side tests). bugref:9898

1/* $Id: IEMAll.cpp 94163 2022-03-11 00:56:22Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews things a bit too much
37 * and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
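
/* Illustrative only: the levels above correspond to the standard VBox logging
 * macros from VBox/log.h.  A sketch of how they are typically used (the format
 * strings and variable names here are made up for illustration, not taken from
 * the decoder):
 *
 *      Log(("IEM: raising #GP(0)\n"));                                       // level 1: major events
 *      LogFlow(("IEMExecOne: returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));  // flow: enter/exit info
 *      Log4(("decode %04x:%08RX64: mov eax, ebx\n", uCs, uRip));             // level 4: mnemonics w/ EIP
 *
 * At runtime the group is enabled through the usual logger controls, e.g.
 * something along the lines of VBOX_LOG="iem.e.l.f.l4" (see the VBox logging
 * documentation for the exact flag syntax).
 */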
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
234
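/* A conceptual sketch of the two strategies (not the actual IEM code, just the
 * shape of it).  With plain status codes every helper returns VBOXSTRICTRC and
 * every caller has to test it; with IEM_WITH_SETJMP the failure path longjmps
 * back to the dispatcher, so the success path carries no check at all:
 *
 *      // status-code style
 *      uint32_t     u32Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // setjmp style - the value comes back in a register, errors unwind via longjmp
 *      uint32_t u32Fetched = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 *
 * iemMemFetchDataU32 is declared further down in this file; the 'Jmp' variant
 * name above is only meant to illustrate the pattern.
 */
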
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
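/* Rough usage sketch for the FNIEMOP_* definition and call macros above.  The
 * handler and helper names here are hypothetical, not real decoder entries:
 *
 *      FNIEMOP_DEF(iemOp_example_Ev_Gv)
 *      {
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          return FNIEMOP_CALL_1(iemOpCommonExample_Ev_Gv, bRm);
 *      }
 *
 * Keeping the hidden pVCpu parameter, the calling convention (e.g. __fastcall
 * on 32-bit x86) and the nothrow attributes behind these macros means the
 * thousands of opcode handlers never have to repeat them.
 */
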
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not in 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for EPT faults.
442 */
443# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
444 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for a triple fault.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
450 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
463# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
464
465#endif
466
467#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
468/**
469 * Check if an SVM control/instruction intercept is set.
470 */
471# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
472 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
473
474/**
475 * Check if an SVM read CRx intercept is set.
476 */
477# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM write CRx intercept is set.
482 */
483# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
484 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
485
486/**
487 * Check if an SVM read DRx intercept is set.
488 */
489# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
496 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
497
498/**
499 * Check if an SVM exception intercept is set.
500 */
501# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
502 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
503
504/**
505 * Invokes the SVM \#VMEXIT handler for the nested-guest.
506 */
507# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
508 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
509
510/**
511 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
512 * corresponding decode assist information.
513 */
514# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
515 do \
516 { \
517 uint64_t uExitInfo1; \
518 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
519 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
520 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
521 else \
522 uExitInfo1 = 0; \
523 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
524 } while (0)
525
526/** Checks and handles the SVM nested-guest instruction intercept and updates
527 * NRIP if needed.
528 */
529# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
530 do \
531 { \
532 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
533 { \
534 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
535 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
536 } \
537 } while (0)
538
539/** Checks and handles SVM nested-guest CR0 read intercept. */
540# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
541 do \
542 { \
543 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
544 { /* probably likely */ } \
545 else \
546 { \
547 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
548 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
549 } \
550 } while (0)
551
552/**
553 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
554 */
555# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
556 do { \
557 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
558 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
559 } while (0)
560
561#else
562# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
563# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
565# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
567# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
568# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
570# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
572# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
573
574#endif
575
576
577/*********************************************************************************************************************************
578* Global Variables *
579*********************************************************************************************************************************/
580extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
581
582
583/** Function table for the ADD instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
585{
586 iemAImpl_add_u8, iemAImpl_add_u8_locked,
587 iemAImpl_add_u16, iemAImpl_add_u16_locked,
588 iemAImpl_add_u32, iemAImpl_add_u32_locked,
589 iemAImpl_add_u64, iemAImpl_add_u64_locked
590};
591
592/** Function table for the ADC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
594{
595 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
596 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
597 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
598 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
599};
600
601/** Function table for the SUB instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
603{
604 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
605 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
606 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
607 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
608};
609
610/** Function table for the SBB instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
612{
613 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
614 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
615 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
616 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
617};
618
619/** Function table for the OR instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
621{
622 iemAImpl_or_u8, iemAImpl_or_u8_locked,
623 iemAImpl_or_u16, iemAImpl_or_u16_locked,
624 iemAImpl_or_u32, iemAImpl_or_u32_locked,
625 iemAImpl_or_u64, iemAImpl_or_u64_locked
626};
627
628/** Function table for the XOR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
630{
631 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
632 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
633 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
634 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
635};
636
637/** Function table for the AND instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
639{
640 iemAImpl_and_u8, iemAImpl_and_u8_locked,
641 iemAImpl_and_u16, iemAImpl_and_u16_locked,
642 iemAImpl_and_u32, iemAImpl_and_u32_locked,
643 iemAImpl_and_u64, iemAImpl_and_u64_locked
644};
645
646/** Function table for the CMP instruction.
647 * @remarks Making operand order ASSUMPTIONS.
648 */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
650{
651 iemAImpl_cmp_u8, NULL,
652 iemAImpl_cmp_u16, NULL,
653 iemAImpl_cmp_u32, NULL,
654 iemAImpl_cmp_u64, NULL
655};
656
657/** Function table for the TEST instruction.
658 * @remarks Making operand order ASSUMPTIONS.
659 */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
661{
662 iemAImpl_test_u8, NULL,
663 iemAImpl_test_u16, NULL,
664 iemAImpl_test_u32, NULL,
665 iemAImpl_test_u64, NULL
666};
667
668/** Function table for the BT instruction. */
669IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
670{
671 NULL, NULL,
672 iemAImpl_bt_u16, NULL,
673 iemAImpl_bt_u32, NULL,
674 iemAImpl_bt_u64, NULL
675};
676
677/** Function table for the BTC instruction. */
678IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
679{
680 NULL, NULL,
681 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
682 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
683 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
684};
685
686/** Function table for the BTR instruction. */
687IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
688{
689 NULL, NULL,
690 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
691 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
692 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
693};
694
695/** Function table for the BTS instruction. */
696IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
697{
698 NULL, NULL,
699 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
700 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
701 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
702};
703
704/** Function table for the BSF instruction. */
705IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
706{
707 NULL, NULL,
708 iemAImpl_bsf_u16, NULL,
709 iemAImpl_bsf_u32, NULL,
710 iemAImpl_bsf_u64, NULL
711};
712
713/** Function table for the BSF instruction, AMD EFLAGS variant. */
714IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_amd =
715{
716 NULL, NULL,
717 iemAImpl_bsf_u16_amd, NULL,
718 iemAImpl_bsf_u32_amd, NULL,
719 iemAImpl_bsf_u64_amd, NULL
720};
721
722/** Function table for the BSF instruction, Intel EFLAGS variant. */
723IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf_intel =
724{
725 NULL, NULL,
726 iemAImpl_bsf_u16_intel, NULL,
727 iemAImpl_bsf_u32_intel, NULL,
728 iemAImpl_bsf_u64_intel, NULL
729};
730
731/** EFLAGS variation selection table for the BSF instruction. */
732IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsf_eflags[] =
733{
734 &g_iemAImpl_bsf,
735 &g_iemAImpl_bsf_intel,
736 &g_iemAImpl_bsf_amd,
737 &g_iemAImpl_bsf,
738};
739
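/* How the *_eflags selection tables are meant to be consumed (sketch only; the
 * per-VCpu field IEM actually uses for the index is not part of this excerpt).
 * Index 0 is the native/host behaviour, 1 the Intel variant, 2 the AMD variant,
 * and the fourth entry currently just repeats the native one:
 *
 *      unsigned const  idxVariant = IEM_IS_GUEST_CPU_INTEL(pVCpu) ? 1
 *                                 : IEM_IS_GUEST_CPU_AMD(pVCpu)   ? 2 : 0;
 *      PCIEMOPBINSIZES pImpl      = g_iemAImpl_bsf_eflags[idxVariant];
 *
 * This is what allows BSF/BSR, IMUL, the shifts and the divides to reproduce
 * the vendor-specific undefined EFLAGS results (cf. the bugref:9898 work the
 * commit message refers to).
 */
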
740/** Function table for the BSR instruction. */
741IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
742{
743 NULL, NULL,
744 iemAImpl_bsr_u16, NULL,
745 iemAImpl_bsr_u32, NULL,
746 iemAImpl_bsr_u64, NULL
747};
748
749/** Function table for the BSR instruction, AMD EFLAGS variant. */
750IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_amd =
751{
752 NULL, NULL,
753 iemAImpl_bsr_u16_amd, NULL,
754 iemAImpl_bsr_u32_amd, NULL,
755 iemAImpl_bsr_u64_amd, NULL
756};
757
758/** Function table for the BSR instruction, Intel EFLAGS variant. */
759IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr_intel =
760{
761 NULL, NULL,
762 iemAImpl_bsr_u16_intel, NULL,
763 iemAImpl_bsr_u32_intel, NULL,
764 iemAImpl_bsr_u64_intel, NULL
765};
766
767/** EFLAGS variation selection table for the BSR instruction. */
768IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_bsr_eflags[] =
769{
770 &g_iemAImpl_bsr,
771 &g_iemAImpl_bsr_intel,
772 &g_iemAImpl_bsr_amd,
773 &g_iemAImpl_bsr,
774};
775
776/** Function table for the IMUL instruction. */
777IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
778{
779 NULL, NULL,
780 iemAImpl_imul_two_u16, NULL,
781 iemAImpl_imul_two_u32, NULL,
782 iemAImpl_imul_two_u64, NULL
783};
784
785/** Function table for the IMUL instruction, AMD EFLAGS variant. */
786IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_amd =
787{
788 NULL, NULL,
789 iemAImpl_imul_two_u16_amd, NULL,
790 iemAImpl_imul_two_u32_amd, NULL,
791 iemAImpl_imul_two_u64_amd, NULL
792};
793
794/** Function table for the IMUL instruction, Intel EFLAGS variant. */
795IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two_intel =
796{
797 NULL, NULL,
798 iemAImpl_imul_two_u16_intel, NULL,
799 iemAImpl_imul_two_u32_intel, NULL,
800 iemAImpl_imul_two_u64_intel, NULL
801};
802
803/** EFLAGS variation selection table for the IMUL instruction. */
804IEM_STATIC const IEMOPBINSIZES * const g_iemAImpl_imul_two_eflags[] =
805{
806 &g_iemAImpl_imul_two,
807 &g_iemAImpl_imul_two_intel,
808 &g_iemAImpl_imul_two_amd,
809 &g_iemAImpl_imul_two,
810};
811
812/** EFLAGS variation selection table for the 16-bit IMUL instruction. */
813IEM_STATIC PFNIEMAIMPLBINU16 const g_iemAImpl_imul_two_u16_eflags[] =
814{
815 iemAImpl_imul_two_u16,
816 iemAImpl_imul_two_u16_intel,
817 iemAImpl_imul_two_u16_amd,
818 iemAImpl_imul_two_u16,
819};
820
821/** EFLAGS variation selection table for the 32-bit IMUL instruction. */
822IEM_STATIC PFNIEMAIMPLBINU32 const g_iemAImpl_imul_two_u32_eflags[] =
823{
824 iemAImpl_imul_two_u32,
825 iemAImpl_imul_two_u32_intel,
826 iemAImpl_imul_two_u32_amd,
827 iemAImpl_imul_two_u32,
828};
829
830/** EFLAGS variation selection table for the 64-bit IMUL instruction. */
831IEM_STATIC PFNIEMAIMPLBINU64 const g_iemAImpl_imul_two_u64_eflags[] =
832{
833 iemAImpl_imul_two_u64,
834 iemAImpl_imul_two_u64_intel,
835 iemAImpl_imul_two_u64_amd,
836 iemAImpl_imul_two_u64,
837};
838
839/** Group 1 /r lookup table. */
840IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
841{
842 &g_iemAImpl_add,
843 &g_iemAImpl_or,
844 &g_iemAImpl_adc,
845 &g_iemAImpl_sbb,
846 &g_iemAImpl_and,
847 &g_iemAImpl_sub,
848 &g_iemAImpl_xor,
849 &g_iemAImpl_cmp
850};
851
852/** Function table for the INC instruction. */
853IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
854{
855 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
856 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
857 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
858 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
859};
860
861/** Function table for the DEC instruction. */
862IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
863{
864 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
865 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
866 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
867 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
868};
869
870/** Function table for the NEG instruction. */
871IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
872{
873 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
874 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
875 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
876 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
877};
878
879/** Function table for the NOT instruction. */
880IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
881{
882 iemAImpl_not_u8, iemAImpl_not_u8_locked,
883 iemAImpl_not_u16, iemAImpl_not_u16_locked,
884 iemAImpl_not_u32, iemAImpl_not_u32_locked,
885 iemAImpl_not_u64, iemAImpl_not_u64_locked
886};
887
888
889/** Function table for the ROL instruction. */
890IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
891{
892 iemAImpl_rol_u8,
893 iemAImpl_rol_u16,
894 iemAImpl_rol_u32,
895 iemAImpl_rol_u64
896};
897
898/** Function table for the ROL instruction, AMD EFLAGS variant. */
899IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_amd =
900{
901 iemAImpl_rol_u8_amd,
902 iemAImpl_rol_u16_amd,
903 iemAImpl_rol_u32_amd,
904 iemAImpl_rol_u64_amd
905};
906
907/** Function table for the ROL instruction, Intel EFLAGS variant. */
908IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol_intel =
909{
910 iemAImpl_rol_u8_intel,
911 iemAImpl_rol_u16_intel,
912 iemAImpl_rol_u32_intel,
913 iemAImpl_rol_u64_intel
914};
915
916/** EFLAGS variation selection table for the ROL instruction. */
917IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rol_eflags[] =
918{
919 &g_iemAImpl_rol,
920 &g_iemAImpl_rol_intel,
921 &g_iemAImpl_rol_amd,
922 &g_iemAImpl_rol,
923};
924
925
926/** Function table for the ROR instruction. */
927IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
928{
929 iemAImpl_ror_u8,
930 iemAImpl_ror_u16,
931 iemAImpl_ror_u32,
932 iemAImpl_ror_u64
933};
934
935/** Function table for the ROR instruction, AMD EFLAGS variant. */
936IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_amd =
937{
938 iemAImpl_ror_u8_amd,
939 iemAImpl_ror_u16_amd,
940 iemAImpl_ror_u32_amd,
941 iemAImpl_ror_u64_amd
942};
943
944/** Function table for the ROR instruction, Intel EFLAGS variant. */
945IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror_intel =
946{
947 iemAImpl_ror_u8_intel,
948 iemAImpl_ror_u16_intel,
949 iemAImpl_ror_u32_intel,
950 iemAImpl_ror_u64_intel
951};
952
953/** EFLAGS variation selection table for the ROR instruction. */
954IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_ror_eflags[] =
955{
956 &g_iemAImpl_ror,
957 &g_iemAImpl_ror_intel,
958 &g_iemAImpl_ror_amd,
959 &g_iemAImpl_ror,
960};
961
962
963/** Function table for the RCL instruction. */
964IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
965{
966 iemAImpl_rcl_u8,
967 iemAImpl_rcl_u16,
968 iemAImpl_rcl_u32,
969 iemAImpl_rcl_u64
970};
971
972/** Function table for the RCL instruction, AMD EFLAGS variant. */
973IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_amd =
974{
975 iemAImpl_rcl_u8_amd,
976 iemAImpl_rcl_u16_amd,
977 iemAImpl_rcl_u32_amd,
978 iemAImpl_rcl_u64_amd
979};
980
981/** Function table for the RCL instruction, Intel EFLAGS variant. */
982IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl_intel =
983{
984 iemAImpl_rcl_u8_intel,
985 iemAImpl_rcl_u16_intel,
986 iemAImpl_rcl_u32_intel,
987 iemAImpl_rcl_u64_intel
988};
989
990/** EFLAGS variation selection table for the RCL instruction. */
991IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcl_eflags[] =
992{
993 &g_iemAImpl_rcl,
994 &g_iemAImpl_rcl_intel,
995 &g_iemAImpl_rcl_amd,
996 &g_iemAImpl_rcl,
997};
998
999
1000/** Function table for the RCR instruction. */
1001IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
1002{
1003 iemAImpl_rcr_u8,
1004 iemAImpl_rcr_u16,
1005 iemAImpl_rcr_u32,
1006 iemAImpl_rcr_u64
1007};
1008
1009/** Function table for the RCR instruction, AMD EFLAGS variant. */
1010IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_amd =
1011{
1012 iemAImpl_rcr_u8_amd,
1013 iemAImpl_rcr_u16_amd,
1014 iemAImpl_rcr_u32_amd,
1015 iemAImpl_rcr_u64_amd
1016};
1017
1018/** Function table for the RCR instruction, Intel EFLAGS variant. */
1019IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr_intel =
1020{
1021 iemAImpl_rcr_u8_intel,
1022 iemAImpl_rcr_u16_intel,
1023 iemAImpl_rcr_u32_intel,
1024 iemAImpl_rcr_u64_intel
1025};
1026
1027/** EFLAGS variation selection table for the RCR instruction. */
1028IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_rcr_eflags[] =
1029{
1030 &g_iemAImpl_rcr,
1031 &g_iemAImpl_rcr_intel,
1032 &g_iemAImpl_rcr_amd,
1033 &g_iemAImpl_rcr,
1034};
1035
1036
1037/** Function table for the SHL instruction. */
1038IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
1039{
1040 iemAImpl_shl_u8,
1041 iemAImpl_shl_u16,
1042 iemAImpl_shl_u32,
1043 iemAImpl_shl_u64
1044};
1045
1046/** Function table for the SHL instruction, AMD EFLAGS variant. */
1047IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_amd =
1048{
1049 iemAImpl_shl_u8_amd,
1050 iemAImpl_shl_u16_amd,
1051 iemAImpl_shl_u32_amd,
1052 iemAImpl_shl_u64_amd
1053};
1054
1055/** Function table for the SHL instruction, Intel EFLAGS variant. */
1056IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl_intel =
1057{
1058 iemAImpl_shl_u8_intel,
1059 iemAImpl_shl_u16_intel,
1060 iemAImpl_shl_u32_intel,
1061 iemAImpl_shl_u64_intel
1062};
1063
1064/** EFLAGS variation selection table for the SHL instruction. */
1065IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shl_eflags[] =
1066{
1067 &g_iemAImpl_shl,
1068 &g_iemAImpl_shl_intel,
1069 &g_iemAImpl_shl_amd,
1070 &g_iemAImpl_shl,
1071};
1072
1073
1074/** Function table for the SHR instruction. */
1075IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
1076{
1077 iemAImpl_shr_u8,
1078 iemAImpl_shr_u16,
1079 iemAImpl_shr_u32,
1080 iemAImpl_shr_u64
1081};
1082
1083/** Function table for the SHR instruction, AMD EFLAGS variant. */
1084IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_amd =
1085{
1086 iemAImpl_shr_u8_amd,
1087 iemAImpl_shr_u16_amd,
1088 iemAImpl_shr_u32_amd,
1089 iemAImpl_shr_u64_amd
1090};
1091
1092/** Function table for the SHR instruction, Intel EFLAGS variant. */
1093IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr_intel =
1094{
1095 iemAImpl_shr_u8_intel,
1096 iemAImpl_shr_u16_intel,
1097 iemAImpl_shr_u32_intel,
1098 iemAImpl_shr_u64_intel
1099};
1100
1101/** EFLAGS variation selection table for the SHR instruction. */
1102IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_shr_eflags[] =
1103{
1104 &g_iemAImpl_shr,
1105 &g_iemAImpl_shr_intel,
1106 &g_iemAImpl_shr_amd,
1107 &g_iemAImpl_shr,
1108};
1109
1110
1111/** Function table for the SAR instruction. */
1112IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
1113{
1114 iemAImpl_sar_u8,
1115 iemAImpl_sar_u16,
1116 iemAImpl_sar_u32,
1117 iemAImpl_sar_u64
1118};
1119
1120/** Function table for the SAR instruction, AMD EFLAGS variant. */
1121IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_amd =
1122{
1123 iemAImpl_sar_u8_amd,
1124 iemAImpl_sar_u16_amd,
1125 iemAImpl_sar_u32_amd,
1126 iemAImpl_sar_u64_amd
1127};
1128
1129/** Function table for the SAR instruction, Intel EFLAGS variant. */
1130IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar_intel =
1131{
1132 iemAImpl_sar_u8_intel,
1133 iemAImpl_sar_u16_intel,
1134 iemAImpl_sar_u32_intel,
1135 iemAImpl_sar_u64_intel
1136};
1137
1138/** EFLAGS variation selection table for the SAR instruction. */
1139IEM_STATIC const IEMOPSHIFTSIZES * const g_iemAImpl_sar_eflags[] =
1140{
1141 &g_iemAImpl_sar,
1142 &g_iemAImpl_sar_intel,
1143 &g_iemAImpl_sar_amd,
1144 &g_iemAImpl_sar,
1145};
1146
1147
1148/** Function table for the MUL instruction. */
1149IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
1150{
1151 iemAImpl_mul_u8,
1152 iemAImpl_mul_u16,
1153 iemAImpl_mul_u32,
1154 iemAImpl_mul_u64
1155};
1156
1157/** Function table for the MUL instruction, AMD EFLAGS variation. */
1158IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_amd =
1159{
1160 iemAImpl_mul_u8_amd,
1161 iemAImpl_mul_u16_amd,
1162 iemAImpl_mul_u32_amd,
1163 iemAImpl_mul_u64_amd
1164};
1165
1166/** Function table for the MUL instruction, Intel EFLAGS variation. */
1167IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul_intel =
1168{
1169 iemAImpl_mul_u8_intel,
1170 iemAImpl_mul_u16_intel,
1171 iemAImpl_mul_u32_intel,
1172 iemAImpl_mul_u64_intel
1173};
1174
1175/** EFLAGS variation selection table for the MUL instruction. */
1176IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_mul_eflags[] =
1177{
1178 &g_iemAImpl_mul,
1179 &g_iemAImpl_mul_intel,
1180 &g_iemAImpl_mul_amd,
1181 &g_iemAImpl_mul,
1182};
1183
1184/** EFLAGS variation selection table for the 8-bit MUL instruction. */
1185IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_mul_u8_eflags[] =
1186{
1187 iemAImpl_mul_u8,
1188 iemAImpl_mul_u8_intel,
1189 iemAImpl_mul_u8_amd,
1190 iemAImpl_mul_u8
1191};
1192
1193
1194/** Function table for the IMUL instruction working implicitly on rAX. */
1195IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
1196{
1197 iemAImpl_imul_u8,
1198 iemAImpl_imul_u16,
1199 iemAImpl_imul_u32,
1200 iemAImpl_imul_u64
1201};
1202
1203/** Function table for the IMUL instruction working implicitly on rAX, AMD EFLAGS variation. */
1204IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_amd =
1205{
1206 iemAImpl_imul_u8_amd,
1207 iemAImpl_imul_u16_amd,
1208 iemAImpl_imul_u32_amd,
1209 iemAImpl_imul_u64_amd
1210};
1211
1212/** Function table for the IMUL instruction working implicitly on rAX, Intel EFLAGS variation. */
1213IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul_intel =
1214{
1215 iemAImpl_imul_u8_intel,
1216 iemAImpl_imul_u16_intel,
1217 iemAImpl_imul_u32_intel,
1218 iemAImpl_imul_u64_intel
1219};
1220
1221/** EFLAGS variation selection table for the IMUL instruction. */
1222IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_imul_eflags[] =
1223{
1224 &g_iemAImpl_imul,
1225 &g_iemAImpl_imul_intel,
1226 &g_iemAImpl_imul_amd,
1227 &g_iemAImpl_imul,
1228};
1229
1230/** EFLAGS variation selection table for the 8-bit IMUL instruction. */
1231IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_imul_u8_eflags[] =
1232{
1233 iemAImpl_imul_u8,
1234 iemAImpl_imul_u8_intel,
1235 iemAImpl_imul_u8_amd,
1236 iemAImpl_imul_u8
1237};
1238
1239
1240/** Function table for the DIV instruction. */
1241IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
1242{
1243 iemAImpl_div_u8,
1244 iemAImpl_div_u16,
1245 iemAImpl_div_u32,
1246 iemAImpl_div_u64
1247};
1248
1249/** Function table for the DIV instruction, AMD EFLAGS variation. */
1250IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_amd =
1251{
1252 iemAImpl_div_u8_amd,
1253 iemAImpl_div_u16_amd,
1254 iemAImpl_div_u32_amd,
1255 iemAImpl_div_u64_amd
1256};
1257
1258/** Function table for the DIV instruction, Intel EFLAGS variation. */
1259IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div_intel =
1260{
1261 iemAImpl_div_u8_intel,
1262 iemAImpl_div_u16_intel,
1263 iemAImpl_div_u32_intel,
1264 iemAImpl_div_u64_intel
1265};
1266
1267/** EFLAGS variation selection table for the DIV instruction. */
1268IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_div_eflags[] =
1269{
1270 &g_iemAImpl_div,
1271 &g_iemAImpl_div_intel,
1272 &g_iemAImpl_div_amd,
1273 &g_iemAImpl_div,
1274};
1275
1276/** EFLAGS variation selection table for the 8-bit DIV instruction. */
1277IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_div_u8_eflags[] =
1278{
1279 iemAImpl_div_u8,
1280 iemAImpl_div_u8_intel,
1281 iemAImpl_div_u8_amd,
1282 iemAImpl_div_u8
1283};
1284
1285
1286/** Function table for the IDIV instruction. */
1287IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
1288{
1289 iemAImpl_idiv_u8,
1290 iemAImpl_idiv_u16,
1291 iemAImpl_idiv_u32,
1292 iemAImpl_idiv_u64
1293};
1294
1295/** Function table for the IDIV instruction, AMD EFLAGS variation. */
1296IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_amd =
1297{
1298 iemAImpl_idiv_u8_amd,
1299 iemAImpl_idiv_u16_amd,
1300 iemAImpl_idiv_u32_amd,
1301 iemAImpl_idiv_u64_amd
1302};
1303
1304/** Function table for the IDIV instruction, Intel EFLAGS variation. */
1305IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv_intel =
1306{
1307 iemAImpl_idiv_u8_intel,
1308 iemAImpl_idiv_u16_intel,
1309 iemAImpl_idiv_u32_intel,
1310 iemAImpl_idiv_u64_intel
1311};
1312
1313/** EFLAGS variation selection table for the IDIV instruction. */
1314IEM_STATIC const IEMOPMULDIVSIZES * const g_iemAImpl_idiv_eflags[] =
1315{
1316 &g_iemAImpl_idiv,
1317 &g_iemAImpl_idiv_intel,
1318 &g_iemAImpl_idiv_amd,
1319 &g_iemAImpl_idiv,
1320};
1321
1322/** EFLAGS variation selection table for the 8-bit IDIV instruction. */
1323IEM_STATIC PFNIEMAIMPLMULDIVU8 const g_iemAImpl_idiv_u8_eflags[] =
1324{
1325 iemAImpl_idiv_u8,
1326 iemAImpl_idiv_u8_intel,
1327 iemAImpl_idiv_u8_amd,
1328 iemAImpl_idiv_u8
1329};
1330
1331
1332/** Function table for the SHLD instruction. */
1333IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
1334{
1335 iemAImpl_shld_u16,
1336 iemAImpl_shld_u32,
1337 iemAImpl_shld_u64,
1338};
1339
1340/** Function table for the SHLD instruction, AMD EFLAGS variation. */
1341IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_amd =
1342{
1343 iemAImpl_shld_u16_amd,
1344 iemAImpl_shld_u32_amd,
1345 iemAImpl_shld_u64_amd
1346};
1347
1348/** Function table for the SHLD instruction, Intel EFLAGS variation. */
1349IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld_intel =
1350{
1351 iemAImpl_shld_u16_intel,
1352 iemAImpl_shld_u32_intel,
1353 iemAImpl_shld_u64_intel
1354};
1355
1356/** EFLAGS variation selection table for the SHLD instruction. */
1357IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shld_eflags[] =
1358{
1359 &g_iemAImpl_shld,
1360 &g_iemAImpl_shld_intel,
1361 &g_iemAImpl_shld_amd,
1362 &g_iemAImpl_shld
1363};
1364
1365/** Function table for the SHRD instruction. */
1366IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
1367{
1368 iemAImpl_shrd_u16,
1369 iemAImpl_shrd_u32,
1370 iemAImpl_shrd_u64
1371};
1372
1373/** Function table for the SHRD instruction, AMD EFLAGS variation. */
1374IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_amd =
1375{
1376 iemAImpl_shrd_u16_amd,
1377 iemAImpl_shrd_u32_amd,
1378 iemAImpl_shrd_u64_amd
1379};
1380
1381/** Function table for the SHRD instruction, Intel EFLAGS variation. */
1382IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd_intel =
1383{
1384 iemAImpl_shrd_u16_intel,
1385 iemAImpl_shrd_u32_intel,
1386 iemAImpl_shrd_u64_intel
1387};
1388
1389/** EFLAGS variation selection table for the SHRD instruction. */
1390IEM_STATIC const IEMOPSHIFTDBLSIZES * const g_iemAImpl_shrd_eflags[] =
1391{
1392 &g_iemAImpl_shrd,
1393 &g_iemAImpl_shrd_intel,
1394 &g_iemAImpl_shrd_amd,
1395 &g_iemAImpl_shrd
1396};
1397
1398
1399/** Function table for the PUNPCKLBW instruction */
1400IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
1401/** Function table for the PUNPCKLWD instruction */
1402IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
1403/** Function table for the PUNPCKLDQ instruction */
1404IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
1405/** Function table for the PUNPCKLQDQ instruction */
1406IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
1407
1408/** Function table for the PUNPCKHBW instruction */
1409IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
1410/** Function table for the PUNPCKHWD instruction */
1411IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
1412/** Function table for the PUNPCKHDQ instruction */
1413IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
1414/** Function table for the PUNPCKHQDQ instruction */
1415IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
1416
1417/** Function table for the PXOR instruction */
1418IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
1419/** Function table for the PCMPEQB instruction */
1420IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
1421/** Function table for the PCMPEQW instruction */
1422IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
1423/** Function table for the PCMPEQD instruction */
1424IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
1425
1426
1427#if defined(IEM_LOG_MEMORY_WRITES)
1428/** What IEM just wrote. */
1429uint8_t g_abIemWrote[256];
1430/** How much IEM just wrote. */
1431size_t g_cbIemWrote;
1432#endif
1433
1434
1435/*********************************************************************************************************************************
1436* Internal Functions *
1437*********************************************************************************************************************************/
1438IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1439IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
1440IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
1441IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1442/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
1443IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1444IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1445IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
1446IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
1447IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
1448IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
1449IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
1450IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1451IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
1452IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1453IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
1454IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
1455#ifdef IEM_WITH_SETJMP
1456DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
1457DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
1458DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1459DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
1460DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
1461#endif
1462
1463IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
1464IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
1465IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1466IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1467IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1468IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1469IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1470IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1471IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
1472IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
1473IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
1474IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
1475IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
1476IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
1477IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
1478IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
1479DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
1480DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
1481
1482#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1483IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
1484IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
1485IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
1486IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
1487IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
1488IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
1489IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
1490IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr);
1491#endif
1492
1493#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1494IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
1495IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
1496#endif
1497
1498
1499/**
1500 * Sets the pass up status.
1501 *
1502 * @returns VINF_SUCCESS.
1503 * @param pVCpu The cross context virtual CPU structure of the
1504 * calling thread.
1505 * @param rcPassUp The pass up status. Must be informational.
1506 * VINF_SUCCESS is not allowed.
1507 */
1508IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1509{
1510 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1511
1512 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1513 if (rcOldPassUp == VINF_SUCCESS)
1514 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1515 /* If both are EM scheduling codes, use EM priority rules. */
1516 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1517 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1518 {
1519 if (rcPassUp < rcOldPassUp)
1520 {
1521 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1522 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1523 }
1524 else
1525 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1526 }
1527 /* Override EM scheduling with specific status code. */
1528 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1529 {
1530 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1531 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1532 }
1533 /* Don't override specific status code, first come first served. */
1534 else
1535 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1536 return VINF_SUCCESS;
1537}
1538
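/* Typical use (sketch): when a handler returns an informational status that
 * should only reach EM after the current instruction has completed, the caller
 * stashes it instead of returning it right away:
 *
 *      if (rcStrict2 != VINF_SUCCESS)          // informational, per the assertions above
 *          rcStrict = iemSetPassUpStatus(pVCpu, rcStrict2);  // records it, returns VINF_SUCCESS
 *
 * The priority rules in the function body then decide which of several such
 * codes ultimately wins when IEM returns to its caller.
 */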
1539
1540/**
1541 * Calculates the CPU mode.
1542 *
1543 * This is mainly for updating IEMCPU::enmCpuMode.
1544 *
1545 * @returns CPU mode.
1546 * @param pVCpu The cross context virtual CPU structure of the
1547 * calling thread.
1548 */
1549DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1550{
1551 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1552 return IEMMODE_64BIT;
1553 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1554 return IEMMODE_32BIT;
1555 return IEMMODE_16BIT;
1556}
1557
1558
1559/**
1560 * Initializes the execution state.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the
1563 * calling thread.
1564 * @param fBypassHandlers Whether to bypass access handlers.
1565 *
1566 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1567 * side-effects in strict builds.
1568 */
1569DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1570{
1571 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1572 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1573 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1574 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1575 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1576 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1578 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1579 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1581
1582 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1583 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1584#ifdef VBOX_STRICT
1585 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1586 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1587 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1588 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1589 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1590 pVCpu->iem.s.uRexReg = 127;
1591 pVCpu->iem.s.uRexB = 127;
1592 pVCpu->iem.s.offModRm = 127;
1593 pVCpu->iem.s.uRexIndex = 127;
1594 pVCpu->iem.s.iEffSeg = 127;
1595 pVCpu->iem.s.idxPrefix = 127;
1596 pVCpu->iem.s.uVex3rdReg = 127;
1597 pVCpu->iem.s.uVexLength = 127;
1598 pVCpu->iem.s.fEvexStuff = 127;
1599 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1600# ifdef IEM_WITH_CODE_TLB
1601 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1602 pVCpu->iem.s.pbInstrBuf = NULL;
1603 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1604 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1605 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1606 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1607# else
1608 pVCpu->iem.s.offOpcode = 127;
1609 pVCpu->iem.s.cbOpcode = 127;
1610# endif
1611#endif
1612
1613 pVCpu->iem.s.cActiveMappings = 0;
1614 pVCpu->iem.s.iNextMapping = 0;
1615 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1616 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1617#if 0
1618#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1619 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1620 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1621 {
1622 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1623 Assert(pVmcs);
1624 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1625 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1626 {
1627 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1628 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1629 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1630 AssertRC(rc);
1631 }
1632 }
1633#endif
1634#endif
1635}
1636
1637#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1638/**
1639 * Performs a minimal reinitialization of the execution state.
1640 *
1641 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1642 * 'world-switch' type operations on the CPU. Currently only nested
1643 * hardware-virtualization uses it.
1644 *
1645 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1646 */
1647IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1648{
1649 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1650 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1651
1652 pVCpu->iem.s.uCpl = uCpl;
1653 pVCpu->iem.s.enmCpuMode = enmMode;
1654 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1655 pVCpu->iem.s.enmEffAddrMode = enmMode;
1656 if (enmMode != IEMMODE_64BIT)
1657 {
1658 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1659 pVCpu->iem.s.enmEffOpSize = enmMode;
1660 }
1661 else
1662 {
1663 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1664 pVCpu->iem.s.enmEffOpSize = enmMode;
1665 }
1666 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1667#ifndef IEM_WITH_CODE_TLB
1668 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1669 pVCpu->iem.s.offOpcode = 0;
1670 pVCpu->iem.s.cbOpcode = 0;
1671#endif
1672 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1673}
1674#endif
1675
1676/**
1677 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1678 *
1679 * @param pVCpu The cross context virtual CPU structure of the
1680 * calling thread.
1681 */
1682DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1683{
1684 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1685#ifdef VBOX_STRICT
1686# ifdef IEM_WITH_CODE_TLB
1687 NOREF(pVCpu);
1688# else
1689 pVCpu->iem.s.cbOpcode = 0;
1690# endif
1691#else
1692 NOREF(pVCpu);
1693#endif
1694}
1695
1696
1697/**
1698 * Initializes the decoder state.
1699 *
1700 * iemReInitDecoder is mostly a copy of this function.
1701 *
1702 * @param pVCpu The cross context virtual CPU structure of the
1703 * calling thread.
1704 * @param fBypassHandlers Whether to bypass access handlers.
1705 * @param fDisregardLock Whether to disregard the LOCK prefix.
1706 */
1707DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1708{
1709 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1710 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1711 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1712 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1713 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1714 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1715 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1716 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1717 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1718 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1719
1720 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1721 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1722 pVCpu->iem.s.enmCpuMode = enmMode;
1723 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1724 pVCpu->iem.s.enmEffAddrMode = enmMode;
1725 if (enmMode != IEMMODE_64BIT)
1726 {
1727 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1728 pVCpu->iem.s.enmEffOpSize = enmMode;
1729 }
1730 else
1731 {
1732 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1733 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1734 }
1735 pVCpu->iem.s.fPrefixes = 0;
1736 pVCpu->iem.s.uRexReg = 0;
1737 pVCpu->iem.s.uRexB = 0;
1738 pVCpu->iem.s.uRexIndex = 0;
1739 pVCpu->iem.s.idxPrefix = 0;
1740 pVCpu->iem.s.uVex3rdReg = 0;
1741 pVCpu->iem.s.uVexLength = 0;
1742 pVCpu->iem.s.fEvexStuff = 0;
1743 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1744#ifdef IEM_WITH_CODE_TLB
1745 pVCpu->iem.s.pbInstrBuf = NULL;
1746 pVCpu->iem.s.offInstrNextByte = 0;
1747 pVCpu->iem.s.offCurInstrStart = 0;
1748# ifdef VBOX_STRICT
1749 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1750 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1751 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1752# endif
1753#else
1754 pVCpu->iem.s.offOpcode = 0;
1755 pVCpu->iem.s.cbOpcode = 0;
1756#endif
1757 pVCpu->iem.s.offModRm = 0;
1758 pVCpu->iem.s.cActiveMappings = 0;
1759 pVCpu->iem.s.iNextMapping = 0;
1760 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1761 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1762 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1763
1764#ifdef DBGFTRACE_ENABLED
1765 switch (enmMode)
1766 {
1767 case IEMMODE_64BIT:
1768 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1769 break;
1770 case IEMMODE_32BIT:
1771 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1772 break;
1773 case IEMMODE_16BIT:
1774 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1775 break;
1776 }
1777#endif
1778}
1779
1780
1781/**
1782 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1783 *
1784 * This is mostly a copy of iemInitDecoder.
1785 *
1786 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1787 */
1788DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1789{
1790 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1791 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1792 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1793 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1794 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1795 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1796 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1797 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1798 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1799
1800 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1801 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1802 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1803 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1804 pVCpu->iem.s.enmEffAddrMode = enmMode;
1805 if (enmMode != IEMMODE_64BIT)
1806 {
1807 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1808 pVCpu->iem.s.enmEffOpSize = enmMode;
1809 }
1810 else
1811 {
1812 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1813 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1814 }
1815 pVCpu->iem.s.fPrefixes = 0;
1816 pVCpu->iem.s.uRexReg = 0;
1817 pVCpu->iem.s.uRexB = 0;
1818 pVCpu->iem.s.uRexIndex = 0;
1819 pVCpu->iem.s.idxPrefix = 0;
1820 pVCpu->iem.s.uVex3rdReg = 0;
1821 pVCpu->iem.s.uVexLength = 0;
1822 pVCpu->iem.s.fEvexStuff = 0;
1823 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1824#ifdef IEM_WITH_CODE_TLB
1825 if (pVCpu->iem.s.pbInstrBuf)
1826 {
1827 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1828 - pVCpu->iem.s.uInstrBufPc;
1829 if (off < pVCpu->iem.s.cbInstrBufTotal)
1830 {
1831 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1832 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1833 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1834 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1835 else
1836 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1837 }
1838 else
1839 {
1840 pVCpu->iem.s.pbInstrBuf = NULL;
1841 pVCpu->iem.s.offInstrNextByte = 0;
1842 pVCpu->iem.s.offCurInstrStart = 0;
1843 pVCpu->iem.s.cbInstrBuf = 0;
1844 pVCpu->iem.s.cbInstrBufTotal = 0;
1845 }
1846 }
1847 else
1848 {
1849 pVCpu->iem.s.offInstrNextByte = 0;
1850 pVCpu->iem.s.offCurInstrStart = 0;
1851 pVCpu->iem.s.cbInstrBuf = 0;
1852 pVCpu->iem.s.cbInstrBufTotal = 0;
1853 }
1854#else
1855 pVCpu->iem.s.cbOpcode = 0;
1856 pVCpu->iem.s.offOpcode = 0;
1857#endif
1858 pVCpu->iem.s.offModRm = 0;
1859 Assert(pVCpu->iem.s.cActiveMappings == 0);
1860 pVCpu->iem.s.iNextMapping = 0;
1861 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1862 Assert(pVCpu->iem.s.fBypassHandlers == false);
1863
1864#ifdef DBGFTRACE_ENABLED
1865 switch (enmMode)
1866 {
1867 case IEMMODE_64BIT:
1868 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1869 break;
1870 case IEMMODE_32BIT:
1871 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1872 break;
1873 case IEMMODE_16BIT:
1874 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1875 break;
1876 }
1877#endif
1878}
1879
1880
1881
1882/**
1883 * Prefetches opcodes the first time, i.e. when starting execution.
1884 *
1885 * @returns Strict VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure of the
1887 * calling thread.
1888 * @param fBypassHandlers Whether to bypass access handlers.
1889 * @param fDisregardLock Whether to disregard LOCK prefixes.
1890 *
1891 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1892 * store them as such.
1893 */
1894IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1895{
1896 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1897
1898#ifdef IEM_WITH_CODE_TLB
1899 /** @todo Do ITLB lookup here. */
1900
1901#else /* !IEM_WITH_CODE_TLB */
1902
1903 /*
1904 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1905 *
1906 * First translate CS:rIP to a physical address.
1907 */
1908 uint32_t cbToTryRead;
1909 RTGCPTR GCPtrPC;
1910 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1911 {
1912 cbToTryRead = GUEST_PAGE_SIZE;
1913 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1914 if (IEM_IS_CANONICAL(GCPtrPC))
1915 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1916 else
1917 return iemRaiseGeneralProtectionFault0(pVCpu);
1918 }
1919 else
1920 {
1921 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1922 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1923 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1924 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1925 else
1926 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1927 if (cbToTryRead) { /* likely */ }
1928 else /* overflowed */
1929 {
1930 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1931 cbToTryRead = UINT32_MAX;
1932 }
1933 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1934 Assert(GCPtrPC <= UINT32_MAX);
1935 }
1936
1937 PGMPTWALK Walk;
1938 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1939 if (RT_SUCCESS(rc))
1940 Assert(Walk.fSucceeded); /* probable. */
1941 else
1942 {
1943 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1944#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1945 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1946 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1947#endif
1948 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1949 }
1950 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1951 else
1952 {
1953 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1954#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1955 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1956 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1957#endif
1958 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1959 }
1960 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1961 else
1962 {
1963 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1964#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1965 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1966 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1967#endif
1968 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1969 }
1970 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1971 /** @todo Check reserved bits and such stuff. PGM is better at doing
1972 * that, so do it when implementing the guest virtual address
1973 * TLB... */
1974
1975 /*
1976 * Read the bytes at this address.
1977 */
1978 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1979 if (cbToTryRead > cbLeftOnPage)
1980 cbToTryRead = cbLeftOnPage;
1981 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1982 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1983
1984 if (!pVCpu->iem.s.fBypassHandlers)
1985 {
1986 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1987 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1988 { /* likely */ }
1989 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1990 {
1991 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1992 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1993 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1994 }
1995 else
1996 {
1997 Log((RT_SUCCESS(rcStrict)
1998 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1999 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2000 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2001 return rcStrict;
2002 }
2003 }
2004 else
2005 {
2006 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
2007 if (RT_SUCCESS(rc))
2008 { /* likely */ }
2009 else
2010 {
2011 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
2012 GCPtrPC, GCPhys, cbToTryRead, rc));
2013 return rc;
2014 }
2015 }
2016 pVCpu->iem.s.cbOpcode = cbToTryRead;
2017#endif /* !IEM_WITH_CODE_TLB */
2018 return VINF_SUCCESS;
2019}
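
/*
 * Worked example (illustrative): in 32-bit protected mode the prefetch above
 * translates CS:EIP to a linear and then a guest-physical address roughly as
 *
 *     GCPtrPC = pVCpu->cpum.GstCtx.cs.u64Base + pVCpu->cpum.GstCtx.eip;     // linear
 *     GCPhys  = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);           // physical
 *
 * and it never reads past the end of the page nor more than sizeof(abOpcode) bytes.
 */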
2020
2021
2022/**
2023 * Invalidates the IEM TLBs.
2024 *
2025 * This is called internally as well as by PGM when moving GC mappings.
2026 *
2027 *
2028 * @param pVCpu The cross context virtual CPU structure of the calling
2029 * thread.
2030 * @param fVmm Set when PGM calls us with a remapping.
2031 */
2032VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
2033{
2034#ifdef IEM_WITH_CODE_TLB
2035 pVCpu->iem.s.cbInstrBufTotal = 0;
2036 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
2037 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
2038 { /* very likely */ }
2039 else
2040 {
2041 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
2042 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
2043 while (i-- > 0)
2044 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
2045 }
2046#endif
2047
2048#ifdef IEM_WITH_DATA_TLB
2049 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
2050 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
2051 { /* very likely */ }
2052 else
2053 {
2054 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
2055 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
2056 while (i-- > 0)
2057 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
2058 }
2059#endif
2060 NOREF(pVCpu); NOREF(fVmm);
2061}
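
/*
 * Illustration (not part of the real code): lookups tag each TLB entry with the
 * current revision, so bumping uTlbRevision above invalidates every entry at
 * once without touching the array.  The code TLB fetch path does roughly:
 *
 *     uint64_t const uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *     bool const     fHit  = pTlbe->uTag == uTag;    // a stale revision can never match
 */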
2062
2063
2064/**
2065 * Invalidates a page in the TLBs.
2066 *
2067 * @param pVCpu The cross context virtual CPU structure of the calling
2068 * thread.
2069 * @param GCPtr The address of the page to invalidate.
2070 */
2071VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
2072{
2073#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
2074 GCPtr = GCPtr >> X86_PAGE_SHIFT;
2075 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
2076 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
2077 uintptr_t idx = (uint8_t)GCPtr;
2078
2079# ifdef IEM_WITH_CODE_TLB
2080 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
2081 {
2082 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
2083 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
2084 pVCpu->iem.s.cbInstrBufTotal = 0;
2085 }
2086# endif
2087
2088# ifdef IEM_WITH_DATA_TLB
2089 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
2090 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
2091# endif
2092#else
2093 NOREF(pVCpu); NOREF(GCPtr);
2094#endif
2095}
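
/*
 * Note on the indexing above (illustrative): the TLBs are direct mapped with
 * 256 entries, so a linear address selects its slot via the low 8 bits of the
 * page number:
 *
 *     uintptr_t const idx = (uint8_t)(GCPtr >> X86_PAGE_SHIFT);   // slot 0..255
 */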
2096
2097
2098/**
2099 * Invalidates the host physical aspects of the IEM TLBs.
2100 *
2101 * This is called internally as well as by PGM when moving GC mappings.
2102 *
2103 * @param pVCpu The cross context virtual CPU structure of the calling
2104 * thread.
2105 */
2106VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
2107{
2108#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
2109 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
2110
2111# ifdef IEM_WITH_CODE_TLB
2112 pVCpu->iem.s.cbInstrBufTotal = 0;
2113# endif
2114 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
2115 if (uTlbPhysRev != 0)
2116 {
2117 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
2118 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
2119 }
2120 else
2121 {
2122 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
2123 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
2124
2125 unsigned i;
2126# ifdef IEM_WITH_CODE_TLB
2127 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
2128 while (i-- > 0)
2129 {
2130 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
2131 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
2132 }
2133# endif
2134# ifdef IEM_WITH_DATA_TLB
2135 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
2136 while (i-- > 0)
2137 {
2138 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
2139 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
2140 }
2141# endif
2142 }
2143#else
2144 NOREF(pVCpu);
2145#endif
2146}
2147
2148
2149/**
2150 * Invalidates the host physical aspects of the IEM TLBs.
2151 *
2152 * This is called internally as well as by PGM when moving GC mappings.
2153 *
2154 * @param pVM The cross context VM structure.
2155 *
2156 * @remarks Caller holds the PGM lock.
2157 */
2158VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
2159{
2160 RT_NOREF_PV(pVM);
2161}
2162
2163#ifdef IEM_WITH_CODE_TLB
2164
2165/**
2166 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
2167 * failure and longjmp'ing.
2168 *
2169 * We end up here for a number of reasons:
2170 * - pbInstrBuf isn't yet initialized.
2171 * - Advancing beyond the buffer boundary (e.g. cross page).
2172 * - Advancing beyond the CS segment limit.
2173 * - Fetching from non-mappable page (e.g. MMIO).
2174 *
2175 * @param pVCpu The cross context virtual CPU structure of the
2176 * calling thread.
2177 * @param pvDst Where to return the bytes.
2178 * @param cbDst Number of bytes to read.
2179 *
2180 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
2181 */
2182IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
2183{
2184#ifdef IN_RING3
2185 for (;;)
2186 {
2187 Assert(cbDst <= 8);
2188 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
2189
2190 /*
2191 * We might have a partial buffer match; deal with that first to make the
2192 * rest simpler. This is the first part of the cross page/buffer case.
2193 */
2194 if (pVCpu->iem.s.pbInstrBuf != NULL)
2195 {
2196 if (offBuf < pVCpu->iem.s.cbInstrBuf)
2197 {
2198 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
2199 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
2200 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
2201
2202 cbDst -= cbCopy;
2203 pvDst = (uint8_t *)pvDst + cbCopy;
2204 offBuf += cbCopy;
2205 pVCpu->iem.s.offInstrNextByte += offBuf;
2206 }
2207 }
2208
2209 /*
2210 * Check segment limit, figuring how much we're allowed to access at this point.
2211 *
2212 * We will fault immediately if RIP is past the segment limit / in non-canonical
2213 * territory. If we do continue, there are one or more bytes to read before we
2214 * end up in trouble and we need to do that first before faulting.
2215 */
2216 RTGCPTR GCPtrFirst;
2217 uint32_t cbMaxRead;
2218 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2219 {
2220 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
2221 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
2222 { /* likely */ }
2223 else
2224 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
2225 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
2226 }
2227 else
2228 {
2229 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
2230 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2231 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
2232 { /* likely */ }
2233 else
2234 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2235 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
2236 if (cbMaxRead != 0)
2237 { /* likely */ }
2238 else
2239 {
2240 /* Overflowed because address is 0 and limit is max. */
2241 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2242 cbMaxRead = X86_PAGE_SIZE;
2243 }
2244 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
2245 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
2246 if (cbMaxRead2 < cbMaxRead)
2247 cbMaxRead = cbMaxRead2;
2248 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
2249 }
2250
2251 /*
2252 * Get the TLB entry for this piece of code.
2253 */
2254 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
2255 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
2256 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
2257 if (pTlbe->uTag == uTag)
2258 {
2259 /* likely when executing lots of code, otherwise unlikely */
2260# ifdef VBOX_WITH_STATISTICS
2261 pVCpu->iem.s.CodeTlb.cTlbHits++;
2262# endif
2263 }
2264 else
2265 {
2266 pVCpu->iem.s.CodeTlb.cTlbMisses++;
2267 PGMPTWALK Walk;
2268 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
2269 if (RT_FAILURE(rc))
2270 {
2271#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2272 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
2273 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
2274#endif
2275 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
2276 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
2277 }
2278
2279 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
2280 Assert(Walk.fSucceeded);
2281 pTlbe->uTag = uTag;
2282 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
2283 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
2284 pTlbe->GCPhys = Walk.GCPhys;
2285 pTlbe->pbMappingR3 = NULL;
2286 }
2287
2288 /*
2289 * Check TLB page table level access flags.
2290 */
2291 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
2292 {
2293 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
2294 {
2295 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
2296 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2297 }
2298 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2299 {
2300 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
2301 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2302 }
2303 }
2304
2305 /*
2306 * Look up the physical page info if necessary.
2307 */
2308 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2309 { /* not necessary */ }
2310 else
2311 {
2312 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
2313 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
2314 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
2315 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
2316 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
2317 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
2318 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
2319 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
2320 }
2321
2322# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
2323 /*
2324 * Try do a direct read using the pbMappingR3 pointer.
2325 */
2326 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
2327 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2328 {
2329 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
2330 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
2331 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
2332 {
2333 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
2334 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
2335 }
2336 else
2337 {
2338 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
2339 Assert(cbInstr < cbMaxRead);
2340 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
2341 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
2342 }
2343 if (cbDst <= cbMaxRead)
2344 {
2345 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
2346 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
2347 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
2348 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
2349 return;
2350 }
2351 pVCpu->iem.s.pbInstrBuf = NULL;
2352
2353 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
2354 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
2355 }
2356 else
2357# endif
2358#if 0
2359 /*
2360 * If there is no special read handling, we can read a bit more and
2361 * put it in the prefetch buffer.
2362 */
2363 if ( cbDst < cbMaxRead
2364 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
2365 {
2366 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
2367 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
2368 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2369 { /* likely */ }
2370 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2371 {
2372 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2373 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2374 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2375 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
2376 }
2377 else
2378 {
2379 Log((RT_SUCCESS(rcStrict)
2380 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2381 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2382 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2383 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2384 }
2385 }
2386 /*
2387 * Special read handling, so only read exactly what's needed.
2388 * This is a highly unlikely scenario.
2389 */
2390 else
2391#endif
2392 {
2393 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
2394 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
2395 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
2396 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
2397 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2398 { /* likely */ }
2399 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2400 {
2401 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2402 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
2403 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2404 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
2405 }
2406 else
2407 {
2408 Log((RT_SUCCESS(rcStrict)
2409 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2410 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2411 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
2412 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2413 }
2414 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
2415 if (cbToRead == cbDst)
2416 return;
2417 }
2418
2419 /*
2420 * More to read, loop.
2421 */
2422 cbDst -= cbMaxRead;
2423 pvDst = (uint8_t *)pvDst + cbMaxRead;
2424 }
2425#else
2426 RT_NOREF(pvDst, cbDst);
2427 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
2428#endif
2429}
2430
2431#else
2432
2433/**
2434 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
2435 * exception if it fails.
2436 *
2437 * @returns Strict VBox status code.
2438 * @param pVCpu The cross context virtual CPU structure of the
2439 * calling thread.
2440 * @param cbMin The minimum number of bytes relative to offOpcode
2441 * that must be read.
2442 */
2443IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
2444{
2445 /*
2446 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2447 *
2448 * First translate CS:rIP to a physical address.
2449 */
2450 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2451 uint32_t cbToTryRead;
2452 RTGCPTR GCPtrNext;
2453 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2454 {
2455 cbToTryRead = GUEST_PAGE_SIZE;
2456 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2457 if (!IEM_IS_CANONICAL(GCPtrNext))
2458 return iemRaiseGeneralProtectionFault0(pVCpu);
2459 }
2460 else
2461 {
2462 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2463 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2464 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2465 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2466 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2467 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2468 if (!cbToTryRead) /* overflowed */
2469 {
2470 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2471 cbToTryRead = UINT32_MAX;
2472 /** @todo check out wrapping around the code segment. */
2473 }
2474 if (cbToTryRead < cbMin - cbLeft)
2475 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2476 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2477 }
2478
2479 /* Only read up to the end of the page, and make sure we don't read more
2480 than the opcode buffer can hold. */
2481 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
2482 if (cbToTryRead > cbLeftOnPage)
2483 cbToTryRead = cbLeftOnPage;
2484 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2485 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2486/** @todo r=bird: Convert assertion into undefined opcode exception? */
2487 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2488
2489 PGMPTWALK Walk;
2490 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
2491 if (RT_FAILURE(rc))
2492 {
2493 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2494#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2495 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2496 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
2497#endif
2498 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2499 }
2500 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2501 {
2502 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2503#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2504 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2505 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2506#endif
2507 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2508 }
2509 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2510 {
2511 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2512#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2513 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2514 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2515#endif
2516 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2517 }
2518 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
2519 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2520 /** @todo Check reserved bits and such stuff. PGM is better at doing
2521 * that, so do it when implementing the guest virtual address
2522 * TLB... */
2523
2524 /*
2525 * Read the bytes at this address.
2526 *
2527 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2528 * and since PATM should only patch the start of an instruction there
2529 * should be no need to check again here.
2530 */
2531 if (!pVCpu->iem.s.fBypassHandlers)
2532 {
2533 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2534 cbToTryRead, PGMACCESSORIGIN_IEM);
2535 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2536 { /* likely */ }
2537 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2538 {
2539 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2540 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2541 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2542 }
2543 else
2544 {
2545 Log((RT_SUCCESS(rcStrict)
2546 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2547 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2548 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2549 return rcStrict;
2550 }
2551 }
2552 else
2553 {
2554 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2555 if (RT_SUCCESS(rc))
2556 { /* likely */ }
2557 else
2558 {
2559 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2560 return rc;
2561 }
2562 }
2563 pVCpu->iem.s.cbOpcode += cbToTryRead;
2564 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2565
2566 return VINF_SUCCESS;
2567}
2568
2569#endif /* !IEM_WITH_CODE_TLB */
2570#ifndef IEM_WITH_SETJMP
2571
2572/**
2573 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2574 *
2575 * @returns Strict VBox status code.
2576 * @param pVCpu The cross context virtual CPU structure of the
2577 * calling thread.
2578 * @param pb Where to return the opcode byte.
2579 */
2580DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2581{
2582 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2583 if (rcStrict == VINF_SUCCESS)
2584 {
2585 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2586 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2587 pVCpu->iem.s.offOpcode = offOpcode + 1;
2588 }
2589 else
2590 *pb = 0;
2591 return rcStrict;
2592}
2593
2594
2595/**
2596 * Fetches the next opcode byte.
2597 *
2598 * @returns Strict VBox status code.
2599 * @param pVCpu The cross context virtual CPU structure of the
2600 * calling thread.
2601 * @param pu8 Where to return the opcode byte.
2602 */
2603DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2604{
2605 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2606 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2607 {
2608 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2609 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2610 return VINF_SUCCESS;
2611 }
2612 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2613}
2614
2615#else /* IEM_WITH_SETJMP */
2616
2617/**
2618 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2619 *
2620 * @returns The opcode byte.
2621 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2622 */
2623DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2624{
2625# ifdef IEM_WITH_CODE_TLB
2626 uint8_t u8;
2627 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2628 return u8;
2629# else
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2631 if (rcStrict == VINF_SUCCESS)
2632 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2633 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2634# endif
2635}
2636
2637
2638/**
2639 * Fetches the next opcode byte, longjmp on error.
2640 *
2641 * @returns The opcode byte.
2642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2643 */
2644DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2645{
2646# ifdef IEM_WITH_CODE_TLB
2647 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2648 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2649 if (RT_LIKELY( pbBuf != NULL
2650 && offBuf < pVCpu->iem.s.cbInstrBuf))
2651 {
2652 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2653 return pbBuf[offBuf];
2654 }
2655# else
2656 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2657 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2658 {
2659 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2660 return pVCpu->iem.s.abOpcode[offOpcode];
2661 }
2662# endif
2663 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2664}
2665
2666#endif /* IEM_WITH_SETJMP */
2667
2668/**
2669 * Fetches the next opcode byte, returns automatically on failure.
2670 *
2671 * @param a_pu8 Where to return the opcode byte.
2672 * @remark Implicitly references pVCpu.
2673 */
2674#ifndef IEM_WITH_SETJMP
2675# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2676 do \
2677 { \
2678 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2679 if (rcStrict2 == VINF_SUCCESS) \
2680 { /* likely */ } \
2681 else \
2682 return rcStrict2; \
2683 } while (0)
2684#else
2685# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2686#endif /* IEM_WITH_SETJMP */
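
/*
 * Decoder usage sketch (hypothetical; 'iemOp_ExampleSketch' is made up and not
 * wired into any opcode table): the macro either stores the byte or bails out
 * of the decoder function for us, so no explicit status check is needed:
 *
 *     FNIEMOP_DEF(iemOp_ExampleSketch)
 *     {
 *         uint8_t bImm;
 *         IEM_OPCODE_GET_NEXT_U8(&bImm);   // returns/longjmps on fetch failure
 *         ...
 *     }
 */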
2687
2688
2689#ifndef IEM_WITH_SETJMP
2690/**
2691 * Fetches the next signed byte from the opcode stream.
2692 *
2693 * @returns Strict VBox status code.
2694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2695 * @param pi8 Where to return the signed byte.
2696 */
2697DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2698{
2699 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2700}
2701#endif /* !IEM_WITH_SETJMP */
2702
2703
2704/**
2705 * Fetches the next signed byte from the opcode stream, returning automatically
2706 * on failure.
2707 *
2708 * @param a_pi8 Where to return the signed byte.
2709 * @remark Implicitly references pVCpu.
2710 */
2711#ifndef IEM_WITH_SETJMP
2712# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2713 do \
2714 { \
2715 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2716 if (rcStrict2 != VINF_SUCCESS) \
2717 return rcStrict2; \
2718 } while (0)
2719#else /* IEM_WITH_SETJMP */
2720# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2721
2722#endif /* IEM_WITH_SETJMP */
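
/*
 * Usage sketch (hypothetical): short relative jumps carry a signed 8-bit
 * displacement, which is the typical consumer of this macro:
 *
 *     int8_t i8Disp;
 *     IEM_OPCODE_GET_NEXT_S8(&i8Disp);     // e.g. the Jcc rel8 displacement
 */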
2723
2724#ifndef IEM_WITH_SETJMP
2725
2726/**
2727 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2728 *
2729 * @returns Strict VBox status code.
2730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2731 * @param pu16 Where to return the opcode word.
2732 */
2733DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2734{
2735 uint8_t u8;
2736 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2737 if (rcStrict == VINF_SUCCESS)
2738 *pu16 = (int8_t)u8;
2739 return rcStrict;
2740}
2741
2742
2743/**
2744 * Fetches the next signed byte from the opcode stream, extending it to
2745 * unsigned 16-bit.
2746 *
2747 * @returns Strict VBox status code.
2748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2749 * @param pu16 Where to return the unsigned word.
2750 */
2751DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2752{
2753 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2754 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2755 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2756
2757 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2758 pVCpu->iem.s.offOpcode = offOpcode + 1;
2759 return VINF_SUCCESS;
2760}
2761
2762#endif /* !IEM_WITH_SETJMP */
2763
2764/**
2765 * Fetches the next signed byte from the opcode stream, sign-extending it to
2766 * a word and returning automatically on failure.
2767 *
2768 * @param a_pu16 Where to return the word.
2769 * @remark Implicitly references pVCpu.
2770 */
2771#ifndef IEM_WITH_SETJMP
2772# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2773 do \
2774 { \
2775 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2776 if (rcStrict2 != VINF_SUCCESS) \
2777 return rcStrict2; \
2778 } while (0)
2779#else
2780# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2781#endif
2782
2783#ifndef IEM_WITH_SETJMP
2784
2785/**
2786 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2787 *
2788 * @returns Strict VBox status code.
2789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2790 * @param pu32 Where to return the opcode dword.
2791 */
2792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2793{
2794 uint8_t u8;
2795 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2796 if (rcStrict == VINF_SUCCESS)
2797 *pu32 = (int8_t)u8;
2798 return rcStrict;
2799}
2800
2801
2802/**
2803 * Fetches the next signed byte from the opcode stream, extending it to
2804 * unsigned 32-bit.
2805 *
2806 * @returns Strict VBox status code.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 * @param pu32 Where to return the unsigned dword.
2809 */
2810DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2811{
2812 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2813 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2814 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2815
2816 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2817 pVCpu->iem.s.offOpcode = offOpcode + 1;
2818 return VINF_SUCCESS;
2819}
2820
2821#endif /* !IEM_WITH_SETJMP */
2822
2823/**
2824 * Fetches the next signed byte from the opcode stream, sign-extending it to
2825 * a double word and returning automatically on failure.
2826 *
2827 * @param a_pu32 Where to return the double word.
2828 * @remark Implicitly references pVCpu.
2829 */
2830#ifndef IEM_WITH_SETJMP
2831# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2832 do \
2833 { \
2834 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2835 if (rcStrict2 != VINF_SUCCESS) \
2836 return rcStrict2; \
2837 } while (0)
2838#else
2839# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2840#endif
2841
2842#ifndef IEM_WITH_SETJMP
2843
2844/**
2845 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2846 *
2847 * @returns Strict VBox status code.
2848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2849 * @param pu64 Where to return the opcode qword.
2850 */
2851DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2852{
2853 uint8_t u8;
2854 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2855 if (rcStrict == VINF_SUCCESS)
2856 *pu64 = (int8_t)u8;
2857 return rcStrict;
2858}
2859
2860
2861/**
2862 * Fetches the next signed byte from the opcode stream, extending it to
2863 * unsigned 64-bit.
2864 *
2865 * @returns Strict VBox status code.
2866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2867 * @param pu64 Where to return the unsigned qword.
2868 */
2869DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2870{
2871 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2872 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2873 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2874
2875 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2876 pVCpu->iem.s.offOpcode = offOpcode + 1;
2877 return VINF_SUCCESS;
2878}
2879
2880#endif /* !IEM_WITH_SETJMP */
2881
2882
2883/**
2884 * Fetches the next signed byte from the opcode stream, sign-extending it to
2885 * a quad word and returning automatically on failure.
2886 *
2887 * @param a_pu64 Where to return the quad word.
2888 * @remark Implicitly references pVCpu.
2889 */
2890#ifndef IEM_WITH_SETJMP
2891# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2892 do \
2893 { \
2894 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2895 if (rcStrict2 != VINF_SUCCESS) \
2896 return rcStrict2; \
2897 } while (0)
2898#else
2899# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2900#endif
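
/*
 * Usage sketch (hypothetical): the sign-extending variants above suit encodings
 * with a sign-extended 8-bit immediate (group-1 style), e.g. with a 64-bit
 * effective operand size:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);   // imm8 sign-extended to 64 bits
 */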
2901
2902
2903#ifndef IEM_WITH_SETJMP
2904/**
2905 * Fetches the next opcode byte, which is a ModR/M byte, and records its offset.
2906 *
2907 * @returns Strict VBox status code.
2908 * @param pVCpu The cross context virtual CPU structure of the
2909 * calling thread.
2910 * @param pu8 Where to return the opcode byte.
2911 */
2912DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2913{
2914 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2915 pVCpu->iem.s.offModRm = offOpcode;
2916 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2917 {
2918 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2919 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2920 return VINF_SUCCESS;
2921 }
2922 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2923}
2924#else /* IEM_WITH_SETJMP */
2925/**
2926 * Fetches the next ModR/M opcode byte, recording its offset; longjmp on error.
2927 *
2928 * @returns The opcode byte.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 */
2931DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2932{
2933# ifdef IEM_WITH_CODE_TLB
2934 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2935 pVCpu->iem.s.offModRm = offBuf;
2936 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2937 if (RT_LIKELY( pbBuf != NULL
2938 && offBuf < pVCpu->iem.s.cbInstrBuf))
2939 {
2940 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2941 return pbBuf[offBuf];
2942 }
2943# else
2944 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2945 pVCpu->iem.s.offModRm = offOpcode;
2946 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2947 {
2948 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2949 return pVCpu->iem.s.abOpcode[offOpcode];
2950 }
2951# endif
2952 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2953}
2954#endif /* IEM_WITH_SETJMP */
2955
2956/**
2957 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2958 * on failure.
2959 *
2960 * Will note down the position of the ModR/M byte for VT-x exits.
2961 *
2962 * @param a_pbRm Where to return the RM opcode byte.
2963 * @remark Implicitly references pVCpu.
2964 */
2965#ifndef IEM_WITH_SETJMP
2966# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2967 do \
2968 { \
2969 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2970 if (rcStrict2 == VINF_SUCCESS) \
2971 { /* likely */ } \
2972 else \
2973 return rcStrict2; \
2974 } while (0)
2975#else
2976# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2977#endif /* IEM_WITH_SETJMP */
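
/*
 * Usage sketch (hypothetical): a ModR/M consuming decoder typically fetches the
 * byte via this macro and then splits on register vs. memory form:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);
 *     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 *         ...                              // register operand
 *     else
 *         ...                              // memory operand, effective address follows
 */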
2978
2979
2980#ifndef IEM_WITH_SETJMP
2981
2982/**
2983 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2984 *
2985 * @returns Strict VBox status code.
2986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2987 * @param pu16 Where to return the opcode word.
2988 */
2989DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2990{
2991 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2992 if (rcStrict == VINF_SUCCESS)
2993 {
2994 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2995# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2996 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2997# else
2998 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2999# endif
3000 pVCpu->iem.s.offOpcode = offOpcode + 2;
3001 }
3002 else
3003 *pu16 = 0;
3004 return rcStrict;
3005}
3006
3007
3008/**
3009 * Fetches the next opcode word.
3010 *
3011 * @returns Strict VBox status code.
3012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3013 * @param pu16 Where to return the opcode word.
3014 */
3015DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
3016{
3017 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3018 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
3019 {
3020 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
3021# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3022 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3023# else
3024 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3025# endif
3026 return VINF_SUCCESS;
3027 }
3028 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
3029}
3030
3031#else /* IEM_WITH_SETJMP */
3032
3033/**
3034 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
3035 *
3036 * @returns The opcode word.
3037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3038 */
3039DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
3040{
3041# ifdef IEM_WITH_CODE_TLB
3042 uint16_t u16;
3043 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
3044 return u16;
3045# else
3046 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
3047 if (rcStrict == VINF_SUCCESS)
3048 {
3049 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3050 pVCpu->iem.s.offOpcode += 2;
3051# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3052 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3053# else
3054 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3055# endif
3056 }
3057 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3058# endif
3059}
3060
3061
3062/**
3063 * Fetches the next opcode word, longjmp on error.
3064 *
3065 * @returns The opcode word.
3066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3067 */
3068DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
3069{
3070# ifdef IEM_WITH_CODE_TLB
3071 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3072 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3073 if (RT_LIKELY( pbBuf != NULL
3074 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
3075 {
3076 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
3077# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3078 return *(uint16_t const *)&pbBuf[offBuf];
3079# else
3080 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
3081# endif
3082 }
3083# else
3084 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3085 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
3086 {
3087 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3092# endif
3093 }
3094# endif
3095 return iemOpcodeGetNextU16SlowJmp(pVCpu);
3096}
3097
3098#endif /* IEM_WITH_SETJMP */
3099
3100
3101/**
3102 * Fetches the next opcode word, returns automatically on failure.
3103 *
3104 * @param a_pu16 Where to return the opcode word.
3105 * @remark Implicitly references pVCpu.
3106 */
3107#ifndef IEM_WITH_SETJMP
3108# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
3109 do \
3110 { \
3111 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
3112 if (rcStrict2 != VINF_SUCCESS) \
3113 return rcStrict2; \
3114 } while (0)
3115#else
3116# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
3117#endif
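
/*
 * Illustrative sketch only (the handler name below is hypothetical, not part of
 * this file): a decoder function fetching a 16-bit immediate with the macro
 * above.  In the status-code build IEM_OPCODE_GET_NEXT_U16 returns from the
 * handler on a failed fetch; in the setjmp build it longjmps instead, so the
 * handler body can stay identical in both modes:
 *
 *     IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm16(PVMCPUCC pVCpu)
 *     {
 *         uint16_t u16Imm;
 *         IEM_OPCODE_GET_NEXT_U16(&u16Imm);    // returns / longjmps on fetch failure
 *         pVCpu->cpum.GstCtx.ax = u16Imm;      // example side effect only
 *         return VINF_SUCCESS;
 *     }
 */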
3118
3119#ifndef IEM_WITH_SETJMP
3120
3121/**
3122 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
3123 *
3124 * @returns Strict VBox status code.
3125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3126 * @param pu32 Where to return the opcode double word.
3127 */
3128DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
3129{
3130 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
3131 if (rcStrict == VINF_SUCCESS)
3132 {
3133 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3134 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3135 pVCpu->iem.s.offOpcode = offOpcode + 2;
3136 }
3137 else
3138 *pu32 = 0;
3139 return rcStrict;
3140}
3141
3142
3143/**
3144 * Fetches the next opcode word, zero extending it to a double word.
3145 *
3146 * @returns Strict VBox status code.
3147 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3148 * @param pu32 Where to return the opcode double word.
3149 */
3150DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
3151{
3152 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3153 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
3154 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
3155
3156 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3157 pVCpu->iem.s.offOpcode = offOpcode + 2;
3158 return VINF_SUCCESS;
3159}
3160
3161#endif /* !IEM_WITH_SETJMP */
3162
3163
3164/**
3165 * Fetches the next opcode word and zero extends it to a double word, returns
3166 * automatically on failure.
3167 *
3168 * @param a_pu32 Where to return the opcode double word.
3169 * @remark Implicitly references pVCpu.
3170 */
3171#ifndef IEM_WITH_SETJMP
3172# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
3173 do \
3174 { \
3175 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
3176 if (rcStrict2 != VINF_SUCCESS) \
3177 return rcStrict2; \
3178 } while (0)
3179#else
3180# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
3181#endif
3182
3183#ifndef IEM_WITH_SETJMP
3184
3185/**
3186 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
3187 *
3188 * @returns Strict VBox status code.
3189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3190 * @param pu64 Where to return the opcode quad word.
3191 */
3192DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3193{
3194 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
3195 if (rcStrict == VINF_SUCCESS)
3196 {
3197 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3198 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3199 pVCpu->iem.s.offOpcode = offOpcode + 2;
3200 }
3201 else
3202 *pu64 = 0;
3203 return rcStrict;
3204}
3205
3206
3207/**
3208 * Fetches the next opcode word, zero extending it to a quad word.
3209 *
3210 * @returns Strict VBox status code.
3211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3212 * @param pu64 Where to return the opcode quad word.
3213 */
3214DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3215{
3216 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3217 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
3218 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
3219
3220 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
3221 pVCpu->iem.s.offOpcode = offOpcode + 2;
3222 return VINF_SUCCESS;
3223}
3224
3225#endif /* !IEM_WITH_SETJMP */
3226
3227/**
3228 * Fetches the next opcode word and zero extends it to a quad word, returns
3229 * automatically on failure.
3230 *
3231 * @param a_pu64 Where to return the opcode quad word.
3232 * @remark Implicitly references pVCpu.
3233 */
3234#ifndef IEM_WITH_SETJMP
3235# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
3236 do \
3237 { \
3238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
3239 if (rcStrict2 != VINF_SUCCESS) \
3240 return rcStrict2; \
3241 } while (0)
3242#else
3243# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
3244#endif
3245
3246
3247#ifndef IEM_WITH_SETJMP
3248/**
3249 * Fetches the next signed word from the opcode stream.
3250 *
3251 * @returns Strict VBox status code.
3252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3253 * @param pi16 Where to return the signed word.
3254 */
3255DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
3256{
3257 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
3258}
3259#endif /* !IEM_WITH_SETJMP */
3260
3261
3262/**
3263 * Fetches the next signed word from the opcode stream, returning automatically
3264 * on failure.
3265 *
3266 * @param a_pi16 Where to return the signed word.
3267 * @remark Implicitly references pVCpu.
3268 */
3269#ifndef IEM_WITH_SETJMP
3270# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
3271 do \
3272 { \
3273 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
3274 if (rcStrict2 != VINF_SUCCESS) \
3275 return rcStrict2; \
3276 } while (0)
3277#else
3278# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
3279#endif
3280
3281#ifndef IEM_WITH_SETJMP
3282
3283/**
3284 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
3285 *
3286 * @returns Strict VBox status code.
3287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3288 * @param pu32 Where to return the opcode dword.
3289 */
3290DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
3291{
3292 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3293 if (rcStrict == VINF_SUCCESS)
3294 {
3295 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3296# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3297 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3298# else
3299 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3300 pVCpu->iem.s.abOpcode[offOpcode + 1],
3301 pVCpu->iem.s.abOpcode[offOpcode + 2],
3302 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3303# endif
3304 pVCpu->iem.s.offOpcode = offOpcode + 4;
3305 }
3306 else
3307 *pu32 = 0;
3308 return rcStrict;
3309}
3310
3311
3312/**
3313 * Fetches the next opcode dword.
3314 *
3315 * @returns Strict VBox status code.
3316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3317 * @param pu32 Where to return the opcode double word.
3318 */
3319DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
3320{
3321 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3322 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
3323 {
3324 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
3325# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3326 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3327# else
3328 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3329 pVCpu->iem.s.abOpcode[offOpcode + 1],
3330 pVCpu->iem.s.abOpcode[offOpcode + 2],
3331 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3332# endif
3333 return VINF_SUCCESS;
3334 }
3335 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
3336}
3337
3338#else /* IEM_WITH_SETJMP */
3339
3340/**
3341 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
3342 *
3343 * @returns The opcode dword.
3344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3345 */
3346DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
3347{
3348# ifdef IEM_WITH_CODE_TLB
3349 uint32_t u32;
3350 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
3351 return u32;
3352# else
3353 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3354 if (rcStrict == VINF_SUCCESS)
3355 {
3356 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3357 pVCpu->iem.s.offOpcode = offOpcode + 4;
3358# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3359 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3360# else
3361 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3362 pVCpu->iem.s.abOpcode[offOpcode + 1],
3363 pVCpu->iem.s.abOpcode[offOpcode + 2],
3364 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3365# endif
3366 }
3367 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3368# endif
3369}
3370
3371
3372/**
3373 * Fetches the next opcode dword, longjmp on error.
3374 *
3375 * @returns The opcode dword.
3376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3377 */
3378DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
3379{
3380# ifdef IEM_WITH_CODE_TLB
3381 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3382 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3383 if (RT_LIKELY( pbBuf != NULL
3384 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
3385 {
3386 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
3387# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3388 return *(uint32_t const *)&pbBuf[offBuf];
3389# else
3390 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
3391 pbBuf[offBuf + 1],
3392 pbBuf[offBuf + 2],
3393 pbBuf[offBuf + 3]);
3394# endif
3395 }
3396# else
3397 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3398 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
3399 {
3400 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
3401# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3402 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3403# else
3404 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3405 pVCpu->iem.s.abOpcode[offOpcode + 1],
3406 pVCpu->iem.s.abOpcode[offOpcode + 2],
3407 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3408# endif
3409 }
3410# endif
3411 return iemOpcodeGetNextU32SlowJmp(pVCpu);
3412}
3413
3414#endif /* !IEM_WITH_SETJMP */
3415
3416
3417/**
3418 * Fetches the next opcode dword, returns automatically on failure.
3419 *
3420 * @param a_pu32 Where to return the opcode dword.
3421 * @remark Implicitly references pVCpu.
3422 */
3423#ifndef IEM_WITH_SETJMP
3424# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
3425 do \
3426 { \
3427 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
3428 if (rcStrict2 != VINF_SUCCESS) \
3429 return rcStrict2; \
3430 } while (0)
3431#else
3432# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
3433#endif
3434
3435#ifndef IEM_WITH_SETJMP
3436
3437/**
3438 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3439 *
3440 * @returns Strict VBox status code.
3441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3442 * @param pu64 Where to return the opcode dword.
3443 */
3444DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3445{
3446 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3447 if (rcStrict == VINF_SUCCESS)
3448 {
3449 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3450 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3451 pVCpu->iem.s.abOpcode[offOpcode + 1],
3452 pVCpu->iem.s.abOpcode[offOpcode + 2],
3453 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3454 pVCpu->iem.s.offOpcode = offOpcode + 4;
3455 }
3456 else
3457 *pu64 = 0;
3458 return rcStrict;
3459}
3460
3461
3462/**
3463 * Fetches the next opcode dword, zero extending it to a quad word.
3464 *
3465 * @returns Strict VBox status code.
3466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3467 * @param pu64 Where to return the opcode quad word.
3468 */
3469DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3470{
3471 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3472 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3473 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3474
3475 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3476 pVCpu->iem.s.abOpcode[offOpcode + 1],
3477 pVCpu->iem.s.abOpcode[offOpcode + 2],
3478 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3479 pVCpu->iem.s.offOpcode = offOpcode + 4;
3480 return VINF_SUCCESS;
3481}
3482
3483#endif /* !IEM_WITH_SETJMP */
3484
3485
3486/**
3487 * Fetches the next opcode dword and zero extends it to a quad word, returns
3488 * automatically on failure.
3489 *
3490 * @param a_pu64 Where to return the opcode quad word.
3491 * @remark Implicitly references pVCpu.
3492 */
3493#ifndef IEM_WITH_SETJMP
3494# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3495 do \
3496 { \
3497 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3498 if (rcStrict2 != VINF_SUCCESS) \
3499 return rcStrict2; \
3500 } while (0)
3501#else
3502# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3503#endif
3504
3505
3506#ifndef IEM_WITH_SETJMP
3507/**
3508 * Fetches the next signed double word from the opcode stream.
3509 *
3510 * @returns Strict VBox status code.
3511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3512 * @param pi32 Where to return the signed double word.
3513 */
3514DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
3515{
3516 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3517}
3518#endif
3519
3520/**
3521 * Fetches the next signed double word from the opcode stream, returning
3522 * automatically on failure.
3523 *
3524 * @param a_pi32 Where to return the signed double word.
3525 * @remark Implicitly references pVCpu.
3526 */
3527#ifndef IEM_WITH_SETJMP
3528# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3529 do \
3530 { \
3531 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3532 if (rcStrict2 != VINF_SUCCESS) \
3533 return rcStrict2; \
3534 } while (0)
3535#else
3536# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3537#endif
3538
3539#ifndef IEM_WITH_SETJMP
3540
3541/**
3542 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3543 *
3544 * @returns Strict VBox status code.
3545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3546 * @param pu64 Where to return the opcode qword.
3547 */
3548DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3549{
3550 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3551 if (rcStrict == VINF_SUCCESS)
3552 {
3553 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3554 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3555 pVCpu->iem.s.abOpcode[offOpcode + 1],
3556 pVCpu->iem.s.abOpcode[offOpcode + 2],
3557 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3558 pVCpu->iem.s.offOpcode = offOpcode + 4;
3559 }
3560 else
3561 *pu64 = 0;
3562 return rcStrict;
3563}
3564
3565
3566/**
3567 * Fetches the next opcode dword, sign extending it into a quad word.
3568 *
3569 * @returns Strict VBox status code.
3570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3571 * @param pu64 Where to return the opcode quad word.
3572 */
3573DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3574{
3575 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3576 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3577 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3578
3579 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3580 pVCpu->iem.s.abOpcode[offOpcode + 1],
3581 pVCpu->iem.s.abOpcode[offOpcode + 2],
3582 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3583 *pu64 = i32;
3584 pVCpu->iem.s.offOpcode = offOpcode + 4;
3585 return VINF_SUCCESS;
3586}
3587
3588#endif /* !IEM_WITH_SETJMP */
3589
3590
3591/**
3592 * Fetches the next opcode double word and sign extends it to a quad word,
3593 * returns automatically on failure.
3594 *
3595 * @param a_pu64 Where to return the opcode quad word.
3596 * @remark Implicitly references pVCpu.
3597 */
3598#ifndef IEM_WITH_SETJMP
3599# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3600 do \
3601 { \
3602 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3603 if (rcStrict2 != VINF_SUCCESS) \
3604 return rcStrict2; \
3605 } while (0)
3606#else
3607# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3608#endif
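
/*
 * Worked example for the sign-extending fetch above (illustrative values only):
 * if the next four opcode bytes are F0 FF FF FF (little endian), the dword is
 * 0xFFFFFFF0 = (int32_t)-16, so IEM_OPCODE_GET_NEXT_S32_SX_U64 yields the qword
 * 0xFFFFFFFFFFFFFFF0, whereas the zero-extending IEM_OPCODE_GET_NEXT_U32_ZX_U64
 * would yield 0x00000000FFFFFFF0 for the same bytes.
 */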
3609
3610#ifndef IEM_WITH_SETJMP
3611
3612/**
3613 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3614 *
3615 * @returns Strict VBox status code.
3616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3617 * @param pu64 Where to return the opcode qword.
3618 */
3619DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3620{
3621 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3622 if (rcStrict == VINF_SUCCESS)
3623 {
3624 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3625# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3626 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3627# else
3628 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3629 pVCpu->iem.s.abOpcode[offOpcode + 1],
3630 pVCpu->iem.s.abOpcode[offOpcode + 2],
3631 pVCpu->iem.s.abOpcode[offOpcode + 3],
3632 pVCpu->iem.s.abOpcode[offOpcode + 4],
3633 pVCpu->iem.s.abOpcode[offOpcode + 5],
3634 pVCpu->iem.s.abOpcode[offOpcode + 6],
3635 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3636# endif
3637 pVCpu->iem.s.offOpcode = offOpcode + 8;
3638 }
3639 else
3640 *pu64 = 0;
3641 return rcStrict;
3642}
3643
3644
3645/**
3646 * Fetches the next opcode qword.
3647 *
3648 * @returns Strict VBox status code.
3649 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3650 * @param pu64 Where to return the opcode qword.
3651 */
3652DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3653{
3654 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3655 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3656 {
3657# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3658 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3659# else
3660 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3661 pVCpu->iem.s.abOpcode[offOpcode + 1],
3662 pVCpu->iem.s.abOpcode[offOpcode + 2],
3663 pVCpu->iem.s.abOpcode[offOpcode + 3],
3664 pVCpu->iem.s.abOpcode[offOpcode + 4],
3665 pVCpu->iem.s.abOpcode[offOpcode + 5],
3666 pVCpu->iem.s.abOpcode[offOpcode + 6],
3667 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3668# endif
3669 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3670 return VINF_SUCCESS;
3671 }
3672 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3673}
3674
3675#else /* IEM_WITH_SETJMP */
3676
3677/**
3678 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3679 *
3680 * @returns The opcode qword.
3681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3682 */
3683DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3684{
3685# ifdef IEM_WITH_CODE_TLB
3686 uint64_t u64;
3687 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3688 return u64;
3689# else
3690 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3691 if (rcStrict == VINF_SUCCESS)
3692 {
3693 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3694 pVCpu->iem.s.offOpcode = offOpcode + 8;
3695# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3696 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3697# else
3698 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3699 pVCpu->iem.s.abOpcode[offOpcode + 1],
3700 pVCpu->iem.s.abOpcode[offOpcode + 2],
3701 pVCpu->iem.s.abOpcode[offOpcode + 3],
3702 pVCpu->iem.s.abOpcode[offOpcode + 4],
3703 pVCpu->iem.s.abOpcode[offOpcode + 5],
3704 pVCpu->iem.s.abOpcode[offOpcode + 6],
3705 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3706# endif
3707 }
3708 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3709# endif
3710}
3711
3712
3713/**
3714 * Fetches the next opcode qword, longjmp on error.
3715 *
3716 * @returns The opcode qword.
3717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3718 */
3719DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3720{
3721# ifdef IEM_WITH_CODE_TLB
3722 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3723 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3724 if (RT_LIKELY( pbBuf != NULL
3725 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3726 {
3727 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3728# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3729 return *(uint64_t const *)&pbBuf[offBuf];
3730# else
3731 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3732 pbBuf[offBuf + 1],
3733 pbBuf[offBuf + 2],
3734 pbBuf[offBuf + 3],
3735 pbBuf[offBuf + 4],
3736 pbBuf[offBuf + 5],
3737 pbBuf[offBuf + 6],
3738 pbBuf[offBuf + 7]);
3739# endif
3740 }
3741# else
3742 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3743 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3744 {
3745 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3746# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3747 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3748# else
3749 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3750 pVCpu->iem.s.abOpcode[offOpcode + 1],
3751 pVCpu->iem.s.abOpcode[offOpcode + 2],
3752 pVCpu->iem.s.abOpcode[offOpcode + 3],
3753 pVCpu->iem.s.abOpcode[offOpcode + 4],
3754 pVCpu->iem.s.abOpcode[offOpcode + 5],
3755 pVCpu->iem.s.abOpcode[offOpcode + 6],
3756 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3757# endif
3758 }
3759# endif
3760 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3761}
3762
3763#endif /* IEM_WITH_SETJMP */
3764
3765/**
3766 * Fetches the next opcode quad word, returns automatically on failure.
3767 *
3768 * @param a_pu64 Where to return the opcode quad word.
3769 * @remark Implicitly references pVCpu.
3770 */
3771#ifndef IEM_WITH_SETJMP
3772# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3773 do \
3774 { \
3775 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3776 if (rcStrict2 != VINF_SUCCESS) \
3777 return rcStrict2; \
3778 } while (0)
3779#else
3780# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3781#endif
3782
3783
3784/** @name Misc Worker Functions.
3785 * @{
3786 */
3787
3788/**
3789 * Gets the exception class for the specified exception vector.
3790 *
3791 * @returns The class of the specified exception.
3792 * @param uVector The exception vector.
3793 */
3794IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3795{
3796 Assert(uVector <= X86_XCPT_LAST);
3797 switch (uVector)
3798 {
3799 case X86_XCPT_DE:
3800 case X86_XCPT_TS:
3801 case X86_XCPT_NP:
3802 case X86_XCPT_SS:
3803 case X86_XCPT_GP:
3804 case X86_XCPT_SX: /* AMD only */
3805 return IEMXCPTCLASS_CONTRIBUTORY;
3806
3807 case X86_XCPT_PF:
3808 case X86_XCPT_VE: /* Intel only */
3809 return IEMXCPTCLASS_PAGE_FAULT;
3810
3811 case X86_XCPT_DF:
3812 return IEMXCPTCLASS_DOUBLE_FAULT;
3813 }
3814 return IEMXCPTCLASS_BENIGN;
3815}
3816
3817
3818/**
3819 * Evaluates how to handle an exception caused during delivery of another event
3820 * (exception / interrupt).
3821 *
3822 * @returns How to handle the recursive exception.
3823 * @param pVCpu The cross context virtual CPU structure of the
3824 * calling thread.
3825 * @param fPrevFlags The flags of the previous event.
3826 * @param uPrevVector The vector of the previous event.
3827 * @param fCurFlags The flags of the current exception.
3828 * @param uCurVector The vector of the current exception.
3829 * @param pfXcptRaiseInfo Where to store additional information about the
3830 * exception condition. Optional.
3831 */
3832VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3833 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3834{
3835 /*
3836 * Only CPU exceptions can be raised while delivering other events, software interrupt
3837     * Only CPU exceptions can be raised while delivering other events; software interrupt
3838 */
3839 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3840 Assert(pVCpu); RT_NOREF(pVCpu);
3841 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3842
3843 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3844 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3845 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3846 {
3847 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3848 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3849 {
3850 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3851 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3852 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3853 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3854 {
3855 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3856 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3857 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3858 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3859 uCurVector, pVCpu->cpum.GstCtx.cr2));
3860 }
3861 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3862 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3863 {
3864 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3865 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3866 }
3867 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3868 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3869 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3870 {
3871 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3872 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3873 }
3874 }
3875 else
3876 {
3877 if (uPrevVector == X86_XCPT_NMI)
3878 {
3879 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3880 if (uCurVector == X86_XCPT_PF)
3881 {
3882 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3883 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3884 }
3885 }
3886 else if ( uPrevVector == X86_XCPT_AC
3887 && uCurVector == X86_XCPT_AC)
3888 {
3889 enmRaise = IEMXCPTRAISE_CPU_HANG;
3890 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3891 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3892 }
3893 }
3894 }
3895 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3896 {
3897 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3898 if (uCurVector == X86_XCPT_PF)
3899 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3900 }
3901 else
3902 {
3903 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3904 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3905 }
3906
3907 if (pfXcptRaiseInfo)
3908 *pfXcptRaiseInfo = fRaiseInfo;
3909 return enmRaise;
3910}
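
/*
 * Illustrative call sketch (hypothetical caller, made-up variable names): a #GP
 * raised while a #PF was being delivered is a page fault followed by a
 * contributory exception and therefore escalates to a double fault:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */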
3911
3912
3913/**
3914 * Enters the CPU shutdown state initiated by a triple fault or other
3915 * unrecoverable conditions.
3916 *
3917 * @returns Strict VBox status code.
3918 * @param pVCpu The cross context virtual CPU structure of the
3919 * calling thread.
3920 */
3921IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3922{
3923 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3924 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3925
3926 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3927 {
3928 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3929 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3930 }
3931
3932 RT_NOREF(pVCpu);
3933 return VINF_EM_TRIPLE_FAULT;
3934}
3935
3936
3937/**
3938 * Validates a new SS segment.
3939 *
3940 * @returns VBox strict status code.
3941 * @param pVCpu The cross context virtual CPU structure of the
3942 * calling thread.
3943 * @param   NewSS           The new SS selector.
3944 * @param uCpl The CPL to load the stack for.
3945 * @param pDesc Where to return the descriptor.
3946 */
3947IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3948{
3949 /* Null selectors are not allowed (we're not called for dispatching
3950 interrupts with SS=0 in long mode). */
3951 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3952 {
3953 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3954 return iemRaiseTaskSwitchFault0(pVCpu);
3955 }
3956
3957 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3958 if ((NewSS & X86_SEL_RPL) != uCpl)
3959 {
3960 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3961 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3962 }
3963
3964 /*
3965 * Read the descriptor.
3966 */
3967 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3968 if (rcStrict != VINF_SUCCESS)
3969 return rcStrict;
3970
3971 /*
3972 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3973 */
3974 if (!pDesc->Legacy.Gen.u1DescType)
3975 {
3976 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3977 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3978 }
3979
3980 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3981 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3982 {
3983 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3984 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3985 }
3986 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3987 {
3988 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3989 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3990 }
3991
3992 /* Is it there? */
3993 /** @todo testcase: Is this checked before the canonical / limit check below? */
3994 if (!pDesc->Legacy.Gen.u1Present)
3995 {
3996 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3997 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3998 }
3999
4000 return VINF_SUCCESS;
4001}
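
/*
 * Illustrative call sketch (hypothetical fragment; the privilege-changing stack
 * switch paths in this file use the helper in a similar way):
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict2 = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict2 != VINF_SUCCESS)
 *         return rcStrict2;   // #TS or #NP already raised / pending
 */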
4002
4003
4004/**
4005 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
4006 * not (kind of obsolete now).
4007 *
4008 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4009 */
4010#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
4011
4012/**
4013 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
4014 *
4015 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4016 * @param a_fEfl The new EFLAGS.
4017 */
4018#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
4019
4020/** @} */
4021
4022
4023/** @name Raising Exceptions.
4024 *
4025 * @{
4026 */
4027
4028
4029/**
4030 * Loads the specified stack far pointer from the TSS.
4031 *
4032 * @returns VBox strict status code.
4033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4034 * @param uCpl The CPL to load the stack for.
4035 * @param pSelSS Where to return the new stack segment.
4036 * @param puEsp Where to return the new stack pointer.
4037 */
4038IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
4039{
4040 VBOXSTRICTRC rcStrict;
4041 Assert(uCpl < 4);
4042
4043 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
4044 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
4045 {
4046 /*
4047 * 16-bit TSS (X86TSS16).
4048 */
4049 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
4050 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4051 {
4052 uint32_t off = uCpl * 4 + 2;
4053 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
4054 {
4055 /** @todo check actual access pattern here. */
4056 uint32_t u32Tmp = 0; /* gcc maybe... */
4057 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
4058 if (rcStrict == VINF_SUCCESS)
4059 {
4060 *puEsp = RT_LOWORD(u32Tmp);
4061 *pSelSS = RT_HIWORD(u32Tmp);
4062 return VINF_SUCCESS;
4063 }
4064 }
4065 else
4066 {
4067 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
4068 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
4069 }
4070 break;
4071 }
4072
4073 /*
4074 * 32-bit TSS (X86TSS32).
4075 */
4076 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
4077 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4078 {
4079 uint32_t off = uCpl * 8 + 4;
4080 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
4081 {
4082                /** @todo check actual access pattern here. */
4083 uint64_t u64Tmp;
4084 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
4085 if (rcStrict == VINF_SUCCESS)
4086 {
4087 *puEsp = u64Tmp & UINT32_MAX;
4088 *pSelSS = (RTSEL)(u64Tmp >> 32);
4089 return VINF_SUCCESS;
4090 }
4091 }
4092 else
4093 {
4094                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
4095 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
4096 }
4097 break;
4098 }
4099
4100 default:
4101 AssertFailed();
4102 rcStrict = VERR_IEM_IPE_4;
4103 break;
4104 }
4105
4106 *puEsp = 0; /* make gcc happy */
4107 *pSelSS = 0; /* make gcc happy */
4108 return rcStrict;
4109}
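
/*
 * Worked example of the offset calculation above (standard TSS layouts, values
 * purely illustrative): for a 16-bit TSS and uCpl=1 the code reads 4 bytes at
 * offset 1*4 + 2 = 6, i.e. SP1 in the low word and SS1 in the high word; for a
 * 32-bit TSS and uCpl=1 it reads 8 bytes at offset 1*8 + 4 = 12, i.e. ESP1 in
 * the low dword and SS1 in the upper dword.
 */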
4110
4111
4112/**
4113 * Loads the specified stack pointer from the 64-bit TSS.
4114 *
4115 * @returns VBox strict status code.
4116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4117 * @param uCpl The CPL to load the stack for.
4118 * @param uIst The interrupt stack table index, 0 if to use uCpl.
4119 * @param puRsp Where to return the new stack pointer.
4120 */
4121IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
4122{
4123 Assert(uCpl < 4);
4124 Assert(uIst < 8);
4125 *puRsp = 0; /* make gcc happy */
4126
4127 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
4128 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
4129
4130 uint32_t off;
4131 if (uIst)
4132 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
4133 else
4134 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
4135 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
4136 {
4137 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
4138 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
4139 }
4140
4141 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
4142}
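
/*
 * Worked example of the offset calculation above (architectural 64-bit TSS
 * layout, values purely illustrative): with uIst=0 and uCpl=2 the code reads
 * RSP2 at offset 2*8 + RT_UOFFSETOF(X86TSS64, rsp0) = 16 + 4 = 20; with uIst=3
 * it reads IST3 at offset (3-1)*8 + RT_UOFFSETOF(X86TSS64, ist1) = 16 + 36 = 52.
 */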
4143
4144
4145/**
4146 * Adjust the CPU state according to the exception being raised.
4147 *
4148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4149 * @param u8Vector The exception that has been raised.
4150 */
4151DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
4152{
4153 switch (u8Vector)
4154 {
4155 case X86_XCPT_DB:
4156 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
4157 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
4158 break;
4159 /** @todo Read the AMD and Intel exception reference... */
4160 }
4161}
4162
4163
4164/**
4165 * Implements exceptions and interrupts for real mode.
4166 *
4167 * @returns VBox strict status code.
4168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4169 * @param cbInstr The number of bytes to offset rIP by in the return
4170 * address.
4171 * @param u8Vector The interrupt / exception vector number.
4172 * @param fFlags The flags.
4173 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4174 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4175 */
4176IEM_STATIC VBOXSTRICTRC
4177iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
4178 uint8_t cbInstr,
4179 uint8_t u8Vector,
4180 uint32_t fFlags,
4181 uint16_t uErr,
4182 uint64_t uCr2)
4183{
4184 NOREF(uErr); NOREF(uCr2);
4185 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4186
4187 /*
4188 * Read the IDT entry.
4189 */
4190 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
4191 {
4192 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4193 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4194 }
4195 RTFAR16 Idte;
4196 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
4197 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4198 {
4199 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4200 return rcStrict;
4201 }
4202
4203 /*
4204 * Push the stack frame.
4205 */
4206 uint16_t *pu16Frame;
4207 uint64_t uNewRsp;
4208 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
4209 if (rcStrict != VINF_SUCCESS)
4210 return rcStrict;
4211
4212 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4213#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
4214 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
4215 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
4216 fEfl |= UINT16_C(0xf000);
4217#endif
4218 pu16Frame[2] = (uint16_t)fEfl;
4219 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
4220 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4221 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
4222 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4223 return rcStrict;
4224
4225 /*
4226 * Load the vector address into cs:ip and make exception specific state
4227 * adjustments.
4228 */
4229 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
4230 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
4231 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4232 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
4233 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
4234 pVCpu->cpum.GstCtx.rip = Idte.off;
4235 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
4236 IEMMISC_SET_EFL(pVCpu, fEfl);
4237
4238 /** @todo do we actually do this in real mode? */
4239 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4240 iemRaiseXcptAdjustState(pVCpu, u8Vector);
4241
4242 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4243}
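
/*
 * Worked example of the real-mode dispatch above (illustrative values only):
 * for u8Vector=0x08 the 4-byte IVT entry is read at idtr.pIdt + 4*8 = +0x20 as
 * an offset:segment pair; if it holds sel=0xF000, off=0x1234, the code pushes
 * FLAGS, CS and IP (the return IP includes cbInstr only for software
 * interrupts) and continues at F000:1234 with cs.u64Base = 0xF000 << 4 = 0xF0000.
 */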
4244
4245
4246/**
4247 * Loads a NULL data selector into a segment register when coming from V8086 mode.
4248 *
4249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4250 * @param pSReg Pointer to the segment register.
4251 */
4252IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
4253{
4254 pSReg->Sel = 0;
4255 pSReg->ValidSel = 0;
4256 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4257 {
4258        /* VT-x (Intel 3960x) doesn't change the base and limit, but clears and sets the following attributes. */
4259 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
4260 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
4261 }
4262 else
4263 {
4264 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4265 /** @todo check this on AMD-V */
4266 pSReg->u64Base = 0;
4267 pSReg->u32Limit = 0;
4268 }
4269}
4270
4271
4272/**
4273 * Loads a segment selector during a task switch in V8086 mode.
4274 *
4275 * @param pSReg Pointer to the segment register.
4276 * @param uSel The selector value to load.
4277 */
4278IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
4279{
4280 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
4281 pSReg->Sel = uSel;
4282 pSReg->ValidSel = uSel;
4283 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4284 pSReg->u64Base = uSel << 4;
4285 pSReg->u32Limit = 0xffff;
4286 pSReg->Attr.u = 0xf3;
4287}
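
/*
 * Worked example (illustrative selector value): loading uSel=0x1234 in V8086
 * mode yields u64Base = 0x1234 << 4 = 0x12340, u32Limit = 0xffff and
 * Attr.u = 0xf3, i.e. a present, DPL=3, accessed read/write data segment.
 */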
4288
4289
4290/**
4291 * Loads a NULL data selector into a selector register, both the hidden and
4292 * visible parts, in protected mode.
4293 *
4294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4295 * @param pSReg Pointer to the segment register.
4296 * @param uRpl The RPL.
4297 */
4298IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
4299{
4300    /** @todo Testcase: write a testcase checking what happens when loading a NULL
4301 * data selector in protected mode. */
4302 pSReg->Sel = uRpl;
4303 pSReg->ValidSel = uRpl;
4304 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4305 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4306 {
4307 /* VT-x (Intel 3960x) observed doing something like this. */
4308 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
4309 pSReg->u32Limit = UINT32_MAX;
4310 pSReg->u64Base = 0;
4311 }
4312 else
4313 {
4314 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
4315 pSReg->u32Limit = 0;
4316 pSReg->u64Base = 0;
4317 }
4318}
4319
4320
4321/**
4322 * Loads a segment selector during a task switch in protected mode.
4323 *
4324 * In this task switch scenario, we would throw \#TS exceptions rather than
4325 * \#GPs.
4326 *
4327 * @returns VBox strict status code.
4328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4329 * @param pSReg Pointer to the segment register.
4330 * @param uSel The new selector value.
4331 *
4332 * @remarks This does _not_ handle CS or SS.
4333 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
4334 */
4335IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
4336{
4337 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4338
4339 /* Null data selector. */
4340 if (!(uSel & X86_SEL_MASK_OFF_RPL))
4341 {
4342 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
4343 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
4344 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4345 return VINF_SUCCESS;
4346 }
4347
4348 /* Fetch the descriptor. */
4349 IEMSELDESC Desc;
4350 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
4351 if (rcStrict != VINF_SUCCESS)
4352 {
4353 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
4354 VBOXSTRICTRC_VAL(rcStrict)));
4355 return rcStrict;
4356 }
4357
4358 /* Must be a data segment or readable code segment. */
4359 if ( !Desc.Legacy.Gen.u1DescType
4360 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
4361 {
4362 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
4363 Desc.Legacy.Gen.u4Type));
4364 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4365 }
4366
4367 /* Check privileges for data segments and non-conforming code segments. */
4368 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4369 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
4370 {
4371 /* The RPL and the new CPL must be less than or equal to the DPL. */
4372 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
4373 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
4374 {
4375 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
4376 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4377 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4378 }
4379 }
4380
4381 /* Is it there? */
4382 if (!Desc.Legacy.Gen.u1Present)
4383 {
4384 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
4385 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
4386 }
4387
4388 /* The base and limit. */
4389 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
4390 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
4391
4392 /*
4393 * Ok, everything checked out fine. Now set the accessed bit before
4394 * committing the result into the registers.
4395 */
4396 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4397 {
4398 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
4399 if (rcStrict != VINF_SUCCESS)
4400 return rcStrict;
4401 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4402 }
4403
4404 /* Commit */
4405 pSReg->Sel = uSel;
4406 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
4407 pSReg->u32Limit = cbLimit;
4408 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
4409 pSReg->ValidSel = uSel;
4410 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
4411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4412 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
4413
4414 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
4415 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4416 return VINF_SUCCESS;
4417}
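
/*
 * Illustrative call sketch (hypothetical fragment; the task-switch code further
 * down in this file loads the data segment registers in a similar way):
 *
 *     rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // #TS already raised / pending
 */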
4418
4419
4420/**
4421 * Performs a task switch.
4422 *
4423 * If the task switch is the result of a JMP, CALL or IRET instruction, the
4424 * caller is responsible for performing the necessary checks (like DPL, TSS
4425 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
4426 * reference for JMP, CALL, IRET.
4427 *
4428 * If the task switch is due to a software interrupt or hardware exception,
4429 * the caller is responsible for validating the TSS selector and descriptor. See
4430 * Intel Instruction reference for INT n.
4431 *
4432 * @returns VBox strict status code.
4433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4434 * @param enmTaskSwitch The cause of the task switch.
4435 * @param uNextEip The EIP effective after the task switch.
4436 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4437 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4438 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4439 * @param SelTSS The TSS selector of the new task.
4440 * @param pNewDescTSS Pointer to the new TSS descriptor.
4441 */
4442IEM_STATIC VBOXSTRICTRC
4443iemTaskSwitch(PVMCPUCC pVCpu,
4444 IEMTASKSWITCH enmTaskSwitch,
4445 uint32_t uNextEip,
4446 uint32_t fFlags,
4447 uint16_t uErr,
4448 uint64_t uCr2,
4449 RTSEL SelTSS,
4450 PIEMSELDESC pNewDescTSS)
4451{
4452 Assert(!IEM_IS_REAL_MODE(pVCpu));
4453 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4454 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4455
4456 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4457 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4458 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4459 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4460 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4461
4462 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4463 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4464
4465 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4466 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4467
4468 /* Update CR2 in case it's a page-fault. */
4469 /** @todo This should probably be done much earlier in IEM/PGM. See
4470 * @bugref{5653#c49}. */
4471 if (fFlags & IEM_XCPT_FLAGS_CR2)
4472 pVCpu->cpum.GstCtx.cr2 = uCr2;
4473
4474 /*
4475 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4476 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4477 */
4478 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4479 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4480 if (uNewTSSLimit < uNewTSSLimitMin)
4481 {
4482 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4483 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 /*
4488 * Task switches in VMX non-root mode always cause task-switch VM-exits.
4489 * The new TSS must have been read and validated (DPL, limits etc.) before a
4490 * task-switch VM-exit commences.
4491 *
4492 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4493 */
4494 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4495 {
4496 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4497 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4498 }
4499
4500 /*
4501 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4502 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4503 */
4504 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4505 {
4506 uint32_t const uExitInfo1 = SelTSS;
4507 uint32_t uExitInfo2 = uErr;
4508 switch (enmTaskSwitch)
4509 {
4510 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4511 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4512 default: break;
4513 }
4514 if (fFlags & IEM_XCPT_FLAGS_ERR)
4515 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4516 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4517 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4518
4519 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4520 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4521 RT_NOREF2(uExitInfo1, uExitInfo2);
4522 }
4523
4524 /*
4525 * Check the current TSS limit. The last written byte to the current TSS during the
4526 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4527 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4528 *
4529 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4530 * end up with smaller than "legal" TSS limits.
4531 */
4532 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4533 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4534 if (uCurTSSLimit < uCurTSSLimitMin)
4535 {
4536 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4537 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4538 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4539 }
4540
4541 /*
4542 * Verify that the new TSS can be accessed and map it. Map only the required contents
4543 * and not the entire TSS.
4544 */
4545 void *pvNewTSS;
4546 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4547 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4548 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4549 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4550 * not perform correct translation if this happens. See Intel spec. 7.2.1
4551 * "Task-State Segment". */
4552 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4553 if (rcStrict != VINF_SUCCESS)
4554 {
4555 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4556 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4557 return rcStrict;
4558 }
4559
4560 /*
4561 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4562 */
4563 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4564 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4565 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4566 {
4567 PX86DESC pDescCurTSS;
4568 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4569 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4570 if (rcStrict != VINF_SUCCESS)
4571 {
4572            Log(("iemTaskSwitch: Failed to read the current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4573 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4574 return rcStrict;
4575 }
4576
4577 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4578 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4579 if (rcStrict != VINF_SUCCESS)
4580 {
4581            Log(("iemTaskSwitch: Failed to commit the current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4582 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4583 return rcStrict;
4584 }
4585
4586 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4587 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4588 {
4589 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4590 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4591 u32EFlags &= ~X86_EFL_NT;
4592 }
4593 }
4594
4595 /*
4596 * Save the CPU state into the current TSS.
4597 */
4598 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4599 if (GCPtrNewTSS == GCPtrCurTSS)
4600 {
4601 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4602 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4603 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4604 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4605 pVCpu->cpum.GstCtx.ldtr.Sel));
4606 }
4607 if (fIsNewTSS386)
4608 {
4609 /*
4610 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4611 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4612 */
4613 void *pvCurTSS32;
4614 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4615 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4616 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4617 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4618 if (rcStrict != VINF_SUCCESS)
4619 {
4620 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4621 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4622 return rcStrict;
4623 }
4624
4625 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
4626 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4627 pCurTSS32->eip = uNextEip;
4628 pCurTSS32->eflags = u32EFlags;
4629 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4630 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4631 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4632 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4633 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4634 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4635 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4636 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4637 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4638 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4639 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4640 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4641 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4642 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4643
4644 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4645 if (rcStrict != VINF_SUCCESS)
4646 {
4647 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4648 VBOXSTRICTRC_VAL(rcStrict)));
4649 return rcStrict;
4650 }
4651 }
4652 else
4653 {
4654 /*
4655 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4656 */
4657 void *pvCurTSS16;
4658 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4659 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4660 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4661 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4662 if (rcStrict != VINF_SUCCESS)
4663 {
4664 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4665 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4666 return rcStrict;
4667 }
4668
4669 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e interval [offCurTSS..cbCurTSS). */
4670 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4671 pCurTSS16->ip = uNextEip;
4672 pCurTSS16->flags = u32EFlags;
4673 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4674 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4675 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4676 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4677 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4678 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4679 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4680 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4681 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4682 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4683 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4684 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4685
4686 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4687 if (rcStrict != VINF_SUCCESS)
4688 {
4689 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4690 VBOXSTRICTRC_VAL(rcStrict)));
4691 return rcStrict;
4692 }
4693 }
4694
4695 /*
4696 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4697 */
4698 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4699 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4700 {
4701 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4702 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4703 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4704 }
4705
4706 /*
4707 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4708 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4709 */
4710 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4711 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4712 bool fNewDebugTrap;
4713 if (fIsNewTSS386)
4714 {
4715 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4716 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4717 uNewEip = pNewTSS32->eip;
4718 uNewEflags = pNewTSS32->eflags;
4719 uNewEax = pNewTSS32->eax;
4720 uNewEcx = pNewTSS32->ecx;
4721 uNewEdx = pNewTSS32->edx;
4722 uNewEbx = pNewTSS32->ebx;
4723 uNewEsp = pNewTSS32->esp;
4724 uNewEbp = pNewTSS32->ebp;
4725 uNewEsi = pNewTSS32->esi;
4726 uNewEdi = pNewTSS32->edi;
4727 uNewES = pNewTSS32->es;
4728 uNewCS = pNewTSS32->cs;
4729 uNewSS = pNewTSS32->ss;
4730 uNewDS = pNewTSS32->ds;
4731 uNewFS = pNewTSS32->fs;
4732 uNewGS = pNewTSS32->gs;
4733 uNewLdt = pNewTSS32->selLdt;
4734 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4735 }
4736 else
4737 {
4738 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4739 uNewCr3 = 0;
4740 uNewEip = pNewTSS16->ip;
4741 uNewEflags = pNewTSS16->flags;
4742 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4743 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4744 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4745 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4746 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4747 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4748 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4749 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4750 uNewES = pNewTSS16->es;
4751 uNewCS = pNewTSS16->cs;
4752 uNewSS = pNewTSS16->ss;
4753 uNewDS = pNewTSS16->ds;
4754 uNewFS = 0;
4755 uNewGS = 0;
4756 uNewLdt = pNewTSS16->selLdt;
4757 fNewDebugTrap = false;
4758 }
4759
4760 if (GCPtrNewTSS == GCPtrCurTSS)
4761 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4762 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4763
4764 /*
4765 * We're done accessing the new TSS.
4766 */
4767 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4768 if (rcStrict != VINF_SUCCESS)
4769 {
4770 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4771 return rcStrict;
4772 }
4773
4774 /*
4775 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4776 */
4777 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4778 {
4779 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4780 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4781 if (rcStrict != VINF_SUCCESS)
4782 {
4783 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4784 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4785 return rcStrict;
4786 }
4787
4788 /* Check that the descriptor indicates the new TSS is available (not busy). */
4789 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4790 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4791 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4792
4793 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4794 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4795 if (rcStrict != VINF_SUCCESS)
4796 {
4797 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4798 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4799 return rcStrict;
4800 }
4801 }
4802
4803 /*
4804 * From this point on, we're technically in the new task. Exceptions are deferred until the task
4805 * switch completes, but are delivered before any instruction is executed in the new task.
4806 */
4807 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4808 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4809 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4810 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4811 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4812 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4813 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4814
4815 /* Set the busy bit in TR. */
4816 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4817
4818 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4819 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4820 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4821 {
4822 uNewEflags |= X86_EFL_NT;
4823 }
4824
4825 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4826 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4827 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4828
4829 pVCpu->cpum.GstCtx.eip = uNewEip;
4830 pVCpu->cpum.GstCtx.eax = uNewEax;
4831 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4832 pVCpu->cpum.GstCtx.edx = uNewEdx;
4833 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4834 pVCpu->cpum.GstCtx.esp = uNewEsp;
4835 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4836 pVCpu->cpum.GstCtx.esi = uNewEsi;
4837 pVCpu->cpum.GstCtx.edi = uNewEdi;
4838
4839 uNewEflags &= X86_EFL_LIVE_MASK;
4840 uNewEflags |= X86_EFL_RA1_MASK;
4841 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4842
4843 /*
4844 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4845 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4846 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4847 */
4848 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4849 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4850
4851 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4852 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4853
4854 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4855 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4856
4857 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4858 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4859
4860 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4861 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4862
4863 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4864 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4865 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4866
4867 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4868 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4869 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4870 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4871
4872 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4873 {
4874 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4875 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4876 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4877 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4878 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4879 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4880 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4881 }
4882
4883 /*
4884 * Switch CR3 for the new task.
4885 */
4886 if ( fIsNewTSS386
4887 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4888 {
4889 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4890 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4891 AssertRCSuccessReturn(rc, rc);
4892
4893 /* Inform PGM. */
4894 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4895 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4896 AssertRCReturn(rc, rc);
4897 /* ignore informational status codes */
4898
4899 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4900 }
4901
4902 /*
4903 * Switch LDTR for the new task.
4904 */
4905 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4906 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4907 else
4908 {
4909 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4910
4911 IEMSELDESC DescNewLdt;
4912 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4913 if (rcStrict != VINF_SUCCESS)
4914 {
4915 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4916 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4917 return rcStrict;
4918 }
4919 if ( !DescNewLdt.Legacy.Gen.u1Present
4920 || DescNewLdt.Legacy.Gen.u1DescType
4921 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4922 {
4923 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4924 uNewLdt, DescNewLdt.Legacy.u));
4925 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4926 }
4927
4928 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4929 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4930 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4931 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4932 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4933 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4934 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4935 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4936 }
4937
4938 IEMSELDESC DescSS;
4939 if (IEM_IS_V86_MODE(pVCpu))
4940 {
4941 pVCpu->iem.s.uCpl = 3;
4942 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4943 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4944 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4945 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4946 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4947 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4948
4949 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4950 DescSS.Legacy.u = 0;
4951 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4952 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4953 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4954 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4955 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4956 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4957 DescSS.Legacy.Gen.u2Dpl = 3;
4958 }
4959 else
4960 {
4961 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4962
4963 /*
4964 * Load the stack segment for the new task.
4965 */
4966 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4967 {
4968 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4969 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4970 }
4971
4972 /* Fetch the descriptor. */
4973 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4974 if (rcStrict != VINF_SUCCESS)
4975 {
4976 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4977 VBOXSTRICTRC_VAL(rcStrict)));
4978 return rcStrict;
4979 }
4980
4981 /* SS must be a data segment and writable. */
4982 if ( !DescSS.Legacy.Gen.u1DescType
4983 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4984 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4985 {
4986 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4987 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4988 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4989 }
4990
4991 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4992 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4993 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4994 {
4995 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4996 uNewCpl));
4997 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4998 }
4999
5000 /* Is it there? */
5001 if (!DescSS.Legacy.Gen.u1Present)
5002 {
5003 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
5004 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
5005 }
5006
5007 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
5008 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
5009
5010 /* Set the accessed bit before committing the result into SS. */
5011 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5012 {
5013 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
5014 if (rcStrict != VINF_SUCCESS)
5015 return rcStrict;
5016 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5017 }
5018
5019 /* Commit SS. */
5020 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
5021 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
5022 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5023 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
5024 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
5025 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
5027
5028 /* CPL has changed, update IEM before loading rest of segments. */
5029 pVCpu->iem.s.uCpl = uNewCpl;
5030
5031 /*
5032 * Load the data segments for the new task.
5033 */
5034 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
5035 if (rcStrict != VINF_SUCCESS)
5036 return rcStrict;
5037 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
5038 if (rcStrict != VINF_SUCCESS)
5039 return rcStrict;
5040 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
5041 if (rcStrict != VINF_SUCCESS)
5042 return rcStrict;
5043 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
5044 if (rcStrict != VINF_SUCCESS)
5045 return rcStrict;
5046
5047 /*
5048 * Load the code segment for the new task.
5049 */
5050 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
5051 {
5052 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
5053 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
5054 }
5055
5056 /* Fetch the descriptor. */
5057 IEMSELDESC DescCS;
5058 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
5059 if (rcStrict != VINF_SUCCESS)
5060 {
5061 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
5062 return rcStrict;
5063 }
5064
5065 /* CS must be a code segment. */
5066 if ( !DescCS.Legacy.Gen.u1DescType
5067 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
5068 {
5069 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
5070 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
5071 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
5072 }
5073
5074 /* For conforming CS, DPL must be less than or equal to the RPL. */
5075 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
5076 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
5077 {
5078 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
5079 DescCS.Legacy.Gen.u2Dpl));
5080 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
5081 }
5082
5083 /* For non-conforming CS, DPL must match RPL. */
5084 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
5085 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
5086 {
5087 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
5088 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
5089 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
5090 }
5091
5092 /* Is it there? */
5093 if (!DescCS.Legacy.Gen.u1Present)
5094 {
5095 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
5096 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
5097 }
5098
5099 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
5100 u64Base = X86DESC_BASE(&DescCS.Legacy);
5101
5102 /* Set the accessed bit before committing the result into CS. */
5103 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5104 {
5105 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
5106 if (rcStrict != VINF_SUCCESS)
5107 return rcStrict;
5108 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5109 }
5110
5111 /* Commit CS. */
5112 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
5113 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
5114 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5115 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
5116 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
5117 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
5119 }
5120
5121 /** @todo Debug trap. */
5122 if (fIsNewTSS386 && fNewDebugTrap)
5123 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
5124
5125 /*
5126 * Construct the error code masks based on what caused this task switch.
5127 * See Intel Instruction reference for INT.
5128 */
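     /* EXT is 1 only when the switch is caused by a hardware interrupt/exception or ICEBP; software INT n (and CALL/JMP switches) use 0. */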
5129 uint16_t uExt;
5130 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
5131 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5132 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
5133 {
5134 uExt = 1;
5135 }
5136 else
5137 uExt = 0;
5138
5139 /*
5140 * Push any error code on to the new stack.
5141 */
5142 if (fFlags & IEM_XCPT_FLAGS_ERR)
5143 {
5144 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
5145 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
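         /* The error code is pushed as a dword on the new task's stack for a 32-bit TSS and as a word for a 16-bit one. */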
5146 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
5147
5148 /* Check that there is sufficient space on the stack. */
5149 /** @todo Factor out segment limit checking for normal/expand down segments
5150 * into a separate function. */
5151 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5152 {
5153 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
5154 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
5155 {
5156 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
5157 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
5158 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
5159 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
5160 }
5161 }
5162 else
5163 {
5164 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
5165 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
5166 {
5167 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
5168 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
5169 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
5170 }
5171 }
5172
5173
5174 if (fIsNewTSS386)
5175 rcStrict = iemMemStackPushU32(pVCpu, uErr);
5176 else
5177 rcStrict = iemMemStackPushU16(pVCpu, uErr);
5178 if (rcStrict != VINF_SUCCESS)
5179 {
5180 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
5181 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
5182 return rcStrict;
5183 }
5184 }
5185
5186 /* Check the new EIP against the new CS limit. */
5187 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
5188 {
5189 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
5190 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
5191 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
5192 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
5193 }
5194
5195 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
5196 pVCpu->cpum.GstCtx.ss.Sel));
5197 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5198}
5199
5200
5201/**
5202 * Implements exceptions and interrupts for protected mode.
5203 *
5204 * @returns VBox strict status code.
5205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5206 * @param cbInstr The number of bytes to offset rIP by in the return
5207 * address.
5208 * @param u8Vector The interrupt / exception vector number.
5209 * @param fFlags The flags.
5210 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5211 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5212 */
5213IEM_STATIC VBOXSTRICTRC
5214iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
5215 uint8_t cbInstr,
5216 uint8_t u8Vector,
5217 uint32_t fFlags,
5218 uint16_t uErr,
5219 uint64_t uCr2)
5220{
5221 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5222
5223 /*
5224 * Read the IDT entry.
5225 */
5226 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
5227 {
5228 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5229 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5230 }
5231 X86DESC Idte;
5232 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
5233 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
5234 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5235 {
5236 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5237 return rcStrict;
5238 }
5239 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
5240 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5241 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5242
5243 /*
5244 * Check the descriptor type, DPL and such.
5245 * ASSUMES this is done in the same order as described for call-gate calls.
5246 */
5247 if (Idte.Gate.u1DescType)
5248 {
5249 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5250 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5251 }
5252 bool fTaskGate = false;
5253 uint8_t f32BitGate = true;
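     /* Note: f32BitGate doubles as a shift count (0 or 1) when sizing the stack frame further down. */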
5254 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5255 switch (Idte.Gate.u4Type)
5256 {
5257 case X86_SEL_TYPE_SYS_UNDEFINED:
5258 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
5259 case X86_SEL_TYPE_SYS_LDT:
5260 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
5261 case X86_SEL_TYPE_SYS_286_CALL_GATE:
5262 case X86_SEL_TYPE_SYS_UNDEFINED2:
5263 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
5264 case X86_SEL_TYPE_SYS_UNDEFINED3:
5265 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
5266 case X86_SEL_TYPE_SYS_386_CALL_GATE:
5267 case X86_SEL_TYPE_SYS_UNDEFINED4:
5268 {
5269 /** @todo check what actually happens when the type is wrong...
5270 * esp. call gates. */
5271 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5272 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5273 }
5274
5275 case X86_SEL_TYPE_SYS_286_INT_GATE:
5276 f32BitGate = false;
5277 RT_FALL_THRU();
5278 case X86_SEL_TYPE_SYS_386_INT_GATE:
5279 fEflToClear |= X86_EFL_IF;
5280 break;
5281
5282 case X86_SEL_TYPE_SYS_TASK_GATE:
5283 fTaskGate = true;
5284#ifndef IEM_IMPLEMENTS_TASKSWITCH
5285 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
5286#endif
5287 break;
5288
5289 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
5290 f32BitGate = false;
5291 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
5292 break;
5293
5294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5295 }
5296
5297 /* Check DPL against CPL if applicable. */
5298 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5299 {
5300 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5301 {
5302 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5303 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5304 }
5305 }
5306
5307 /* Is it there? */
5308 if (!Idte.Gate.u1Present)
5309 {
5310 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
5311 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5312 }
5313
5314 /* Is it a task-gate? */
5315 if (fTaskGate)
5316 {
5317 /*
5318 * Construct the error code masks based on what caused this task switch.
5319 * See Intel Instruction reference for INT.
5320 */
5321 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5322 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
5323 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
5324 RTSEL SelTSS = Idte.Gate.u16Sel;
5325
5326 /*
5327 * Fetch the TSS descriptor in the GDT.
5328 */
5329 IEMSELDESC DescTSS;
5330 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
5331 if (rcStrict != VINF_SUCCESS)
5332 {
5333 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
5334 VBOXSTRICTRC_VAL(rcStrict)));
5335 return rcStrict;
5336 }
5337
5338 /* The TSS descriptor must be a system segment and be available (not busy). */
5339 if ( DescTSS.Legacy.Gen.u1DescType
5340 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
5341 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
5342 {
5343 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
5344 u8Vector, SelTSS, DescTSS.Legacy.au64));
5345 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
5346 }
5347
5348 /* The TSS must be present. */
5349 if (!DescTSS.Legacy.Gen.u1Present)
5350 {
5351 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
5352 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
5353 }
5354
5355 /* Do the actual task switch. */
5356 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
5357 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
5358 fFlags, uErr, uCr2, SelTSS, &DescTSS);
5359 }
5360
5361 /* A null CS is bad. */
5362 RTSEL NewCS = Idte.Gate.u16Sel;
5363 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5364 {
5365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5366 return iemRaiseGeneralProtectionFault0(pVCpu);
5367 }
5368
5369 /* Fetch the descriptor for the new CS. */
5370 IEMSELDESC DescCS;
5371 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
5372 if (rcStrict != VINF_SUCCESS)
5373 {
5374 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5375 return rcStrict;
5376 }
5377
5378 /* Must be a code segment. */
5379 if (!DescCS.Legacy.Gen.u1DescType)
5380 {
5381 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5382 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5383 }
5384 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
5385 {
5386 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5387 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5388 }
5389
5390 /* Don't allow lowering the privilege level. */
5391 /** @todo Does the lowering of privileges apply to software interrupts
5392 * only? This has bearings on the more-privileged or
5393 * same-privilege stack behavior further down. A testcase would
5394 * be nice. */
5395 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5396 {
5397 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5398 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5399 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5400 }
5401
5402 /* Make sure the selector is present. */
5403 if (!DescCS.Legacy.Gen.u1Present)
5404 {
5405 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5406 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5407 }
5408
5409 /* Check the new EIP against the new CS limit. */
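     /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */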
5410 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
5411 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
5412 ? Idte.Gate.u16OffsetLow
5413 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
5414 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
5415 if (uNewEip > cbLimitCS)
5416 {
5417 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
5418 u8Vector, uNewEip, cbLimitCS, NewCS));
5419 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5420 }
5421 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
5422
5423 /* Calc the flag image to push. */
5424 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5425 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5426 fEfl &= ~X86_EFL_RF;
5427 else
5428 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5429
5430 /* From V8086 mode only go to CPL 0. */
5431 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5432 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5433 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5434 {
5435 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5436 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5437 }
5438
5439 /*
5440 * If the privilege level changes, we need to get a new stack from the TSS.
5441 * This in turns means validating the new SS and ESP...
5442 */
5443 if (uNewCpl != pVCpu->iem.s.uCpl)
5444 {
5445 RTSEL NewSS;
5446 uint32_t uNewEsp;
5447 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5448 if (rcStrict != VINF_SUCCESS)
5449 return rcStrict;
5450
5451 IEMSELDESC DescSS;
5452 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5453 if (rcStrict != VINF_SUCCESS)
5454 return rcStrict;
5455 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5456 if (!DescSS.Legacy.Gen.u1DefBig)
5457 {
5458 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5459 uNewEsp = (uint16_t)uNewEsp;
5460 }
5461
5462 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5463
5464 /* Check that there is sufficient space for the stack frame. */
5465 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
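         /* Frame contents: [error code,] EIP, CS, EFLAGS, ESP and SS, plus ES/DS/FS/GS when interrupting V8086 code; entries are 2 or 4 bytes depending on the gate size. */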
5466 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5467 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5468 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
5469
5470 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5471 {
5472 if ( uNewEsp - 1 > cbLimitSS
5473 || uNewEsp < cbStackFrame)
5474 {
5475 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5476 u8Vector, NewSS, uNewEsp, cbStackFrame));
5477 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5478 }
5479 }
5480 else
5481 {
5482 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5483 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5484 {
5485 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5486 u8Vector, NewSS, uNewEsp, cbStackFrame));
5487 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5488 }
5489 }
5490
5491 /*
5492 * Start making changes.
5493 */
5494
5495 /* Set the new CPL so that stack accesses use it. */
5496 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5497 pVCpu->iem.s.uCpl = uNewCpl;
5498
5499 /* Create the stack frame. */
5500 RTPTRUNION uStackFrame;
5501 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5502 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5503 if (rcStrict != VINF_SUCCESS)
5504 return rcStrict;
5505 void * const pvStackFrame = uStackFrame.pv;
5506 if (f32BitGate)
5507 {
5508 if (fFlags & IEM_XCPT_FLAGS_ERR)
5509 *uStackFrame.pu32++ = uErr;
5510 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5511 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5512 uStackFrame.pu32[2] = fEfl;
5513 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5514 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5515 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5516 if (fEfl & X86_EFL_VM)
5517 {
5518 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5519 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5520 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5521 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5522 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5523 }
5524 }
5525 else
5526 {
5527 if (fFlags & IEM_XCPT_FLAGS_ERR)
5528 *uStackFrame.pu16++ = uErr;
5529 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5530 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5531 uStackFrame.pu16[2] = fEfl;
5532 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5533 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5534 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5535 if (fEfl & X86_EFL_VM)
5536 {
5537 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5538 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5539 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5540 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5541 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5542 }
5543 }
5544 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5545 if (rcStrict != VINF_SUCCESS)
5546 return rcStrict;
5547
5548 /* Mark the selectors 'accessed' (hope this is the correct time). */
5549 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5550 * after pushing the stack frame? (Write protect the gdt + stack to
5551 * find out.) */
5552 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5553 {
5554 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5555 if (rcStrict != VINF_SUCCESS)
5556 return rcStrict;
5557 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5558 }
5559
5560 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5561 {
5562 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5563 if (rcStrict != VINF_SUCCESS)
5564 return rcStrict;
5565 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5566 }
5567
5568 /*
5569 * Start committing the register changes (joins with the DPL=CPL branch).
5570 */
5571 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5572 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5573 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5574 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5575 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5576 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5577 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5578 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5579 * SP is loaded).
5580 * Need to check the other combinations too:
5581 * - 16-bit TSS, 32-bit handler
5582 * - 32-bit TSS, 16-bit handler */
5583 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5584 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5585 else
5586 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5587
5588 if (fEfl & X86_EFL_VM)
5589 {
5590 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5591 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5592 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5593 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5594 }
5595 }
5596 /*
5597 * Same privilege, no stack change and smaller stack frame.
5598 */
5599 else
5600 {
5601 uint64_t uNewRsp;
5602 RTPTRUNION uStackFrame;
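         /* Same-privilege frame: [error code,] EIP, CS and EFLAGS only; entries are 2 or 4 bytes depending on the gate size. */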
5603 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5604 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5605 if (rcStrict != VINF_SUCCESS)
5606 return rcStrict;
5607 void * const pvStackFrame = uStackFrame.pv;
5608
5609 if (f32BitGate)
5610 {
5611 if (fFlags & IEM_XCPT_FLAGS_ERR)
5612 *uStackFrame.pu32++ = uErr;
5613 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5614 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5615 uStackFrame.pu32[2] = fEfl;
5616 }
5617 else
5618 {
5619 if (fFlags & IEM_XCPT_FLAGS_ERR)
5620 *uStackFrame.pu16++ = uErr;
5621 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5622 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5623 uStackFrame.pu16[2] = fEfl;
5624 }
5625 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5626 if (rcStrict != VINF_SUCCESS)
5627 return rcStrict;
5628
5629 /* Mark the CS selector as 'accessed'. */
5630 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5631 {
5632 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5633 if (rcStrict != VINF_SUCCESS)
5634 return rcStrict;
5635 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5636 }
5637
5638 /*
5639 * Start committing the register changes (joins with the other branch).
5640 */
5641 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5642 }
5643
5644 /* ... register committing continues. */
5645 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5646 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5647 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5648 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5649 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5650 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5651
5652 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5653 fEfl &= ~fEflToClear;
5654 IEMMISC_SET_EFL(pVCpu, fEfl);
5655
5656 if (fFlags & IEM_XCPT_FLAGS_CR2)
5657 pVCpu->cpum.GstCtx.cr2 = uCr2;
5658
5659 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5660 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5661
5662 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5663}
5664
5665
5666/**
5667 * Implements exceptions and interrupts for long mode.
5668 *
5669 * @returns VBox strict status code.
5670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5671 * @param cbInstr The number of bytes to offset rIP by in the return
5672 * address.
5673 * @param u8Vector The interrupt / exception vector number.
5674 * @param fFlags The flags.
5675 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5676 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5677 */
5678IEM_STATIC VBOXSTRICTRC
5679iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5680 uint8_t cbInstr,
5681 uint8_t u8Vector,
5682 uint32_t fFlags,
5683 uint16_t uErr,
5684 uint64_t uCr2)
5685{
5686 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5687
5688 /*
5689 * Read the IDT entry.
5690 */
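     /* Long-mode IDT entries are 16 bytes each, hence the shift by 4. */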
5691 uint16_t offIdt = (uint16_t)u8Vector << 4;
5692 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5693 {
5694 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5695 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5696 }
5697 X86DESC64 Idte;
5698#ifdef _MSC_VER /* Shut up silly compiler warning. */
5699 Idte.au64[0] = 0;
5700 Idte.au64[1] = 0;
5701#endif
5702 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5703 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5704 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5705 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5706 {
5707 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5708 return rcStrict;
5709 }
5710 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5711 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5712 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5713
5714 /*
5715 * Check the descriptor type, DPL and such.
5716 * ASSUMES this is done in the same order as described for call-gate calls.
5717 */
5718 if (Idte.Gate.u1DescType)
5719 {
5720 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5721 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5722 }
5723 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5724 switch (Idte.Gate.u4Type)
5725 {
5726 case AMD64_SEL_TYPE_SYS_INT_GATE:
5727 fEflToClear |= X86_EFL_IF;
5728 break;
5729 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5730 break;
5731
5732 default:
5733 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5734 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5735 }
5736
5737 /* Check DPL against CPL if applicable. */
5738 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5739 {
5740 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5741 {
5742 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5743 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5744 }
5745 }
5746
5747 /* Is it there? */
5748 if (!Idte.Gate.u1Present)
5749 {
5750 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5751 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5752 }
5753
5754 /* A null CS is bad. */
5755 RTSEL NewCS = Idte.Gate.u16Sel;
5756 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5757 {
5758 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5759 return iemRaiseGeneralProtectionFault0(pVCpu);
5760 }
5761
5762 /* Fetch the descriptor for the new CS. */
5763 IEMSELDESC DescCS;
5764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5765 if (rcStrict != VINF_SUCCESS)
5766 {
5767 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5768 return rcStrict;
5769 }
5770
5771 /* Must be a 64-bit code segment. */
5772 if (!DescCS.Long.Gen.u1DescType)
5773 {
5774 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5775 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5776 }
5777 if ( !DescCS.Long.Gen.u1Long
5778 || DescCS.Long.Gen.u1DefBig
5779 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5780 {
5781 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5782 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5783 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5784 }
5785
5786 /* Don't allow lowering the privilege level. For non-conforming CS
5787 selectors, the CS.DPL sets the privilege level the trap/interrupt
5788 handler runs at. For conforming CS selectors, the CPL remains
5789 unchanged, but the CS.DPL must be <= CPL. */
5790 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5791 * when CPU in Ring-0. Result \#GP? */
5792 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5793 {
5794 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5795 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5796 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5797 }
5798
5799
5800 /* Make sure the selector is present. */
5801 if (!DescCS.Legacy.Gen.u1Present)
5802 {
5803 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5804 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5805 }
5806
5807 /* Check that the new RIP is canonical. */
5808 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5809 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5810 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5811 if (!IEM_IS_CANONICAL(uNewRip))
5812 {
5813 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5814 return iemRaiseGeneralProtectionFault0(pVCpu);
5815 }
5816
5817 /*
5818 * If the privilege level changes or if the IST isn't zero, we need to get
5819 * a new stack from the TSS.
5820 */
5821 uint64_t uNewRsp;
5822 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5823 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5824 if ( uNewCpl != pVCpu->iem.s.uCpl
5825 || Idte.Gate.u3IST != 0)
5826 {
5827 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5828 if (rcStrict != VINF_SUCCESS)
5829 return rcStrict;
5830 }
5831 else
5832 uNewRsp = pVCpu->cpum.GstCtx.rsp;
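     /* In either case the new stack pointer is aligned down to a 16-byte boundary before the frame is pushed. */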
5833 uNewRsp &= ~(uint64_t)0xf;
5834
5835 /*
5836 * Calc the flag image to push.
5837 */
5838 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5839 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5840 fEfl &= ~X86_EFL_RF;
5841 else
5842 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5843
5844 /*
5845 * Start making changes.
5846 */
5847 /* Set the new CPL so that stack accesses use it. */
5848 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5849 pVCpu->iem.s.uCpl = uNewCpl;
5850
5851 /* Create the stack frame. */
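     /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus one more when an error code is pushed. */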
5852 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5853 RTPTRUNION uStackFrame;
5854 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5855 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5856 if (rcStrict != VINF_SUCCESS)
5857 return rcStrict;
5858 void * const pvStackFrame = uStackFrame.pv;
5859
5860 if (fFlags & IEM_XCPT_FLAGS_ERR)
5861 *uStackFrame.pu64++ = uErr;
5862 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5863 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5864 uStackFrame.pu64[2] = fEfl;
5865 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5866 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5867 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5868 if (rcStrict != VINF_SUCCESS)
5869 return rcStrict;
5870
5871 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5872 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5873 * after pushing the stack frame? (Write protect the gdt + stack to
5874 * find out.) */
5875 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5876 {
5877 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5878 if (rcStrict != VINF_SUCCESS)
5879 return rcStrict;
5880 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5881 }
5882
5883 /*
5884 * Start committing the register changes.
5885 */
5886 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5887 * hidden registers when interrupting 32-bit or 16-bit code! */
5888 if (uNewCpl != uOldCpl)
5889 {
5890 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5891 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5892 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5893 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5894 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5895 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5896 }
5897 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5898 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5899 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5900 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5901 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5902 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5903 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5904 pVCpu->cpum.GstCtx.rip = uNewRip;
5905
5906 fEfl &= ~fEflToClear;
5907 IEMMISC_SET_EFL(pVCpu, fEfl);
5908
5909 if (fFlags & IEM_XCPT_FLAGS_CR2)
5910 pVCpu->cpum.GstCtx.cr2 = uCr2;
5911
5912 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5913 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5914
5915 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5916}
5917
5918
5919/**
5920 * Implements exceptions and interrupts.
5921 *
5922 * All exceptions and interrupts go through this function!
5923 *
5924 * @returns VBox strict status code.
5925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5926 * @param cbInstr The number of bytes to offset rIP by in the return
5927 * address.
5928 * @param u8Vector The interrupt / exception vector number.
5929 * @param fFlags The flags.
5930 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5931 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5932 */
5933DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5934iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5935 uint8_t cbInstr,
5936 uint8_t u8Vector,
5937 uint32_t fFlags,
5938 uint16_t uErr,
5939 uint64_t uCr2)
5940{
5941 /*
5942 * Get all the state that we might need here.
5943 */
5944 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5945 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5946
5947#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5948 /*
5949 * Flush prefetch buffer
5950 */
5951 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5952#endif
5953
5954 /*
5955 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5956 */
5957 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5958 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5959 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5960 | IEM_XCPT_FLAGS_BP_INSTR
5961 | IEM_XCPT_FLAGS_ICEBP_INSTR
5962 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5963 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5964 {
5965 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5966 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5967 u8Vector = X86_XCPT_GP;
5968 uErr = 0;
5969 }
5970#ifdef DBGFTRACE_ENABLED
5971 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5972 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5973 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5974#endif
5975
5976 /*
5977 * Evaluate whether NMI blocking should be in effect.
5978 * Normally, NMI blocking is in effect whenever we inject an NMI.
5979 */
5980 bool fBlockNmi;
5981 if ( u8Vector == X86_XCPT_NMI
5982 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5983 fBlockNmi = true;
5984 else
5985 fBlockNmi = false;
5986
5987#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5988 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5989 {
5990 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5991 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5992 return rcStrict0;
5993
5994 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5995 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5996 {
5997 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5998 fBlockNmi = false;
5999 }
6000 }
6001#endif
6002
6003#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6004 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
6005 {
6006 /*
6007 * If the event is being injected as part of VMRUN, it isn't subject to event
6008 * intercepts in the nested-guest. However, secondary exceptions that occur
6009 * during injection of any event -are- subject to exception intercepts.
6010 *
6011 * See AMD spec. 15.20 "Event Injection".
6012 */
6013 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
6014 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
6015 else
6016 {
6017 /*
6018 * Check and handle if the event being raised is intercepted.
6019 */
6020 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
6021 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
6022 return rcStrict0;
6023 }
6024 }
6025#endif
6026
6027 /*
6028 * Set NMI blocking if necessary.
6029 */
6030 if ( fBlockNmi
6031 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
6032 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
6033
6034 /*
6035 * Do recursion accounting.
6036 */
6037 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
6038 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
6039 if (pVCpu->iem.s.cXcptRecursions == 0)
6040 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
6041 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
6042 else
6043 {
6044 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
6045 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
6046 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
6047
6048 if (pVCpu->iem.s.cXcptRecursions >= 4)
6049 {
6050#ifdef DEBUG_bird
6051 AssertFailed();
6052#endif
6053 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
6054 }
6055
6056 /*
6057 * Evaluate the sequence of recurring events.
6058 */
6059 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
6060 NULL /* pXcptRaiseInfo */);
6061 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
6062 { /* likely */ }
6063 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
6064 {
6065 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
6066 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
6067 u8Vector = X86_XCPT_DF;
6068 uErr = 0;
6069#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
6070 /* VMX nested-guest #DF intercept needs to be checked here. */
6071 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
6072 {
6073 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
6074 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
6075 return rcStrict0;
6076 }
6077#endif
6078 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
6079 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
6080 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
6081 }
6082 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
6083 {
6084 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
6085 return iemInitiateCpuShutdown(pVCpu);
6086 }
6087 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
6088 {
6089 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
6090 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
6091 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
6092 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
6093 return VERR_EM_GUEST_CPU_HANG;
6094 }
6095 else
6096 {
6097 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
6098 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
6099 return VERR_IEM_IPE_9;
6100 }
6101
6102 /*
6103 * The 'EXT' bit is set when an exception occurs during delivery of an external
6104 * event (such as an interrupt or an earlier exception)[1]. The privileged software
6105 * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by
6106 * software interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
6107 *
6108 * [1] - Intel spec. 6.13 "Error Code"
6109 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
6110 * [3] - Intel Instruction reference for INT n.
6111 */
6112 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
6113 && (fFlags & IEM_XCPT_FLAGS_ERR)
6114 && u8Vector != X86_XCPT_PF
6115 && u8Vector != X86_XCPT_DF)
6116 {
6117 uErr |= X86_TRAP_ERR_EXTERNAL;
6118 }
6119 }
6120
6121 pVCpu->iem.s.cXcptRecursions++;
6122 pVCpu->iem.s.uCurXcpt = u8Vector;
6123 pVCpu->iem.s.fCurXcpt = fFlags;
6124 pVCpu->iem.s.uCurXcptErr = uErr;
6125 pVCpu->iem.s.uCurXcptCr2 = uCr2;
6126
6127 /*
6128 * Extensive logging.
6129 */
6130#if defined(LOG_ENABLED) && defined(IN_RING3)
6131 if (LogIs3Enabled())
6132 {
6133 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
6134 PVM pVM = pVCpu->CTX_SUFF(pVM);
6135 char szRegs[4096];
6136 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6137 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6138 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6139 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6140 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6141 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6142 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6143 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6144 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6145 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6146 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6147 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6148 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6149 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6150 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6151 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6152 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6153 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6154 " efer=%016VR{efer}\n"
6155 " pat=%016VR{pat}\n"
6156 " sf_mask=%016VR{sf_mask}\n"
6157 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6158 " lstar=%016VR{lstar}\n"
6159 " star=%016VR{star} cstar=%016VR{cstar}\n"
6160 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6161 );
6162
6163 char szInstr[256];
6164 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6165 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6166 szInstr, sizeof(szInstr), NULL);
6167 Log3(("%s%s\n", szRegs, szInstr));
6168 }
6169#endif /* LOG_ENABLED && IN_RING3 */
6170
6171 /*
6172 * Call the mode specific worker function.
6173 */
6174 VBOXSTRICTRC rcStrict;
6175 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
6176 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
6177 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
6178 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
6179 else
6180 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
6181
6182 /* Flush the prefetch buffer. */
6183#ifdef IEM_WITH_CODE_TLB
6184 pVCpu->iem.s.pbInstrBuf = NULL;
6185#else
6186 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6187#endif
6188
6189 /*
6190 * Unwind.
6191 */
6192 pVCpu->iem.s.cXcptRecursions--;
6193 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
6194 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
6195 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
6196 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
6197 pVCpu->iem.s.cXcptRecursions + 1));
6198 return rcStrict;
6199}
6200
6201#ifdef IEM_WITH_SETJMP
6202/**
6203 * See iemRaiseXcptOrInt. Will not return.
6204 */
6205IEM_STATIC DECL_NO_RETURN(void)
6206iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
6207 uint8_t cbInstr,
6208 uint8_t u8Vector,
6209 uint32_t fFlags,
6210 uint16_t uErr,
6211 uint64_t uCr2)
6212{
6213 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
6214 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6215}
6216#endif
6217
6218
6219/** \#DE - 00. */
6220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
6221{
6222 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6223}
6224
6225
6226/** \#DB - 01.
6227 * @note This automatically clears DR7.GD. */
6228DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
6229{
6230 /** @todo set/clear RF. */
6231 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
6232 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6233}
6234
6235
6236/** \#BR - 05. */
6237DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
6238{
6239 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6240}
6241
6242
6243/** \#UD - 06. */
6244DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
6245{
6246 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6247}
6248
6249
6250/** \#NM - 07. */
6251DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
6252{
6253 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6254}
6255
6256
6257/** \#TS(err) - 0a. */
6258DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6259{
6260 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6261}
6262
6263
6264/** \#TS(tr) - 0a. */
6265DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
6266{
6267 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6268 pVCpu->cpum.GstCtx.tr.Sel, 0);
6269}
6270
6271
6272/** \#TS(0) - 0a. */
6273DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
6274{
6275 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6276 0, 0);
6277}
6278
6279
6280/** \#TS(sel) - 0a. */
6281DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6282{
6283 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6284 uSel & X86_SEL_MASK_OFF_RPL, 0);
6285}
6286
6287
6288/** \#NP(err) - 0b. */
6289DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6290{
6291 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6292}
6293
6294
6295/** \#NP(sel) - 0b. */
6296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6297{
6298 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6299 uSel & ~X86_SEL_RPL, 0);
6300}
6301
6302
6303/** \#SS(sel) - 0c. */
6304DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
6305{
6306 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6307 uSel & ~X86_SEL_RPL, 0);
6308}
6309
6310
6311/** \#SS(err) - 0c. */
6312DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
6313{
6314 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6315}
6316
6317
6318/** \#GP(n) - 0d. */
6319DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
6320{
6321 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
6322}
6323
6324
6325/** \#GP(0) - 0d. */
6326DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
6327{
6328 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6329}
6330
6331#ifdef IEM_WITH_SETJMP
6332/** \#GP(0) - 0d. */
6333DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
6334{
6335 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6336}
6337#endif
6338
6339
6340/** \#GP(sel) - 0d. */
6341DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
6342{
6343 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
6344 Sel & ~X86_SEL_RPL, 0);
6345}
6346
6347
6348/** \#GP(0) - 0d. */
6349DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
6350{
6351 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6352}
6353
6354
6355/** \#GP(sel) - 0d. */
6356DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6357{
6358 NOREF(iSegReg); NOREF(fAccess);
6359 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
6360 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6361}
6362
6363#ifdef IEM_WITH_SETJMP
6364/** \#GP(sel) - 0d, longjmp. */
6365DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6366{
6367 NOREF(iSegReg); NOREF(fAccess);
6368 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
6369 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6370}
6371#endif
6372
6373/** \#GP(sel) - 0d. */
6374DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
6375{
6376 NOREF(Sel);
6377 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6378}
6379
6380#ifdef IEM_WITH_SETJMP
6381/** \#GP(sel) - 0d, longjmp. */
6382DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
6383{
6384 NOREF(Sel);
6385 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6386}
6387#endif
6388
6389
6390/** \#GP(sel) - 0d. */
6391DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
6392{
6393 NOREF(iSegReg); NOREF(fAccess);
6394 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6395}
6396
6397#ifdef IEM_WITH_SETJMP
6398/** \#GP(sel) - 0d, longjmp. */
6399DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
6400 uint32_t fAccess)
6401{
6402 NOREF(iSegReg); NOREF(fAccess);
6403 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
6404}
6405#endif
6406
6407
6408/** \#PF(n) - 0e. */
6409DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6410{
6411 uint16_t uErr;
6412 switch (rc)
6413 {
6414 case VERR_PAGE_NOT_PRESENT:
6415 case VERR_PAGE_TABLE_NOT_PRESENT:
6416 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
6417 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
6418 uErr = 0;
6419 break;
6420
6421 default:
6422 AssertMsgFailed(("%Rrc\n", rc));
6423 RT_FALL_THRU();
6424 case VERR_ACCESS_DENIED:
6425 uErr = X86_TRAP_PF_P;
6426 break;
6427
6428 /** @todo reserved */
6429 }
6430
6431 if (pVCpu->iem.s.uCpl == 3)
6432 uErr |= X86_TRAP_PF_US;
6433
6434 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
6435 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
6436 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
6437 uErr |= X86_TRAP_PF_ID;
6438
6439#if 0 /* This is so much non-sense, really. Why was it done like that? */
6440 /* Note! RW access callers reporting a WRITE protection fault, will clear
6441 the READ flag before calling. So, read-modify-write accesses (RW)
6442 can safely be reported as READ faults. */
6443 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
6444 uErr |= X86_TRAP_PF_RW;
6445#else
6446 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6447 {
6448 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
6449 /// (regardless of outcome of the comparison in the latter case).
6450 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
6451 uErr |= X86_TRAP_PF_RW;
6452 }
6453#endif
6454
6455 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
6456 uErr, GCPtrWhere);
6457}
6458
6459#ifdef IEM_WITH_SETJMP
6460/** \#PF(n) - 0e, longjmp. */
6461IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
6462{
6463 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
6464}
6465#endif
6466
6467
6468/** \#MF(0) - 10. */
6469DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
6470{
6471 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6472}
6473
6474
6475/** \#AC(0) - 11. */
6476DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
6477{
6478 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6479}
6480
6481
6482/**
6483 * Macro for calling iemCImplRaiseDivideError().
6484 *
6485 * This enables us to add/remove arguments and force different levels of
6486 * inlining as we wish.
6487 *
6488 * @return Strict VBox status code.
6489 */
6490#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6491IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6492{
6493 NOREF(cbInstr);
6494 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6495}
6496
6497
6498/**
6499 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6500 *
6501 * This enables us to add/remove arguments and force different levels of
6502 * inlining as we wish.
6503 *
6504 * @return Strict VBox status code.
6505 */
6506#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6507IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6508{
6509 NOREF(cbInstr);
6510 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6511}
6512
6513
6514/**
6515 * Macro for calling iemCImplRaiseInvalidOpcode().
6516 *
6517 * This enables us to add/remove arguments and force different levels of
6518 * inlining as we wish.
6519 *
6520 * @return Strict VBox status code.
6521 */
6522#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6523IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6524{
6525 NOREF(cbInstr);
6526 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6527}
6528
6529
6530/** @} */
6531
6532
6533/*
6534 *
6535 * Helper routines.
6536 * Helper routines.
6537 * Helper routines.
6538 *
6539 */
6540
6541/**
6542 * Recalculates the effective operand size.
6543 *
6544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6545 */
6546IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6547{
6548 switch (pVCpu->iem.s.enmCpuMode)
6549 {
6550 case IEMMODE_16BIT:
6551 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6552 break;
6553 case IEMMODE_32BIT:
6554 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6555 break;
6556 case IEMMODE_64BIT:
6557 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6558 {
6559 case 0:
6560 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6561 break;
6562 case IEM_OP_PRF_SIZE_OP:
6563 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6564 break;
6565 case IEM_OP_PRF_SIZE_REX_W:
6566 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6567 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6568 break;
6569 }
6570 break;
6571 default:
6572 AssertFailed();
6573 }
6574}
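
/*
 * Illustrative sketch (not part of IEM, compiled out): how the 0x66 operand-size
 * prefix and REX.W combine in 64-bit mode, matching the switch in
 * iemRecalEffOpSize above: REX.W forces 64-bit and overrides 0x66, 0x66 alone
 * selects 16-bit, otherwise the default operand size applies.  The EX_* names
 * and exCalcOpSize64 are made up for this example.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <assert.h>

# define EX_PRF_SIZE_OP     UINT32_C(0x01)  /* 0x66 prefix seen. */
# define EX_PRF_SIZE_REX_W  UINT32_C(0x02)  /* REX.W seen. */

static unsigned exCalcOpSize64(uint32_t fPrefixes, unsigned cBitsDefault)
{
    if (fPrefixes & EX_PRF_SIZE_REX_W)
        return 64;                  /* REX.W wins; 0x66 is ignored. */
    if (fPrefixes & EX_PRF_SIZE_OP)
        return 16;                  /* 0x66 without REX.W selects 16-bit. */
    return cBitsDefault;            /* Typically 32; 64 where the default was promoted. */
}

int main(void)
{
    assert(exCalcOpSize64(0, 32) == 32);
    assert(exCalcOpSize64(EX_PRF_SIZE_OP, 32) == 16);
    assert(exCalcOpSize64(EX_PRF_SIZE_REX_W | EX_PRF_SIZE_OP, 32) == 64);
    return 0;
}
#endif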
6575
6576
6577/**
6578 * Sets the default operand size to 64-bit and recalculates the effective
6579 * operand size.
6580 *
6581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6582 */
6583IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6584{
6585 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6586 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6587 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6588 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6589 else
6590 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6591}
6592
6593
6594/*
6595 *
6596 * Common opcode decoders.
6597 * Common opcode decoders.
6598 * Common opcode decoders.
6599 *
6600 */
6601//#include <iprt/mem.h>
6602
6603/**
6604 * Used to add extra details about a stub case.
6605 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6606 */
6607IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6608{
6609#if defined(LOG_ENABLED) && defined(IN_RING3)
6610 PVM pVM = pVCpu->CTX_SUFF(pVM);
6611 char szRegs[4096];
6612 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6613 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6614 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6615 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6616 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6617 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6618 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6619 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6620 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6621 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6622 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6623 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6624 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6625 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6626 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6627 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6628 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6629 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6630 " efer=%016VR{efer}\n"
6631 " pat=%016VR{pat}\n"
6632 " sf_mask=%016VR{sf_mask}\n"
6633 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6634 " lstar=%016VR{lstar}\n"
6635 " star=%016VR{star} cstar=%016VR{cstar}\n"
6636 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6637 );
6638
6639 char szInstr[256];
6640 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6641 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6642 szInstr, sizeof(szInstr), NULL);
6643
6644 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6645#else
6646 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6647#endif
6648}
6649
6650/**
6651 * Complains about a stub.
6652 *
6653 * Providing two versions of this macro, one for daily use and one for use when
6654 * working on IEM.
6655 */
6656#if 0
6657# define IEMOP_BITCH_ABOUT_STUB() \
6658 do { \
6659 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6660 iemOpStubMsg2(pVCpu); \
6661 RTAssertPanic(); \
6662 } while (0)
6663#else
6664# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6665#endif
6666
6667/** Stubs an opcode. */
6668#define FNIEMOP_STUB(a_Name) \
6669 FNIEMOP_DEF(a_Name) \
6670 { \
6671 RT_NOREF_PV(pVCpu); \
6672 IEMOP_BITCH_ABOUT_STUB(); \
6673 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6674 } \
6675 typedef int ignore_semicolon
6676
6677/** Stubs an opcode. */
6678#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6679 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6680 { \
6681 RT_NOREF_PV(pVCpu); \
6682 RT_NOREF_PV(a_Name0); \
6683 IEMOP_BITCH_ABOUT_STUB(); \
6684 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6685 } \
6686 typedef int ignore_semicolon
6687
6688/** Stubs an opcode which currently should raise \#UD. */
6689#define FNIEMOP_UD_STUB(a_Name) \
6690 FNIEMOP_DEF(a_Name) \
6691 { \
6692 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6693 return IEMOP_RAISE_INVALID_OPCODE(); \
6694 } \
6695 typedef int ignore_semicolon
6696
6697/** Stubs an opcode which currently should raise \#UD. */
6698#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6699 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6700 { \
6701 RT_NOREF_PV(pVCpu); \
6702 RT_NOREF_PV(a_Name0); \
6703 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6704 return IEMOP_RAISE_INVALID_OPCODE(); \
6705 } \
6706 typedef int ignore_semicolon
6707
6708
6709
6710/** @name Register Access.
6711 * @{
6712 */
6713
6714/**
6715 * Gets a reference (pointer) to the specified hidden segment register.
6716 *
6717 * @returns Hidden register reference.
6718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6719 * @param iSegReg The segment register.
6720 */
6721IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6722{
6723 Assert(iSegReg < X86_SREG_COUNT);
6724 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6725 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6726
6727 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6728 return pSReg;
6729}
6730
6731
6732/**
6733 * Ensures that the given hidden segment register is up to date.
6734 *
6735 * @returns Hidden register reference.
6736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6737 * @param pSReg The segment register.
6738 */
6739IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6740{
6741 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6742 NOREF(pVCpu);
6743 return pSReg;
6744}
6745
6746
6747/**
6748 * Gets a reference (pointer) to the specified segment register (the selector
6749 * value).
6750 *
6751 * @returns Pointer to the selector variable.
6752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6753 * @param iSegReg The segment register.
6754 */
6755DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6756{
6757 Assert(iSegReg < X86_SREG_COUNT);
6758 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6759 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6760}
6761
6762
6763/**
6764 * Fetches the selector value of a segment register.
6765 *
6766 * @returns The selector value.
6767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6768 * @param iSegReg The segment register.
6769 */
6770DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6771{
6772 Assert(iSegReg < X86_SREG_COUNT);
6773 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6774 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6775}
6776
6777
6778/**
6779 * Fetches the base address value of a segment register.
6780 *
6781 * @returns The segment base address value.
6782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6783 * @param iSegReg The segment register.
6784 */
6785DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6786{
6787 Assert(iSegReg < X86_SREG_COUNT);
6788 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6789 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6790}
6791
6792
6793/**
6794 * Gets a reference (pointer) to the specified general purpose register.
6795 *
6796 * @returns Register reference.
6797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6798 * @param iReg The general purpose register.
6799 */
6800DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6801{
6802 Assert(iReg < 16);
6803 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6804}
6805
6806
6807/**
6808 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6809 *
6810 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6811 *
6812 * @returns Register reference.
6813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6814 * @param iReg The register.
6815 */
6816DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6817{
6818 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6819 {
6820 Assert(iReg < 16);
6821 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6822 }
6823 /* high 8-bit register. */
6824 Assert(iReg < 8);
6825 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6826}
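
/*
 * Illustrative sketch (not part of IEM, compiled out): the legacy high-byte
 * register quirk handled by iemGRegRefU8 above.  Without a REX prefix, byte
 * register encodings 4-7 do not address SPL/BPL/SIL/DIL but AH/CH/DH/BH, that
 * is bits 8-15 of the first four GPRs, hence the "iReg & 3" plus high-byte
 * access.  The EXGREG type and exByteRegRef are made up for this example and a
 * little-endian host is assumed.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <stdio.h>

typedef union EXGREG { uint64_t u64; uint8_t ab[8]; } EXGREG;

static uint8_t *exByteRegRef(EXGREG *paGRegs, unsigned iReg, int fHasRex)
{
    if (iReg < 4 || fHasRex)
        return &paGRegs[iReg].ab[0];    /* AL/CL/DL/BL, or SPL..DIL / R8B..R15B with REX. */
    return &paGRegs[iReg & 3].ab[1];    /* AH/CH/DH/BH without REX. */
}

int main(void)
{
    EXGREG aGRegs[16] = {{0}};
    aGRegs[0].u64 = UINT64_C(0x1122);   /* RAX: AL=0x22, AH=0x11. */
    printf("encoding 4 w/o REX: %#x (AH)\n",  *exByteRegRef(aGRegs, 4, 0));
    printf("encoding 4 w/  REX: %#x (SPL)\n", *exByteRegRef(aGRegs, 4, 1));
    return 0;
}
#endif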
6827
6828
6829/**
6830 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6831 *
6832 * @returns Register reference.
6833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6834 * @param iReg The register.
6835 */
6836DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6837{
6838 Assert(iReg < 16);
6839 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6840}
6841
6842
6843/**
6844 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6845 *
6846 * @returns Register reference.
6847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6848 * @param iReg The register.
6849 */
6850DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6851{
6852 Assert(iReg < 16);
6853 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6854}
6855
6856
6857/**
6858 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6859 *
6860 * @returns Register reference.
6861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6862 * @param iReg The register.
6863 */
6864DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6865{
6866 Assert(iReg < 16);
6867 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6868}
6869
6870
6871/**
6872 * Gets a reference (pointer) to the specified segment register's base address.
6873 *
6874 * @returns Segment register base address reference.
6875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6876 * @param iSegReg The segment register.
6877 */
6878DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6879{
6880 Assert(iSegReg < X86_SREG_COUNT);
6881 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6882 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6883}
6884
6885
6886/**
6887 * Fetches the value of an 8-bit general purpose register.
6888 *
6889 * @returns The register value.
6890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6891 * @param iReg The register.
6892 */
6893DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6894{
6895 return *iemGRegRefU8(pVCpu, iReg);
6896}
6897
6898
6899/**
6900 * Fetches the value of a 16-bit general purpose register.
6901 *
6902 * @returns The register value.
6903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6904 * @param iReg The register.
6905 */
6906DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6907{
6908 Assert(iReg < 16);
6909 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6910}
6911
6912
6913/**
6914 * Fetches the value of a 32-bit general purpose register.
6915 *
6916 * @returns The register value.
6917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6918 * @param iReg The register.
6919 */
6920DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6921{
6922 Assert(iReg < 16);
6923 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6924}
6925
6926
6927/**
6928 * Fetches the value of a 64-bit general purpose register.
6929 *
6930 * @returns The register value.
6931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6932 * @param iReg The register.
6933 */
6934DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6935{
6936 Assert(iReg < 16);
6937 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6938}
6939
6940
6941/**
6942 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6943 *
6944 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6945 * segment limit.
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 * @param offNextInstr The offset of the next instruction.
6949 */
6950IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6951{
6952 switch (pVCpu->iem.s.enmEffOpSize)
6953 {
6954 case IEMMODE_16BIT:
6955 {
6956 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6957 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6958 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6959 return iemRaiseGeneralProtectionFault0(pVCpu);
6960 pVCpu->cpum.GstCtx.rip = uNewIp;
6961 break;
6962 }
6963
6964 case IEMMODE_32BIT:
6965 {
6966 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6967 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6968
6969 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6970 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6971 return iemRaiseGeneralProtectionFault0(pVCpu);
6972 pVCpu->cpum.GstCtx.rip = uNewEip;
6973 break;
6974 }
6975
6976 case IEMMODE_64BIT:
6977 {
6978 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6979
6980 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6981 if (!IEM_IS_CANONICAL(uNewRip))
6982 return iemRaiseGeneralProtectionFault0(pVCpu);
6983 pVCpu->cpum.GstCtx.rip = uNewRip;
6984 break;
6985 }
6986
6987 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6988 }
6989
6990 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6991
6992#ifndef IEM_WITH_CODE_TLB
6993 /* Flush the prefetch buffer. */
6994 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6995#endif
6996
6997 return VINF_SUCCESS;
6998}
6999
7000
7001/**
7002 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
7003 *
7004 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
7005 * segment limit.
7006 *
7007 * @returns Strict VBox status code.
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 * @param offNextInstr The offset of the next instruction.
7010 */
7011IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
7012{
7013 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
7014
7015 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
7016 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
7017 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
7018 return iemRaiseGeneralProtectionFault0(pVCpu);
7019 /** @todo Test 16-bit jump in 64-bit mode. possible? */
7020 pVCpu->cpum.GstCtx.rip = uNewIp;
7021 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
7022
7023#ifndef IEM_WITH_CODE_TLB
7024 /* Flush the prefetch buffer. */
7025 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
7026#endif
7027
7028 return VINF_SUCCESS;
7029}
7030
7031
7032/**
7033 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
7034 *
7035 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
7036 * segment limit.
7037 *
7038 * @returns Strict VBox status code.
7039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7040 * @param offNextInstr The offset of the next instruction.
7041 */
7042IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
7043{
7044 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
7045
7046 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
7047 {
7048 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
7049
7050 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
7051 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
7052 return iemRaiseGeneralProtectionFault0(pVCpu);
7053 pVCpu->cpum.GstCtx.rip = uNewEip;
7054 }
7055 else
7056 {
7057 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
7058
7059 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
7060 if (!IEM_IS_CANONICAL(uNewRip))
7061 return iemRaiseGeneralProtectionFault0(pVCpu);
7062 pVCpu->cpum.GstCtx.rip = uNewRip;
7063 }
7064 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
7065
7066#ifndef IEM_WITH_CODE_TLB
7067 /* Flush the prefetch buffer. */
7068 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
7069#endif
7070
7071 return VINF_SUCCESS;
7072}
7073
7074
7075/**
7076 * Performs a near jump to the specified address.
7077 *
7078 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
7079 * segment limit.
7080 *
7081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7082 * @param uNewRip The new RIP value.
7083 */
7084IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
7085{
7086 switch (pVCpu->iem.s.enmEffOpSize)
7087 {
7088 case IEMMODE_16BIT:
7089 {
7090 Assert(uNewRip <= UINT16_MAX);
7091 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
7092 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
7093 return iemRaiseGeneralProtectionFault0(pVCpu);
7094 /** @todo Test 16-bit jump in 64-bit mode. */
7095 pVCpu->cpum.GstCtx.rip = uNewRip;
7096 break;
7097 }
7098
7099 case IEMMODE_32BIT:
7100 {
7101 Assert(uNewRip <= UINT32_MAX);
7102 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
7103 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
7104
7105 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
7106 return iemRaiseGeneralProtectionFault0(pVCpu);
7107 pVCpu->cpum.GstCtx.rip = uNewRip;
7108 break;
7109 }
7110
7111 case IEMMODE_64BIT:
7112 {
7113 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
7114
7115 if (!IEM_IS_CANONICAL(uNewRip))
7116 return iemRaiseGeneralProtectionFault0(pVCpu);
7117 pVCpu->cpum.GstCtx.rip = uNewRip;
7118 break;
7119 }
7120
7121 IEM_NOT_REACHED_DEFAULT_CASE_RET();
7122 }
7123
7124 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
7125
7126#ifndef IEM_WITH_CODE_TLB
7127 /* Flush the prefetch buffer. */
7128 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
7129#endif
7130
7131 return VINF_SUCCESS;
7132}
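
/*
 * Illustrative sketch (not part of IEM, compiled out): the canonical-address
 * test the 64-bit branches above depend on.  Assuming the classic 48 implemented
 * virtual-address bits, an address is canonical when bits 63:47 are all copies
 * of bit 47, which the add-and-compare below expresses using modulo-2^64
 * arithmetic.  exIsCanonical48 is a made-up name for this example.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <assert.h>

static int exIsCanonical48(uint64_t uAddr)
{
    /* Unsigned wrap-around is well defined, so the whole test is one add and one compare. */
    return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
}

int main(void)
{
    assert( exIsCanonical48(UINT64_C(0x00007fffffffffff)));    /* Top of the lower half. */
    assert( exIsCanonical48(UINT64_C(0xffff800000000000)));    /* Bottom of the upper half. */
    assert(!exIsCanonical48(UINT64_C(0x0000800000000000)));    /* First non-canonical address. */
    return 0;
}
#endif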
7133
7134
7135/**
7136 * Gets the address of the top of the stack.
7137 *
7138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7139 */
7140DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
7141{
7142 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7143 return pVCpu->cpum.GstCtx.rsp;
7144 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7145 return pVCpu->cpum.GstCtx.esp;
7146 return pVCpu->cpum.GstCtx.sp;
7147}
7148
7149
7150/**
7151 * Updates the RIP/EIP/IP to point to the next instruction.
7152 *
7153 * This function leaves the EFLAGS.RF flag alone.
7154 *
7155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7156 * @param cbInstr The number of bytes to add.
7157 */
7158IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
7159{
7160 switch (pVCpu->iem.s.enmCpuMode)
7161 {
7162 case IEMMODE_16BIT:
7163 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
7164 pVCpu->cpum.GstCtx.eip += cbInstr;
7165 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
7166 break;
7167
7168 case IEMMODE_32BIT:
7169 pVCpu->cpum.GstCtx.eip += cbInstr;
7170 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
7171 break;
7172
7173 case IEMMODE_64BIT:
7174 pVCpu->cpum.GstCtx.rip += cbInstr;
7175 break;
7176 default: AssertFailed();
7177 }
7178}
7179
7180
7181#if 0
7182/**
7183 * Updates the RIP/EIP/IP to point to the next instruction.
7184 *
7185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7186 */
7187IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
7188{
7189 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
7190}
7191#endif
7192
7193
7194
7195/**
7196 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
7197 *
7198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7199 * @param cbInstr The number of bytes to add.
7200 */
7201IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
7202{
7203 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
7204
7205 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
7206#if ARCH_BITS >= 64
7207 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
7208 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
7209 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
7210#else
7211 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7212 pVCpu->cpum.GstCtx.rip += cbInstr;
7213 else
7214 pVCpu->cpum.GstCtx.eip += cbInstr;
7215#endif
7216}
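
/*
 * Illustrative sketch (not part of IEM, compiled out): the branchless RIP
 * advance used by iemRegAddToRipAndClearRF above.  Instead of switching on the
 * CPU mode, the incremented RIP is ANDed with a per-mode mask so 16-bit and
 * 32-bit modes wrap at 32 bits while 64-bit mode keeps all bits.  The EX_MODE_*
 * values and exAdvanceRip are made up for this example.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <assert.h>

enum { EX_MODE_16 = 0, EX_MODE_32 = 1, EX_MODE_64 = 2 };

static uint64_t exAdvanceRip(uint64_t uRip, uint8_t cbInstr, unsigned enmMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[enmMode];
}

int main(void)
{
    assert(exAdvanceRip(UINT64_C(0xfffffffe), 4, EX_MODE_32) == UINT64_C(0x2));
    assert(exAdvanceRip(UINT64_C(0xfffffffe), 4, EX_MODE_64) == UINT64_C(0x100000002));
    return 0;
}
#endif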
7217
7218
7219/**
7220 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
7221 *
7222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7223 */
7224IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
7225{
7226 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
7227}
7228
7229
7230/**
7231 * Adds to the stack pointer.
7232 *
7233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7234 * @param cbToAdd The number of bytes to add (8-bit!).
7235 */
7236DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
7237{
7238 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7239 pVCpu->cpum.GstCtx.rsp += cbToAdd;
7240 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7241 pVCpu->cpum.GstCtx.esp += cbToAdd;
7242 else
7243 pVCpu->cpum.GstCtx.sp += cbToAdd;
7244}
7245
7246
7247/**
7248 * Subtracts from the stack pointer.
7249 *
7250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7251 * @param cbToSub The number of bytes to subtract (8-bit!).
7252 */
7253DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
7254{
7255 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7256 pVCpu->cpum.GstCtx.rsp -= cbToSub;
7257 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7258 pVCpu->cpum.GstCtx.esp -= cbToSub;
7259 else
7260 pVCpu->cpum.GstCtx.sp -= cbToSub;
7261}
7262
7263
7264/**
7265 * Adds to the temporary stack pointer.
7266 *
7267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7268 * @param pTmpRsp The temporary SP/ESP/RSP to update.
7269 * @param cbToAdd The number of bytes to add (16-bit).
7270 */
7271DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
7272{
7273 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7274 pTmpRsp->u += cbToAdd;
7275 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7276 pTmpRsp->DWords.dw0 += cbToAdd;
7277 else
7278 pTmpRsp->Words.w0 += cbToAdd;
7279}
7280
7281
7282/**
7283 * Subtracts from the temporary stack pointer.
7284 *
7285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7286 * @param pTmpRsp The temporary SP/ESP/RSP to update.
7287 * @param cbToSub The number of bytes to subtract.
7288 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
7289 * expecting that.
7290 */
7291DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
7292{
7293 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7294 pTmpRsp->u -= cbToSub;
7295 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7296 pTmpRsp->DWords.dw0 -= cbToSub;
7297 else
7298 pTmpRsp->Words.w0 -= cbToSub;
7299}
7300
7301
7302/**
7303 * Calculates the effective stack address for a push of the specified size as
7304 * well as the new RSP value (upper bits may be masked).
7305 *
7306 * @returns Effective stack address for the push.
7307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7308 * @param cbItem The size of the stack item to push.
7309 * @param puNewRsp Where to return the new RSP value.
7310 */
7311DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
7312{
7313 RTUINT64U uTmpRsp;
7314 RTGCPTR GCPtrTop;
7315 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
7316
7317 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7318 GCPtrTop = uTmpRsp.u -= cbItem;
7319 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7320 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
7321 else
7322 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
7323 *puNewRsp = uTmpRsp.u;
7324 return GCPtrTop;
7325}
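
/*
 * Illustrative sketch (not part of IEM, compiled out): the width-dependent
 * stack pointer update done by iemRegGetRspForPush above.  Depending on the
 * mode and SS.D only RSP, ESP or SP is decremented (and wrapped), the untouched
 * upper bits are preserved, and the decremented value is the address the push
 * will store at.  The EXRSP union and exRspForPush are made up for this example
 * and a little-endian host is assumed.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <stdio.h>

typedef union EXRSP { uint64_t u; uint32_t dw0; uint16_t w0; } EXRSP;

static uint64_t exRspForPush(uint64_t uRsp, uint8_t cbItem, int f64Bit, int fSsBig, uint64_t *puNewRsp)
{
    EXRSP    uTmp;
    uint64_t GCPtrTop;
    uTmp.u = uRsp;
    if (f64Bit)
        GCPtrTop = uTmp.u   -= cbItem;      /* Full 64-bit RSP. */
    else if (fSsBig)
        GCPtrTop = uTmp.dw0 -= cbItem;      /* 32-bit ESP; bits 63:32 preserved. */
    else
        GCPtrTop = uTmp.w0  -= cbItem;      /* 16-bit SP; wraps within the low word. */
    *puNewRsp = uTmp.u;
    return GCPtrTop;
}

int main(void)
{
    uint64_t uNewRsp;
    /* 16-bit stack with SP=0: wraps to 0xfffe, upper RSP bits untouched. */
    printf("push addr %#llx, new rsp %#llx\n",
           (unsigned long long)exRspForPush(UINT64_C(0x10000), 2, 0 /*f64Bit*/, 0 /*fSsBig*/, &uNewRsp),
           (unsigned long long)uNewRsp);
    return 0;
}
#endif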
7326
7327
7328/**
7329 * Gets the current stack pointer and calculates the value after a pop of the
7330 * specified size.
7331 *
7332 * @returns Current stack pointer.
7333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7334 * @param cbItem The size of the stack item to pop.
7335 * @param puNewRsp Where to return the new RSP value.
7336 */
7337DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
7338{
7339 RTUINT64U uTmpRsp;
7340 RTGCPTR GCPtrTop;
7341 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
7342
7343 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7344 {
7345 GCPtrTop = uTmpRsp.u;
7346 uTmpRsp.u += cbItem;
7347 }
7348 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7349 {
7350 GCPtrTop = uTmpRsp.DWords.dw0;
7351 uTmpRsp.DWords.dw0 += cbItem;
7352 }
7353 else
7354 {
7355 GCPtrTop = uTmpRsp.Words.w0;
7356 uTmpRsp.Words.w0 += cbItem;
7357 }
7358 *puNewRsp = uTmpRsp.u;
7359 return GCPtrTop;
7360}
7361
7362
7363/**
7364 * Calculates the effective stack address for a push of the specified size as
7365 * well as the new temporary RSP value (upper bits may be masked).
7366 *
7367 * @returns Effective stack address for the push.
7368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7369 * @param pTmpRsp The temporary stack pointer. This is updated.
7370 * @param cbItem The size of the stack item to push.
7371 */
7372DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
7373{
7374 RTGCPTR GCPtrTop;
7375
7376 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7377 GCPtrTop = pTmpRsp->u -= cbItem;
7378 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7379 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
7380 else
7381 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
7382 return GCPtrTop;
7383}
7384
7385
7386/**
7387 * Gets the effective stack address for a pop of the specified size and
7388 * calculates and updates the temporary RSP.
7389 *
7390 * @returns Current stack pointer.
7391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7392 * @param pTmpRsp The temporary stack pointer. This is updated.
7393 * @param cbItem The size of the stack item to pop.
7394 */
7395DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
7396{
7397 RTGCPTR GCPtrTop;
7398 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7399 {
7400 GCPtrTop = pTmpRsp->u;
7401 pTmpRsp->u += cbItem;
7402 }
7403 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
7404 {
7405 GCPtrTop = pTmpRsp->DWords.dw0;
7406 pTmpRsp->DWords.dw0 += cbItem;
7407 }
7408 else
7409 {
7410 GCPtrTop = pTmpRsp->Words.w0;
7411 pTmpRsp->Words.w0 += cbItem;
7412 }
7413 return GCPtrTop;
7414}
7415
7416/** @} */
7417
7418
7419/** @name FPU access and helpers.
7420 *
7421 * @{
7422 */
7423
7424
7425/**
7426 * Hook for preparing to use the host FPU.
7427 *
7428 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7429 *
7430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7431 */
7432DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
7433{
7434#ifdef IN_RING3
7435 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7436#else
7437 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
7438#endif
7439 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7440}
7441
7442
7443/**
7444 * Hook for preparing to use the host FPU for SSE.
7445 *
7446 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7447 *
7448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7449 */
7450DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
7451{
7452 iemFpuPrepareUsage(pVCpu);
7453}
7454
7455
7456/**
7457 * Hook for preparing to use the host FPU for AVX.
7458 *
7459 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7460 *
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 */
7463DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
7464{
7465 iemFpuPrepareUsage(pVCpu);
7466}
7467
7468
7469/**
7470 * Hook for actualizing the guest FPU state before the interpreter reads it.
7471 *
7472 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7473 *
7474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7475 */
7476DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
7477{
7478#ifdef IN_RING3
7479 NOREF(pVCpu);
7480#else
7481 CPUMRZFpuStateActualizeForRead(pVCpu);
7482#endif
7483 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7484}
7485
7486
7487/**
7488 * Hook for actualizing the guest FPU state before the interpreter changes it.
7489 *
7490 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7491 *
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 */
7494DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
7495{
7496#ifdef IN_RING3
7497 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7498#else
7499 CPUMRZFpuStateActualizeForChange(pVCpu);
7500#endif
7501 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7502}
7503
7504
7505/**
7506 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7507 * only.
7508 *
7509 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7510 *
7511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7512 */
7513DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
7514{
7515#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7516 NOREF(pVCpu);
7517#else
7518 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7519#endif
7520 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7521}
7522
7523
7524/**
7525 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7526 * read+write.
7527 *
7528 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7529 *
7530 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7531 */
7532DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
7533{
7534#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7535 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7536#else
7537 CPUMRZFpuStateActualizeForChange(pVCpu);
7538#endif
7539 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7540
7541 /* Make sure any changes are loaded the next time around. */
7542 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7543}
7544
7545
7546/**
7547 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7548 * only.
7549 *
7550 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7551 *
7552 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7553 */
7554DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7555{
7556#ifdef IN_RING3
7557 NOREF(pVCpu);
7558#else
7559 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7560#endif
7561 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7562}
7563
7564
7565/**
7566 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7567 * read+write.
7568 *
7569 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 */
7573DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7574{
7575#ifdef IN_RING3
7576 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7577#else
7578 CPUMRZFpuStateActualizeForChange(pVCpu);
7579#endif
7580 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7581
7582 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7583 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7584}
7585
7586
7587/**
7588 * Stores a QNaN value into a FPU register.
7589 *
7590 * @param pReg Pointer to the register.
7591 */
7592DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7593{
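    /* The three stores below build the x87 "real indefinite" QNaN: mantissa
       0xc000000000000000 (integer bit plus top fraction bit) under a sign and
       exponent word of 0xffff (sign set, exponent all ones).  This is the value
       the FPU stores for masked-invalid responses, e.g. on stack overflow. */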
7594 pReg->au32[0] = UINT32_C(0x00000000);
7595 pReg->au32[1] = UINT32_C(0xc0000000);
7596 pReg->au16[4] = UINT16_C(0xffff);
7597}
7598
7599
7600/**
7601 * Updates the FOP, FPU.CS and FPUIP registers.
7602 *
7603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7604 * @param pFpuCtx The FPU context.
7605 */
7606DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7607{
7608 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7609 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7610 /** @todo x87.CS and FPUIP need to be kept separately. */
7611 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7612 {
7613 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are handled
7614 * in real mode, based on the fnsave and fnstenv images. */
7615 pFpuCtx->CS = 0;
7616 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7617 }
7618 else if (!IEM_IS_LONG_MODE(pVCpu))
7619 {
7620 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7621 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7622 }
7623 else
7624 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7625}
7626
7627
7628/**
7629 * Updates the x87.DS and FPUDP registers.
7630 *
7631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7632 * @param pFpuCtx The FPU context.
7633 * @param iEffSeg The effective segment register.
7634 * @param GCPtrEff The effective address relative to @a iEffSeg.
7635 */
7636DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7637{
7638 RTSEL sel;
7639 switch (iEffSeg)
7640 {
7641 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7642 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7643 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7644 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7645 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7646 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7647 default:
7648 AssertMsgFailed(("%d\n", iEffSeg));
7649 sel = pVCpu->cpum.GstCtx.ds.Sel;
7650 }
7651 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7652 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7653 {
7654 pFpuCtx->DS = 0;
7655 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7656 }
7657 else if (!IEM_IS_LONG_MODE(pVCpu))
7658 {
7659 pFpuCtx->DS = sel;
7660 pFpuCtx->FPUDP = GCPtrEff;
7661 }
7662 else
7663 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
7664}
7665
7666
7667/**
7668 * Rotates the stack registers in the push direction.
7669 *
7670 * @param pFpuCtx The FPU context.
7671 * @remarks This is a complete waste of time, but fxsave stores the registers in
7672 * stack order.
7673 */
7674DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7675{
7676 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7677 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7678 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7679 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7680 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7681 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7682 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7683 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7684 pFpuCtx->aRegs[0].r80 = r80Tmp;
7685}
7686
7687
7688/**
7689 * Rotates the stack registers in the pop direction.
7690 *
7691 * @param pFpuCtx The FPU context.
7692 * @remarks This is a complete waste of time, but fxsave stores the registers in
7693 * stack order.
7694 */
7695DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7696{
7697 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7698 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7699 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7700 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7701 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7702 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7703 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7704 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7705 pFpuCtx->aRegs[7].r80 = r80Tmp;
7706}
7707
7708
7709/**
7710 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7711 * exception prevents it.
7712 *
7713 * @param pResult The FPU operation result to push.
7714 * @param pFpuCtx The FPU context.
7715 */
7716IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7717{
7718 /* Update FSW and bail if there are pending exceptions afterwards. */
7719 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7720 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7721 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7722 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7723 {
7724 pFpuCtx->FSW = fFsw;
7725 return;
7726 }
7727
7728 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7729 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7730 {
7731 /* All is fine, push the actual value. */
7732 pFpuCtx->FTW |= RT_BIT(iNewTop);
7733 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7734 }
7735 else if (pFpuCtx->FCW & X86_FCW_IM)
7736 {
7737 /* Masked stack overflow, push QNaN. */
7738 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7739 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7740 }
7741 else
7742 {
7743 /* Raise stack overflow, don't push anything. */
7744 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7745 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7746 return;
7747 }
7748
7749 fFsw &= ~X86_FSW_TOP_MASK;
7750 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7751 pFpuCtx->FSW = fFsw;
7752
7753 iemFpuRotateStackPush(pFpuCtx);
7754}
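
/*
 * Illustrative sketch (not part of IEM, compiled out): the unmasked-exception
 * test used by iemFpuMaybePushResult above (and iemFpuMaybePopOne below).  The
 * x87 status word exception flags IE/DE/ZE occupy the same bit positions as the
 * control word mask bits IM/DM/ZM, so "pending and not masked" reduces to one
 * AND with the inverted mask bits.  The EX_X87_* names are made up for this
 * example.
 */
#if 0 /* example only, not built */
# include <stdint.h>
# include <assert.h>

# define EX_X87_IE  UINT16_C(0x0001)   /* FSW.IE and FCW.IM share bit 0. */
# define EX_X87_DE  UINT16_C(0x0002)   /* FSW.DE and FCW.DM share bit 1. */
# define EX_X87_ZE  UINT16_C(0x0004)   /* FSW.ZE and FCW.ZM share bit 2. */

static int exHasUnmaskedXcpt(uint16_t fFsw, uint16_t fFcw)
{
    return (  (fFsw & (EX_X87_IE | EX_X87_ZE | EX_X87_DE))
            & ~(fFcw & (EX_X87_IE | EX_X87_ZE | EX_X87_DE))) != 0;
}

int main(void)
{
    assert(!exHasUnmaskedXcpt(EX_X87_IE, EX_X87_IE));   /* IE pending but masked (IM=1). */
    assert( exHasUnmaskedXcpt(EX_X87_IE, 0));           /* IE pending and unmasked. */
    return 0;
}
#endif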
7755
7756
7757/**
7758 * Stores a result in a FPU register and updates the FSW and FTW.
7759 *
7760 * @param pFpuCtx The FPU context.
7761 * @param pResult The result to store.
7762 * @param iStReg Which FPU register to store it in.
7763 */
7764IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7765{
7766 Assert(iStReg < 8);
7767 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7768 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7769 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7770 pFpuCtx->FTW |= RT_BIT(iReg);
7771 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7772}
7773
7774
7775/**
7776 * Only updates the FPU status word (FSW) with the result of the current
7777 * instruction.
7778 *
7779 * @param pFpuCtx The FPU context.
7780 * @param u16FSW The FSW output of the current instruction.
7781 */
7782IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7783{
7784 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7785 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7786}
7787
7788
7789/**
7790 * Pops one item off the FPU stack if no pending exception prevents it.
7791 *
7792 * @param pFpuCtx The FPU context.
7793 */
7794IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7795{
7796 /* Check pending exceptions. */
7797 uint16_t uFSW = pFpuCtx->FSW;
7798 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7799 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7800 return;
7801
7802 /* TOP--. */
7803 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7804 uFSW &= ~X86_FSW_TOP_MASK;
7805 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7806 pFpuCtx->FSW = uFSW;
7807
7808 /* Mark the previous ST0 as empty. */
7809 iOldTop >>= X86_FSW_TOP_SHIFT;
7810 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7811
7812 /* Rotate the registers. */
7813 iemFpuRotateStackPop(pFpuCtx);
7814}
7815
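/*
 * Note for illustration: adding 9 << X86_FSW_TOP_SHIFT and masking with
 * X86_FSW_TOP_MASK increments the three bit TOP field by one modulo 8
 * (9 == 8 + 1), so e.g. TOP=7 wraps around to 0.
 */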
7816
7817/**
7818 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7819 *
7820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7821 * @param pResult The FPU operation result to push.
7822 */
7823IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7824{
7825 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7826 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7827 iemFpuMaybePushResult(pResult, pFpuCtx);
7828}
7829
7830
7831/**
7832 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7833 * and sets FPUDP and FPUDS.
7834 *
7835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7836 * @param pResult The FPU operation result to push.
7837 * @param iEffSeg The effective segment register.
7838 * @param GCPtrEff The effective address relative to @a iEffSeg.
7839 */
7840IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7841{
7842 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7843 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7844 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7845 iemFpuMaybePushResult(pResult, pFpuCtx);
7846}
7847
7848
7849/**
7850 * Replace ST0 with the first value and push the second onto the FPU stack,
7851 * unless a pending exception prevents it.
7852 *
7853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7854 * @param pResult The FPU operation result to store and push.
7855 */
7856IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7857{
7858 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7859 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7860
7861 /* Update FSW and bail if there are pending exceptions afterwards. */
7862 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7863 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7864 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7865 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7866 {
7867 pFpuCtx->FSW = fFsw;
7868 return;
7869 }
7870
7871 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7872 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7873 {
7874 /* All is fine, push the actual value. */
7875 pFpuCtx->FTW |= RT_BIT(iNewTop);
7876 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7877 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7878 }
7879 else if (pFpuCtx->FCW & X86_FCW_IM)
7880 {
7881 /* Masked stack overflow, push QNaN. */
7882 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7883 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7884 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7885 }
7886 else
7887 {
7888 /* Raise stack overflow, don't push anything. */
7889 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7890 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7891 return;
7892 }
7893
7894 fFsw &= ~X86_FSW_TOP_MASK;
7895 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7896 pFpuCtx->FSW = fFsw;
7897
7898 iemFpuRotateStackPush(pFpuCtx);
7899}
7900
7901
7902/**
7903 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7904 * FOP.
7905 *
7906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7907 * @param pResult The result to store.
7908 * @param iStReg Which FPU register to store it in.
7909 */
7910IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7911{
7912 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7913 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7914 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7915}
7916
7917
7918/**
7919 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7920 * FOP, and then pops the stack.
7921 *
7922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7923 * @param pResult The result to store.
7924 * @param iStReg Which FPU register to store it in.
7925 */
7926IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7927{
7928 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7929 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7930 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7931 iemFpuMaybePopOne(pFpuCtx);
7932}
7933
7934
7935/**
7936 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7937 * FPUDP, and FPUDS.
7938 *
7939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7940 * @param pResult The result to store.
7941 * @param iStReg Which FPU register to store it in.
7942 * @param iEffSeg The effective memory operand selector register.
7943 * @param GCPtrEff The effective memory operand offset.
7944 */
7945IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7946 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7947{
7948 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7949 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7950 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7951 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7952}
7953
7954
7955/**
7956 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7957 * FPUDP, and FPUDS, and then pops the stack.
7958 *
7959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7960 * @param pResult The result to store.
7961 * @param iStReg Which FPU register to store it in.
7962 * @param iEffSeg The effective memory operand selector register.
7963 * @param GCPtrEff The effective memory operand offset.
7964 */
7965IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7966 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7967{
7968 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7969 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7970 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7971 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7972 iemFpuMaybePopOne(pFpuCtx);
7973}
7974
7975
7976/**
7977 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7978 *
7979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7980 */
7981IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7982{
7983 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7984 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7985}
7986
7987
7988/**
7989 * Marks the specified stack register as free (for FFREE).
7990 *
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param iStReg The register to free.
7993 */
7994IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7995{
7996 Assert(iStReg < 8);
7997 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7998 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7999 pFpuCtx->FTW &= ~RT_BIT(iReg);
8000}
8001
8002
8003/**
8004 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
8005 *
8006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8007 */
8008IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
8009{
8010 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8011 uint16_t uFsw = pFpuCtx->FSW;
8012 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
8013 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
8014 uFsw &= ~X86_FSW_TOP_MASK;
8015 uFsw |= uTop;
8016 pFpuCtx->FSW = uFsw;
8017}
8018
8019
8020/**
8021 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
8022 *
8023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8024 */
8025IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
8026{
8027 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8028 uint16_t uFsw = pFpuCtx->FSW;
8029 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
8030 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
8031 uFsw &= ~X86_FSW_TOP_MASK;
8032 uFsw |= uTop;
8033 pFpuCtx->FSW = uFsw;
8034}
8035
8036
8037/**
8038 * Updates the FSW, FOP, FPUIP, and FPUCS.
8039 *
8040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8041 * @param u16FSW The FSW from the current instruction.
8042 */
8043IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
8044{
8045 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8046 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8047 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
8048}
8049
8050
8051/**
8052 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
8053 *
8054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8055 * @param u16FSW The FSW from the current instruction.
8056 */
8057IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
8058{
8059 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8060 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8061 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
8062 iemFpuMaybePopOne(pFpuCtx);
8063}
8064
8065
8066/**
8067 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
8068 *
8069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8070 * @param u16FSW The FSW from the current instruction.
8071 * @param iEffSeg The effective memory operand selector register.
8072 * @param GCPtrEff The effective memory operand offset.
8073 */
8074IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8075{
8076 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8077 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8078 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8079 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
8080}
8081
8082
8083/**
8084 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
8085 *
8086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8087 * @param u16FSW The FSW from the current instruction.
8088 */
8089IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
8090{
8091 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8092 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8093 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
8094 iemFpuMaybePopOne(pFpuCtx);
8095 iemFpuMaybePopOne(pFpuCtx);
8096}
8097
8098
8099/**
8100 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
8101 *
8102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8103 * @param u16FSW The FSW from the current instruction.
8104 * @param iEffSeg The effective memory operand selector register.
8105 * @param GCPtrEff The effective memory operand offset.
8106 */
8107IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8108{
8109 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8110 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8111 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8112 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
8113 iemFpuMaybePopOne(pFpuCtx);
8114}
8115
8116
8117/**
8118 * Worker routine for raising an FPU stack underflow exception.
8119 *
8120 * @param pFpuCtx The FPU context.
8121 * @param iStReg The stack register being accessed.
8122 */
8123IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
8124{
8125 Assert(iStReg < 8 || iStReg == UINT8_MAX);
8126 if (pFpuCtx->FCW & X86_FCW_IM)
8127 {
8128 /* Masked underflow. */
8129 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8130 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8131 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
8132 if (iStReg != UINT8_MAX)
8133 {
8134 pFpuCtx->FTW |= RT_BIT(iReg);
8135 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
8136 }
8137 }
8138 else
8139 {
8140 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8141 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8142 }
8143}
8144
8145
8146/**
8147 * Raises a FPU stack underflow exception.
8148 *
8149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8150 * @param iStReg The destination register that should be loaded
8151 * with QNaN if \#IS is masked. Specify
8152 * UINT8_MAX if none (like for fcom).
8153 */
8154DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
8155{
8156 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8157 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8158 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
8159}
8160
8161
8162DECL_NO_INLINE(IEM_STATIC, void)
8163iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8164{
8165 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8166 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8167 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8168 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
8169}
8170
8171
8172DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
8173{
8174 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8175 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8176 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
8177 iemFpuMaybePopOne(pFpuCtx);
8178}
8179
8180
8181DECL_NO_INLINE(IEM_STATIC, void)
8182iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8183{
8184 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8185 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8186 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8187 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
8188 iemFpuMaybePopOne(pFpuCtx);
8189}
8190
8191
8192DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
8193{
8194 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8195 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8196 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
8197 iemFpuMaybePopOne(pFpuCtx);
8198 iemFpuMaybePopOne(pFpuCtx);
8199}
8200
8201
8202DECL_NO_INLINE(IEM_STATIC, void)
8203iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
8204{
8205 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8206 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8207
8208 if (pFpuCtx->FCW & X86_FCW_IM)
8209 {
8210 /* Masked underflow - Push QNaN. */
8211 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
8212 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
8213 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8214 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
8215 pFpuCtx->FTW |= RT_BIT(iNewTop);
8216 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
8217 iemFpuRotateStackPush(pFpuCtx);
8218 }
8219 else
8220 {
8221 /* Exception pending - don't change TOP or the register stack. */
8222 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8223 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8224 }
8225}
8226
8227
8228DECL_NO_INLINE(IEM_STATIC, void)
8229iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
8230{
8231 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8233
8234 if (pFpuCtx->FCW & X86_FCW_IM)
8235 {
8236 /* Masked underflow - Push QNaN. */
8237 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
8238 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
8239 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
8240 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
8241 pFpuCtx->FTW |= RT_BIT(iNewTop);
8242 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
8243 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
8244 iemFpuRotateStackPush(pFpuCtx);
8245 }
8246 else
8247 {
8248 /* Exception pending - don't change TOP or the register stack. */
8249 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8250 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8251 }
8252}
8253
8254
8255/**
8256 * Worker routine for raising an FPU stack overflow exception on a push.
8257 *
8258 * @param pFpuCtx The FPU context.
8259 */
8260IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
8261{
8262 if (pFpuCtx->FCW & X86_FCW_IM)
8263 {
8264 /* Masked overflow. */
8265 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
8266 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
8267 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
8268 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
8269 pFpuCtx->FTW |= RT_BIT(iNewTop);
8270 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
8271 iemFpuRotateStackPush(pFpuCtx);
8272 }
8273 else
8274 {
8275 /* Exception pending - don't change TOP or the register stack. */
8276 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
8277 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
8278 }
8279}
8280
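/*
 * Note for illustration: both the masked push-underflow and push-overflow
 * paths set IE and SF; C1 tells them apart - it is set to 1 for stack
 * overflow (this routine) and left at 0 for stack underflow (the
 * iemFpuStackPushUnderflow* helpers above).
 */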
8281
8282/**
8283 * Raises a FPU stack overflow exception on a push.
8284 *
8285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8286 */
8287DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
8288{
8289 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8290 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8291 iemFpuStackPushOverflowOnly(pFpuCtx);
8292}
8293
8294
8295/**
8296 * Raises a FPU stack overflow exception on a push with a memory operand.
8297 *
8298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8299 * @param iEffSeg The effective memory operand selector register.
8300 * @param GCPtrEff The effective memory operand offset.
8301 */
8302DECL_NO_INLINE(IEM_STATIC, void)
8303iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
8304{
8305 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8306 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
8307 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
8308 iemFpuStackPushOverflowOnly(pFpuCtx);
8309}
8310
8311
8312IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
8313{
8314 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8315 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
8316 if (pFpuCtx->FTW & RT_BIT(iReg))
8317 return VINF_SUCCESS;
8318 return VERR_NOT_FOUND;
8319}
8320
8321
8322IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
8323{
8324 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8325 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
8326 if (pFpuCtx->FTW & RT_BIT(iReg))
8327 {
8328 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
8329 return VINF_SUCCESS;
8330 }
8331 return VERR_NOT_FOUND;
8332}
8333
8334
8335IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
8336 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
8337{
8338 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8339 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8340 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
8341 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
8342 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
8343 {
8344 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
8345 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
8346 return VINF_SUCCESS;
8347 }
8348 return VERR_NOT_FOUND;
8349}
8350
8351
8352IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
8353{
8354 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
8355 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8356 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
8357 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
8358 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
8359 {
8360 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
8361 return VINF_SUCCESS;
8362 }
8363 return VERR_NOT_FOUND;
8364}
8365
8366
8367/**
8368 * Updates the FPU exception status after FCW is changed.
8369 *
8370 * @param pFpuCtx The FPU context.
8371 */
8372IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
8373{
8374 uint16_t u16Fsw = pFpuCtx->FSW;
8375 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
8376 u16Fsw |= X86_FSW_ES | X86_FSW_B;
8377 else
8378 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
8379 pFpuCtx->FSW = u16Fsw;
8380}
8381
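/*
 * Note for illustration: ES is the exception summary bit (an unmasked
 * exception is pending) and B (busy) simply mirrors it on modern FPUs,
 * which is why the two are always set and cleared together here.
 */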
8382
8383/**
8384 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
8385 *
8386 * @returns The full FTW.
8387 * @param pFpuCtx The FPU context.
8388 */
8389IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
8390{
8391 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
8392 uint16_t u16Ftw = 0;
8393 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
8394 for (unsigned iSt = 0; iSt < 8; iSt++)
8395 {
8396 unsigned const iReg = (iSt + iTop) & 7;
8397 if (!(u8Ftw & RT_BIT(iReg)))
8398 u16Ftw |= 3 << (iReg * 2); /* empty */
8399 else
8400 {
8401 uint16_t uTag;
8402 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
8403 if (pr80Reg->s.uExponent == 0x7fff)
8404 uTag = 2; /* Exponent is all 1's => Special. */
8405 else if (pr80Reg->s.uExponent == 0x0000)
8406 {
8407 if (pr80Reg->s.u64Mantissa == 0x0000)
8408 uTag = 1; /* All bits are zero => Zero. */
8409 else
8410 uTag = 2; /* Must be special. */
8411 }
8412 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
8413 uTag = 0; /* Valid. */
8414 else
8415 uTag = 2; /* Must be special. */
8416
8417 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
8418 }
8419 }
8420
8421 return u16Ftw;
8422}
8423
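/*
 * Note for illustration: the full tag word uses two bits per register:
 * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal, unnormal),
 * 11 = empty.  E.g. with TOP=6 and only ST(0) holding a normal value, the
 * abridged FTW is 0x40 (bit 6 set) and the full FTW computed here is 0xCFFF.
 */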
8424
8425/**
8426 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
8427 *
8428 * @returns The compressed FTW.
8429 * @param u16FullFtw The full FTW to convert.
8430 */
8431IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
8432{
8433 uint8_t u8Ftw = 0;
8434 for (unsigned i = 0; i < 8; i++)
8435 {
8436 if ((u16FullFtw & 3) != 3 /*empty*/)
8437 u8Ftw |= RT_BIT(i);
8438 u16FullFtw >>= 2;
8439 }
8440
8441 return u8Ftw;
8442}
8443
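/*
 * Note for illustration: continuing the example above, compressing the full
 * FTW 0xCFFF yields 0x40 again - only the empty/not-empty distinction
 * survives; the valid/zero/special detail is recomputed from the register
 * contents when a full tag word is needed.
 */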
8444/** @} */
8445
8446
8447/** @name Memory access.
8448 *
8449 * @{
8450 */
8451
8452
8453/**
8454 * Updates the IEMCPU::cbWritten counter if applicable.
8455 *
8456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8457 * @param fAccess The access being accounted for.
8458 * @param cbMem The access size.
8459 */
8460DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
8461{
8462 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
8463 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
8464 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
8465}
8466
8467
8468/**
8469 * Checks if the given segment can be written to, raising the appropriate
8470 * exception if not.
8471 *
8472 * @returns VBox strict status code.
8473 *
8474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8475 * @param pHid Pointer to the hidden register.
8476 * @param iSegReg The register number.
8477 * @param pu64BaseAddr Where to return the base address to use for the
8478 * segment. (In 64-bit code it may differ from the
8479 * base in the hidden segment.)
8480 */
8481IEM_STATIC VBOXSTRICTRC
8482iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8483{
8484 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8485
8486 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8487 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8488 else
8489 {
8490 if (!pHid->Attr.n.u1Present)
8491 {
8492 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8493 AssertRelease(uSel == 0);
8494 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8495 return iemRaiseGeneralProtectionFault0(pVCpu);
8496 }
8497
8498 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8499 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8500 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8501 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8502 *pu64BaseAddr = pHid->u64Base;
8503 }
8504 return VINF_SUCCESS;
8505}
8506
8507
8508/**
8509 * Checks if the given segment can be read from, raising the appropriate
8510 * exception if not.
8511 *
8512 * @returns VBox strict status code.
8513 *
8514 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8515 * @param pHid Pointer to the hidden register.
8516 * @param iSegReg The register number.
8517 * @param pu64BaseAddr Where to return the base address to use for the
8518 * segment. (In 64-bit code it may differ from the
8519 * base in the hidden segment.)
8520 */
8521IEM_STATIC VBOXSTRICTRC
8522iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8523{
8524 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8525
8526 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8527 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8528 else
8529 {
8530 if (!pHid->Attr.n.u1Present)
8531 {
8532 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8533 AssertRelease(uSel == 0);
8534 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8535 return iemRaiseGeneralProtectionFault0(pVCpu);
8536 }
8537
8538 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8539 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8540 *pu64BaseAddr = pHid->u64Base;
8541 }
8542 return VINF_SUCCESS;
8543}
8544
8545
8546/**
8547 * Applies the segment limit, base and attributes.
8548 *
8549 * This may raise a \#GP or \#SS.
8550 *
8551 * @returns VBox strict status code.
8552 *
8553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8554 * @param fAccess The kind of access which is being performed.
8555 * @param iSegReg The index of the segment register to apply.
8556 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8557 * TSS, ++).
8558 * @param cbMem The access size.
8559 * @param pGCPtrMem Pointer to the guest memory address to apply
8560 * segmentation to. Input and output parameter.
8561 */
8562IEM_STATIC VBOXSTRICTRC
8563iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8564{
8565 if (iSegReg == UINT8_MAX)
8566 return VINF_SUCCESS;
8567
8568 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8569 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8570 switch (pVCpu->iem.s.enmCpuMode)
8571 {
8572 case IEMMODE_16BIT:
8573 case IEMMODE_32BIT:
8574 {
8575 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8576 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8577
8578 if ( pSel->Attr.n.u1Present
8579 && !pSel->Attr.n.u1Unusable)
8580 {
8581 Assert(pSel->Attr.n.u1DescType);
8582 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8583 {
8584 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8585 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8586 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8587
8588 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8589 {
8590 /** @todo CPL check. */
8591 }
8592
8593 /*
8594 * There are two kinds of data selectors, normal and expand down.
8595 */
8596 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8597 {
8598 if ( GCPtrFirst32 > pSel->u32Limit
8599 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8600 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8601 }
8602 else
8603 {
8604 /*
8605 * The upper boundary is defined by the B bit, not the G bit!
8606 */
8607 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8608 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8609 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8610 }
8611 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8612 }
8613 else
8614 {
8615
8616 /*
8617 * Code selectors can usually be used to read through; writing is
8618 * only permitted in real and V8086 mode.
8619 */
8620 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8621 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8622 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8623 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8624 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8625
8626 if ( GCPtrFirst32 > pSel->u32Limit
8627 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8628 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8629
8630 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8631 {
8632 /** @todo CPL check. */
8633 }
8634
8635 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8636 }
8637 }
8638 else
8639 return iemRaiseGeneralProtectionFault0(pVCpu);
8640 return VINF_SUCCESS;
8641 }
8642
8643 case IEMMODE_64BIT:
8644 {
8645 RTGCPTR GCPtrMem = *pGCPtrMem;
8646 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8647 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8648
8649 Assert(cbMem >= 1);
8650 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8651 return VINF_SUCCESS;
8652 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8653 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8654 return iemRaiseGeneralProtectionFault0(pVCpu);
8655 }
8656
8657 default:
8658 AssertFailedReturn(VERR_IEM_IPE_7);
8659 }
8660}
8661
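/*
 * Note for illustration: for expand-down data segments the valid offsets are
 * limit + 1 up to 0xffff (B=0) or 0xffffffff (B=1).  E.g. with a limit of
 * 0x0fff and B=1, an access at offset 0x0800 raises a fault while offsets
 * 0x1000 and up are fine.
 */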
8662
8663/**
8664 * Translates a virtual address to a physical address and checks if we
8665 * can access the page as specified.
8666 *
8667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8668 * @param GCPtrMem The virtual address.
8669 * @param fAccess The intended access.
8670 * @param pGCPhysMem Where to return the physical address.
8671 */
8672IEM_STATIC VBOXSTRICTRC
8673iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8674{
8675 /** @todo Need a different PGM interface here. We're currently using
8676 * generic / REM interfaces. this won't cut it for R0. */
8677 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8678 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8679 * here. */
8680 PGMPTWALK Walk;
8681 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8682 if (RT_FAILURE(rc))
8683 {
8684 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8685 /** @todo Check unassigned memory in unpaged mode. */
8686 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8687#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8688 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8689 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
8690#endif
8691 *pGCPhysMem = NIL_RTGCPHYS;
8692 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8693 }
8694
8695 /* If the page is writable and does not have the no-exec bit set, all
8696 access is allowed. Otherwise we'll have to check more carefully... */
8697 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8698 {
8699 /* Write to read only memory? */
8700 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8701 && !(Walk.fEffective & X86_PTE_RW)
8702 && ( ( pVCpu->iem.s.uCpl == 3
8703 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8704 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8705 {
8706 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8707 *pGCPhysMem = NIL_RTGCPHYS;
8708#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8709 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8710 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8711#endif
8712 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8713 }
8714
8715 /* Kernel memory accessed by userland? */
8716 if ( !(Walk.fEffective & X86_PTE_US)
8717 && pVCpu->iem.s.uCpl == 3
8718 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8719 {
8720 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8721 *pGCPhysMem = NIL_RTGCPHYS;
8722#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8723 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8724 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8725#endif
8726 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8727 }
8728
8729 /* Executing non-executable memory? */
8730 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8731 && (Walk.fEffective & X86_PTE_PAE_NX)
8732 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8733 {
8734 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8735 *pGCPhysMem = NIL_RTGCPHYS;
8736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8737 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8738 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8739#endif
8740 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8741 VERR_ACCESS_DENIED);
8742 }
8743 }
8744
8745 /*
8746 * Set the dirty / access flags.
8747 * ASSUMES this is set when the address is translated rather than on commit...
8748 */
8749 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8750 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8751 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8752 {
8753 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8754 AssertRC(rc2);
8755 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
8756 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
8757 }
8758
8759 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
8760 *pGCPhysMem = GCPhys;
8761 return VINF_SUCCESS;
8762}
8763
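/*
 * Note for illustration: the CR0.WP test above means supervisor writes
 * (CPL < 3 or IEM_ACCESS_WHAT_SYS accesses) to read-only pages are only
 * rejected when write protection is enabled, while CPL 3 writes always are.
 * The A bit is forced for any access and the D bit additionally for writes,
 * at translation time as per the ASSUMES note above.
 */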
8764
8765
8766/**
8767 * Maps a physical page.
8768 *
8769 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8771 * @param GCPhysMem The physical address.
8772 * @param fAccess The intended access.
8773 * @param ppvMem Where to return the mapping address.
8774 * @param pLock The PGM lock.
8775 */
8776IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8777{
8778#ifdef IEM_LOG_MEMORY_WRITES
8779 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8780 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8781#endif
8782
8783 /** @todo This API may require some improving later. A private deal with PGM
8784 * regarding locking and unlocking needs to be struck. A couple of TLBs
8785 * living in PGM, with publicly accessible inlined access methods,
8786 * could perhaps be an even better solution. */
8787 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8788 GCPhysMem,
8789 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8790 pVCpu->iem.s.fBypassHandlers,
8791 ppvMem,
8792 pLock);
8793 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8794 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8795
8796 return rc;
8797}
8798
8799
8800/**
8801 * Unmap a page previously mapped by iemMemPageMap.
8802 *
8803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8804 * @param GCPhysMem The physical address.
8805 * @param fAccess The intended access.
8806 * @param pvMem What iemMemPageMap returned.
8807 * @param pLock The PGM lock.
8808 */
8809DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8810{
8811 NOREF(pVCpu);
8812 NOREF(GCPhysMem);
8813 NOREF(fAccess);
8814 NOREF(pvMem);
8815 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8816}
8817
8818
8819/**
8820 * Looks up a memory mapping entry.
8821 *
8822 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8824 * @param pvMem The memory address.
8825 * @param fAccess The access to.
8826 */
8827DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8828{
8829 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8830 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8831 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8832 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8833 return 0;
8834 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8835 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8836 return 1;
8837 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8838 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8839 return 2;
8840 return VERR_NOT_FOUND;
8841}
8842
8843
8844/**
8845 * Finds a free memmap entry when using iNextMapping doesn't work.
8846 *
8847 * @returns Memory mapping index, 1024 on failure.
8848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8849 */
8850IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8851{
8852 /*
8853 * The easy case.
8854 */
8855 if (pVCpu->iem.s.cActiveMappings == 0)
8856 {
8857 pVCpu->iem.s.iNextMapping = 1;
8858 return 0;
8859 }
8860
8861 /* There should be enough mappings for all instructions. */
8862 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8863
8864 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8865 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8866 return i;
8867
8868 AssertFailedReturn(1024);
8869}
8870
8871
8872/**
8873 * Commits a bounce buffer that needs writing back and unmaps it.
8874 *
8875 * @returns Strict VBox status code.
8876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8877 * @param iMemMap The index of the buffer to commit.
8878 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8879 * Always false in ring-3, obviously.
8880 */
8881IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8882{
8883 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8884 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8885#ifdef IN_RING3
8886 Assert(!fPostponeFail);
8887 RT_NOREF_PV(fPostponeFail);
8888#endif
8889
8890 /*
8891 * Do the writing.
8892 */
8893 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8894 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8895 {
8896 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8897 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8898 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8899 if (!pVCpu->iem.s.fBypassHandlers)
8900 {
8901 /*
8902 * Carefully and efficiently dealing with access handler return
8903 * codes makes this a little bloated.
8904 */
8905 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8906 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8907 pbBuf,
8908 cbFirst,
8909 PGMACCESSORIGIN_IEM);
8910 if (rcStrict == VINF_SUCCESS)
8911 {
8912 if (cbSecond)
8913 {
8914 rcStrict = PGMPhysWrite(pVM,
8915 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8916 pbBuf + cbFirst,
8917 cbSecond,
8918 PGMACCESSORIGIN_IEM);
8919 if (rcStrict == VINF_SUCCESS)
8920 { /* nothing */ }
8921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8922 {
8923 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8924 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8925 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8926 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8927 }
8928#ifndef IN_RING3
8929 else if (fPostponeFail)
8930 {
8931 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8932 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8933 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8934 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8935 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8936 return iemSetPassUpStatus(pVCpu, rcStrict);
8937 }
8938#endif
8939 else
8940 {
8941 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8942 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8943 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8944 return rcStrict;
8945 }
8946 }
8947 }
8948 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8949 {
8950 if (!cbSecond)
8951 {
8952 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8953 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8954 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8955 }
8956 else
8957 {
8958 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8959 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8960 pbBuf + cbFirst,
8961 cbSecond,
8962 PGMACCESSORIGIN_IEM);
8963 if (rcStrict2 == VINF_SUCCESS)
8964 {
8965 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8966 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8967 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8968 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8969 }
8970 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8971 {
8972 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8973 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8974 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8975 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8976 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8977 }
8978#ifndef IN_RING3
8979 else if (fPostponeFail)
8980 {
8981 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8982 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8983 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8984 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8985 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8986 return iemSetPassUpStatus(pVCpu, rcStrict);
8987 }
8988#endif
8989 else
8990 {
8991 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8992 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8993 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8994 return rcStrict2;
8995 }
8996 }
8997 }
8998#ifndef IN_RING3
8999 else if (fPostponeFail)
9000 {
9001 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
9002 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
9003 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
9004 if (!cbSecond)
9005 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
9006 else
9007 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
9008 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
9009 return iemSetPassUpStatus(pVCpu, rcStrict);
9010 }
9011#endif
9012 else
9013 {
9014 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
9015 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
9016 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
9017 return rcStrict;
9018 }
9019 }
9020 else
9021 {
9022 /*
9023 * No access handlers, much simpler.
9024 */
9025 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
9026 if (RT_SUCCESS(rc))
9027 {
9028 if (cbSecond)
9029 {
9030 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
9031 if (RT_SUCCESS(rc))
9032 { /* likely */ }
9033 else
9034 {
9035 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
9036 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
9037 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
9038 return rc;
9039 }
9040 }
9041 }
9042 else
9043 {
9044 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
9045 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
9046 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
9047 return rc;
9048 }
9049 }
9050 }
9051
9052#if defined(IEM_LOG_MEMORY_WRITES)
9053 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
9054 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
9055 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
9056 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
9057 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
9058 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
9059
9060 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
9061 g_cbIemWrote = cbWrote;
9062 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
9063#endif
9064
9065 /*
9066 * Free the mapping entry.
9067 */
9068 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9069 Assert(pVCpu->iem.s.cActiveMappings != 0);
9070 pVCpu->iem.s.cActiveMappings--;
9071 return VINF_SUCCESS;
9072}
9073
9074
9075/**
9076 * iemMemMap worker that deals with a request crossing pages.
9077 */
9078IEM_STATIC VBOXSTRICTRC
9079iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
9080{
9081 /*
9082 * Do the address translations.
9083 */
9084 RTGCPHYS GCPhysFirst;
9085 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
9086 if (rcStrict != VINF_SUCCESS)
9087 return rcStrict;
9088
9089 RTGCPHYS GCPhysSecond;
9090 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
9091 fAccess, &GCPhysSecond);
9092 if (rcStrict != VINF_SUCCESS)
9093 return rcStrict;
9094 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
9095
9096 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9097
9098 /*
9099 * Read in the current memory content if it's a read, execute or partial
9100 * write access.
9101 */
9102 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
9103 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
9104 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
9105
9106 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
9107 {
9108 if (!pVCpu->iem.s.fBypassHandlers)
9109 {
9110 /*
9111 * Must carefully deal with access handler status codes here,
9112 * which makes the code a bit bloated.
9113 */
9114 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
9115 if (rcStrict == VINF_SUCCESS)
9116 {
9117 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
9118 if (rcStrict == VINF_SUCCESS)
9119 { /*likely */ }
9120 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
9121 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
9122 else
9123 {
9124 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
9125 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
9126 return rcStrict;
9127 }
9128 }
9129 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
9130 {
9131 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
9132 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
9133 {
9134 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
9135 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
9136 }
9137 else
9138 {
9139 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
9140 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
9141 return rcStrict2;
9142 }
9143 }
9144 else
9145 {
9146 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
9147 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
9148 return rcStrict;
9149 }
9150 }
9151 else
9152 {
9153 /*
9154 * No informational status codes here, much more straightforward.
9155 */
9156 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
9157 if (RT_SUCCESS(rc))
9158 {
9159 Assert(rc == VINF_SUCCESS);
9160 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
9161 if (RT_SUCCESS(rc))
9162 Assert(rc == VINF_SUCCESS);
9163 else
9164 {
9165 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
9166 return rc;
9167 }
9168 }
9169 else
9170 {
9171 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
9172 return rc;
9173 }
9174 }
9175 }
9176#ifdef VBOX_STRICT
9177 else
9178 memset(pbBuf, 0xcc, cbMem);
9179 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
9180 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
9181#endif
9182
9183 /*
9184 * Commit the bounce buffer entry.
9185 */
9186 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
9187 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
9188 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
9189 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
9190 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
9191 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
9192 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
9193 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9194 pVCpu->iem.s.cActiveMappings++;
9195
9196 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9197 *ppvMem = pbBuf;
9198 return VINF_SUCCESS;
9199}
9200
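/*
 * Note for illustration: with 4 KiB guest pages, a 4 byte read at a linear
 * address with page offset 0xffe is split into cbFirstPage=2 and
 * cbSecondPage=2.  The two translations may land on unrelated physical
 * pages, which is why the data has to go through the bounce buffer.
 */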
9201
9202/**
9203 * iemMemMap worker that deals with iemMemPageMap failures.
9204 */
9205IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
9206 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
9207{
9208 /*
9209 * Filter out conditions we can handle and the ones which shouldn't happen.
9210 */
9211 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
9212 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
9213 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
9214 {
9215 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
9216 return rcMap;
9217 }
9218 pVCpu->iem.s.cPotentialExits++;
9219
9220 /*
9221 * Read in the current memory content if it's a read, execute or partial
9222 * write access.
9223 */
9224 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
9225 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
9226 {
9227 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
9228 memset(pbBuf, 0xff, cbMem);
9229 else
9230 {
9231 int rc;
9232 if (!pVCpu->iem.s.fBypassHandlers)
9233 {
9234 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
9235 if (rcStrict == VINF_SUCCESS)
9236 { /* nothing */ }
9237 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
9238 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
9239 else
9240 {
9241 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
9242 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
9243 return rcStrict;
9244 }
9245 }
9246 else
9247 {
9248 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
9249 if (RT_SUCCESS(rc))
9250 { /* likely */ }
9251 else
9252 {
9253 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
9254 GCPhysFirst, rc));
9255 return rc;
9256 }
9257 }
9258 }
9259 }
9260#ifdef VBOX_STRICT
9261 else
9262 memset(pbBuf, 0xcc, cbMem);
9263#endif
9264#ifdef VBOX_STRICT
9265 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
9266 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
9267#endif
9268
9269 /*
9270 * Commit the bounce buffer entry.
9271 */
9272 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
9273 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
9274 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
9275 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
9276 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
9277 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
9278 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
9279 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9280 pVCpu->iem.s.cActiveMappings++;
9281
9282 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9283 *ppvMem = pbBuf;
9284 return VINF_SUCCESS;
9285}
9286
9287
9288
9289/**
9290 * Maps the specified guest memory for the given kind of access.
9291 *
9292 * This may be using bounce buffering of the memory if it's crossing a page
9293 * boundary or if there is an access handler installed for any of it. Because
9294 * of lock prefix guarantees, we're in for some extra clutter when this
9295 * happens.
9296 *
9297 * This may raise a \#GP, \#SS, \#PF or \#AC.
9298 *
9299 * @returns VBox strict status code.
9300 *
9301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9302 * @param ppvMem Where to return the pointer to the mapped
9303 * memory.
9304 * @param cbMem The number of bytes to map. This is usually 1,
9305 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
9306 * string operations it can be up to a page.
9307 * @param iSegReg The index of the segment register to use for
9308 * this access. The base and limits are checked.
9309 * Use UINT8_MAX to indicate that no segmentation
9310 * is required (for IDT, GDT and LDT accesses).
9311 * @param GCPtrMem The address of the guest memory.
9312 * @param fAccess How the memory is being accessed. The
9313 * IEM_ACCESS_TYPE_XXX bit is used to figure out
9314 * how to map the memory, while the
9315 * IEM_ACCESS_WHAT_XXX bit is used when raising
9316 * exceptions.
9317 */
9318IEM_STATIC VBOXSTRICTRC
9319iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
9320{
9321 /*
9322 * Check the input and figure out which mapping entry to use.
9323 */
9324 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
9325 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9326 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9327
9328 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9329 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9330 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9331 {
9332 iMemMap = iemMemMapFindFree(pVCpu);
9333 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9334 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9335 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9336 pVCpu->iem.s.aMemMappings[2].fAccess),
9337 VERR_IEM_IPE_9);
9338 }
9339
9340 /*
9341 * Map the memory, checking that we can actually access it. If something
9342 * slightly complicated happens, fall back on bounce buffering.
9343 */
9344 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9345 if (rcStrict != VINF_SUCCESS)
9346 return rcStrict;
9347
9348 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem > GUEST_PAGE_SIZE) /* Crossing a page boundary? */
9349 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
9350
9351 RTGCPHYS GCPhysFirst;
9352 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9353 if (rcStrict != VINF_SUCCESS)
9354 return rcStrict;
9355
9356 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9357 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9358 if (fAccess & IEM_ACCESS_TYPE_READ)
9359 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9360
9361 void *pvMem;
9362 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9363 if (rcStrict != VINF_SUCCESS)
9364 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9365
9366 /*
9367 * Fill in the mapping table entry.
9368 */
9369 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9370 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9371 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9372 pVCpu->iem.s.cActiveMappings++;
9373
9374 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9375 *ppvMem = pvMem;
9376
9377 return VINF_SUCCESS;
9378}
9379
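/*
 * Illustrative sketch only (not part of the build): the caller-side pattern for
 * pairing iemMemMap with iemMemCommitAndUnmap.  The IEM_ACCESS_DATA_RW constant
 * used for the read-modify-write case is an assumption here; the plain read and
 * write patterns can be seen in the iemMemFetchDataUxx / iemMemStoreDataUxx
 * helpers further down.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleIncWordAt(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    uint16_t *pu16;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16, sizeof(*pu16), iSegReg, GCPtrMem,
                                      IEM_ACCESS_DATA_RW /* assumed flag name for read+write data access */);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu16 += 1;                                     /* may operate on a bounce buffer (page crossing / handler) */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu16, IEM_ACCESS_DATA_RW); /* writes back the buffer and frees the entry */
    }
    return rcStrict;
}
#endif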
9380
9381/**
9382 * Commits the guest memory if bounce buffered and unmaps it.
9383 *
9384 * @returns Strict VBox status code.
9385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9386 * @param pvMem The mapping.
9387 * @param fAccess The kind of access.
9388 */
9389IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9390{
9391 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9392 AssertReturn(iMemMap >= 0, iMemMap);
9393
9394 /* If it's bounce buffered, we may need to write back the buffer. */
9395 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9396 {
9397 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9398 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9399 }
9400 /* Otherwise unlock it. */
9401 else
9402 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9403
9404 /* Free the entry. */
9405 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9406 Assert(pVCpu->iem.s.cActiveMappings != 0);
9407 pVCpu->iem.s.cActiveMappings--;
9408 return VINF_SUCCESS;
9409}
9410
9411#ifdef IEM_WITH_SETJMP
9412
9413/**
9414 * Maps the specified guest memory for the given kind of access, longjmp on
9415 * error.
9416 *
9417 * This may use bounce buffering of the memory if the access crosses a page
9418 * boundary or if an access handler is installed for any part of it. Because
9419 * of lock prefix guarantees, we're in for some extra clutter when this
9420 * happens.
9421 *
9422 * This may raise a \#GP, \#SS, \#PF or \#AC.
9423 *
9424 * @returns Pointer to the mapped memory.
9425 *
9426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9427 * @param cbMem The number of bytes to map. This is usually 1,
9428 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
9429 * string operations it can be up to a page.
9430 * @param iSegReg The index of the segment register to use for
9431 * this access. The base and limits are checked.
9432 * Use UINT8_MAX to indicate that no segmentation
9433 * is required (for IDT, GDT and LDT accesses).
9434 * @param GCPtrMem The address of the guest memory.
9435 * @param fAccess How the memory is being accessed. The
9436 * IEM_ACCESS_TYPE_XXX bit is used to figure out
9437 * how to map the memory, while the
9438 * IEM_ACCESS_WHAT_XXX bit is used when raising
9439 * exceptions.
9440 */
9441IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
9442{
9443 /*
9444 * Check the input and figure out which mapping entry to use.
9445 */
9446 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
9447 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9448 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9449
9450 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9451 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9452 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9453 {
9454 iMemMap = iemMemMapFindFree(pVCpu);
9455 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9456 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9457 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9458 pVCpu->iem.s.aMemMappings[2].fAccess),
9459 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9460 }
9461
9462 /*
9463 * Map the memory, checking that we can actually access it. If something
9464 * slightly complicated happens, fall back on bounce buffering.
9465 */
9466 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9467 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9468 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9469
9470 /* Crossing a page boundary? */
9471 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
9472 { /* No (likely). */ }
9473 else
9474 {
9475 void *pvMem;
9476 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9477 if (rcStrict == VINF_SUCCESS)
9478 return pvMem;
9479 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9480 }
9481
9482 RTGCPHYS GCPhysFirst;
9483 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9484 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9485 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9486
9487 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9488 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9489 if (fAccess & IEM_ACCESS_TYPE_READ)
9490 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9491
9492 void *pvMem;
9493 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9494 if (rcStrict == VINF_SUCCESS)
9495 { /* likely */ }
9496 else
9497 {
9498 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9499 if (rcStrict == VINF_SUCCESS)
9500 return pvMem;
9501 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9502 }
9503
9504 /*
9505 * Fill in the mapping table entry.
9506 */
9507 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9508 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9509 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9510 pVCpu->iem.s.cActiveMappings++;
9511
9512 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9513 return pvMem;
9514}
9515
9516
9517/**
9518 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9519 *
9520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9521 * @param pvMem The mapping.
9522 * @param fAccess The kind of access.
9523 */
9524IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9525{
9526 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9527 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9528
9529 /* If it's bounce buffered, we may need to write back the buffer. */
9530 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9531 {
9532 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9533 {
9534 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9535 if (rcStrict == VINF_SUCCESS)
9536 return;
9537 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9538 }
9539 }
9540 /* Otherwise unlock it. */
9541 else
9542 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9543
9544 /* Free the entry. */
9545 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9546 Assert(pVCpu->iem.s.cActiveMappings != 0);
9547 pVCpu->iem.s.cActiveMappings--;
9548}
9549
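/*
 * Illustrative sketch only (not part of the build): how the longjmp contract of
 * the *Jmp accessors is meant to be consumed.  The jump buffer juggling below is
 * an assumption made for the sake of the example; the real setjmp prologue and
 * epilogue live elsewhere in IEM.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleFetchWithJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t *pu16Dst)
{
    jmp_buf         JmpBuf;
    jmp_buf * const pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    VBOXSTRICTRC    rcStrict     = VINF_SUCCESS;
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    int rcJmp = setjmp(JmpBuf);
    if (rcJmp == 0)
        *pu16Dst = iemMemFetchDataU16Jmp(pVCpu, X86_SREG_DS, GCPtrMem); /* no status checking needed */
    else
        rcStrict = rcJmp;                   /* the VBOXSTRICTRC value passed to longjmp on failure */
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    return rcStrict;
}
#endif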
9550#endif /* IEM_WITH_SETJMP */
9551
9552#ifndef IN_RING3
9553/**
9554 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9555 * buffer part shows trouble, the write back is postponed to ring-3 (sets FF and stuff).
9556 *
9557 * Allows the instruction to be completed and retired, while the IEM user will
9558 * return to ring-3 immediately afterwards and do the postponed writes there.
9559 *
9560 * @returns VBox status code (no strict statuses). Caller must check
9561 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9563 * @param pvMem The mapping.
9564 * @param fAccess The kind of access.
9565 */
9566IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9567{
9568 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9569 AssertReturn(iMemMap >= 0, iMemMap);
9570
9571 /* If it's bounce buffered, we may need to write back the buffer. */
9572 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9573 {
9574 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9575 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9576 }
9577 /* Otherwise unlock it. */
9578 else
9579 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9580
9581 /* Free the entry. */
9582 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9583 Assert(pVCpu->iem.s.cActiveMappings != 0);
9584 pVCpu->iem.s.cActiveMappings--;
9585 return VINF_SUCCESS;
9586}
9587#endif
9588
9589
9590/**
9591 * Rolls back mappings, releasing page locks and such.
9592 *
9593 * The caller shall only call this after checking cActiveMappings.
9594 *
9596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9597 */
9598IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9599{
9600 Assert(pVCpu->iem.s.cActiveMappings > 0);
9601
9602 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9603 while (iMemMap-- > 0)
9604 {
9605 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9606 if (fAccess != IEM_ACCESS_INVALID)
9607 {
9608 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9609 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9610 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9611 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9612 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9613 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9614 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9615 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9616 pVCpu->iem.s.cActiveMappings--;
9617 }
9618 }
9619}
9620
9621
9622/**
9623 * Fetches a data byte.
9624 *
9625 * @returns Strict VBox status code.
9626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9627 * @param pu8Dst Where to return the byte.
9628 * @param iSegReg The index of the segment register to use for
9629 * this access. The base and limits are checked.
9630 * @param GCPtrMem The address of the guest memory.
9631 */
9632IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9633{
9634 /* The lazy approach for now... */
9635 uint8_t const *pu8Src;
9636 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9637 if (rc == VINF_SUCCESS)
9638 {
9639 *pu8Dst = *pu8Src;
9640 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9641 }
9642 return rc;
9643}
9644
9645
9646#ifdef IEM_WITH_SETJMP
9647/**
9648 * Fetches a data byte, longjmp on error.
9649 *
9650 * @returns The byte.
9651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9652 * @param iSegReg The index of the segment register to use for
9653 * this access. The base and limits are checked.
9654 * @param GCPtrMem The address of the guest memory.
9655 */
9656DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9657{
9658 /* The lazy approach for now... */
9659 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9660 uint8_t const bRet = *pu8Src;
9661 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9662 return bRet;
9663}
9664#endif /* IEM_WITH_SETJMP */
9665
9666
9667/**
9668 * Fetches a data word.
9669 *
9670 * @returns Strict VBox status code.
9671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9672 * @param pu16Dst Where to return the word.
9673 * @param iSegReg The index of the segment register to use for
9674 * this access. The base and limits are checked.
9675 * @param GCPtrMem The address of the guest memory.
9676 */
9677IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9678{
9679 /* The lazy approach for now... */
9680 uint16_t const *pu16Src;
9681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9682 if (rc == VINF_SUCCESS)
9683 {
9684 *pu16Dst = *pu16Src;
9685 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9686 }
9687 return rc;
9688}
9689
9690
9691#ifdef IEM_WITH_SETJMP
9692/**
9693 * Fetches a data word, longjmp on error.
9694 *
9695 * @returns The word
9696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9697 * @param iSegReg The index of the segment register to use for
9698 * this access. The base and limits are checked.
9699 * @param GCPtrMem The address of the guest memory.
9700 */
9701DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9702{
9703 /* The lazy approach for now... */
9704 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9705 uint16_t const u16Ret = *pu16Src;
9706 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9707 return u16Ret;
9708}
9709#endif
9710
9711
9712/**
9713 * Fetches a data dword.
9714 *
9715 * @returns Strict VBox status code.
9716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9717 * @param pu32Dst Where to return the dword.
9718 * @param iSegReg The index of the segment register to use for
9719 * this access. The base and limits are checked.
9720 * @param GCPtrMem The address of the guest memory.
9721 */
9722IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9723{
9724 /* The lazy approach for now... */
9725 uint32_t const *pu32Src;
9726 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9727 if (rc == VINF_SUCCESS)
9728 {
9729 *pu32Dst = *pu32Src;
9730 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9731 }
9732 return rc;
9733}
9734
9735
9736/**
9737 * Fetches a data dword and zero extends it to a qword.
9738 *
9739 * @returns Strict VBox status code.
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu64Dst Where to return the qword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 uint32_t const *pu32Src;
9750 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9751 if (rc == VINF_SUCCESS)
9752 {
9753 *pu64Dst = *pu32Src;
9754 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9755 }
9756 return rc;
9757}
9758
9759
9760#ifdef IEM_WITH_SETJMP
9761
9762IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9763{
9764 Assert(cbMem >= 1);
9765 Assert(iSegReg < X86_SREG_COUNT);
9766
9767 /*
9768 * 64-bit mode is simpler.
9769 */
9770 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9771 {
9772 if (iSegReg >= X86_SREG_FS)
9773 {
9774 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9775 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9776 GCPtrMem += pSel->u64Base;
9777 }
9778
9779 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9780 return GCPtrMem;
9781 }
9782 /*
9783 * 16-bit and 32-bit segmentation.
9784 */
9785 else
9786 {
9787 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9788 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9789 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9790 == X86DESCATTR_P /* data, expand up */
9791 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9792 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9793 {
9794 /* expand up */
9795 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
9796 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9797 && GCPtrLast32 >= (uint32_t)GCPtrMem)) /* and no wrap-around */
9798 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9799 }
9800 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9801 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9802 {
9803 /* expand down */
9804 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9805 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9806 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9807 && GCPtrLast32 > (uint32_t)GCPtrMem))
9808 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9809 }
9810 else
9811 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9812 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9813 }
9814 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9815}
9816
9817
9818IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9819{
9820 Assert(cbMem >= 1);
9821 Assert(iSegReg < X86_SREG_COUNT);
9822
9823 /*
9824 * 64-bit mode is simpler.
9825 */
9826 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9827 {
9828 if (iSegReg >= X86_SREG_FS)
9829 {
9830 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9831 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9832 GCPtrMem += pSel->u64Base;
9833 }
9834
9835 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9836 return GCPtrMem;
9837 }
9838 /*
9839 * 16-bit and 32-bit segmentation.
9840 */
9841 else
9842 {
9843 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9844 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9845 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9846 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9847 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9848 {
9849 /* expand up */
9850 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1; /* last byte of the access */
9851 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9852 && GCPtrLast32 >= (uint32_t)GCPtrMem)) /* and no wrap-around */
9853 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9854 }
9855 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9856 {
9857 /* expand down */
9858 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9859 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9860 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9861 && GCPtrLast32 > (uint32_t)GCPtrMem))
9862 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9863 }
9864 else
9865 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9866 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9867 }
9868 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9869}
9870
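/*
 * Worked example (illustrative) of the expand-down check above: take a
 * non-big (u1DefBig=0), expand-down data segment with u32Limit=0x0fff and
 * u64Base=0x10000.  A 4 byte write at offset GCPtrMem=0x2000 gives
 * GCPtrLast32=0x2004; 0x2000 > 0x0fff, 0x2004 <= 0xffff and there is no
 * wrap-around, so the access is in bounds and the linear address
 * 0x10000 + 0x2000 = 0x12000 is returned.  An access at offset 0x0800 fails
 * the 'offset above limit' test and ends up in iemRaiseSelectorBoundsJmp
 * instead.
 */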
9871
9872/**
9873 * Fetches a data dword, longjmp on error, fallback/safe version.
9874 *
9875 * @returns The dword
9876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9877 * @param iSegReg The index of the segment register to use for
9878 * this access. The base and limits are checked.
9879 * @param GCPtrMem The address of the guest memory.
9880 */
9881IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9882{
9883 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9884 uint32_t const u32Ret = *pu32Src;
9885 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9886 return u32Ret;
9887}
9888
9889
9890/**
9891 * Fetches a data dword, longjmp on error.
9892 *
9893 * @returns The dword
9894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9895 * @param iSegReg The index of the segment register to use for
9896 * this access. The base and limits are checked.
9897 * @param GCPtrMem The address of the guest memory.
9898 */
9899DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9900{
9901# ifdef IEM_WITH_DATA_TLB
9902 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9903 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9904 {
9905 /// @todo more later.
9906 }
9907
9908 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9909# else
9910 /* The lazy approach. */
9911 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9912 uint32_t const u32Ret = *pu32Src;
9913 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9914 return u32Ret;
9915# endif
9916}
9917#endif
9918
9919
9920#ifdef SOME_UNUSED_FUNCTION
9921/**
9922 * Fetches a data dword and sign extends it to a qword.
9923 *
9924 * @returns Strict VBox status code.
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param pu64Dst Where to return the sign extended value.
9927 * @param iSegReg The index of the segment register to use for
9928 * this access. The base and limits are checked.
9929 * @param GCPtrMem The address of the guest memory.
9930 */
9931IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9932{
9933 /* The lazy approach for now... */
9934 int32_t const *pi32Src;
9935 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9936 if (rc == VINF_SUCCESS)
9937 {
9938 *pu64Dst = *pi32Src;
9939 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9940 }
9941#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9942 else
9943 *pu64Dst = 0;
9944#endif
9945 return rc;
9946}
9947#endif
9948
9949
9950/**
9951 * Fetches a data qword.
9952 *
9953 * @returns Strict VBox status code.
9954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9955 * @param pu64Dst Where to return the qword.
9956 * @param iSegReg The index of the segment register to use for
9957 * this access. The base and limits are checked.
9958 * @param GCPtrMem The address of the guest memory.
9959 */
9960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9961{
9962 /* The lazy approach for now... */
9963 uint64_t const *pu64Src;
9964 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9965 if (rc == VINF_SUCCESS)
9966 {
9967 *pu64Dst = *pu64Src;
9968 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9969 }
9970 return rc;
9971}
9972
9973
9974#ifdef IEM_WITH_SETJMP
9975/**
9976 * Fetches a data qword, longjmp on error.
9977 *
9978 * @returns The qword.
9979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9980 * @param iSegReg The index of the segment register to use for
9981 * this access. The base and limits are checked.
9982 * @param GCPtrMem The address of the guest memory.
9983 */
9984DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9985{
9986 /* The lazy approach for now... */
9987 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9988 uint64_t const u64Ret = *pu64Src;
9989 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9990 return u64Ret;
9991}
9992#endif
9993
9994
9995/**
9996 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9997 *
9998 * @returns Strict VBox status code.
9999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10000 * @param pu64Dst Where to return the qword.
10001 * @param iSegReg The index of the segment register to use for
10002 * this access. The base and limits are checked.
10003 * @param GCPtrMem The address of the guest memory.
10004 */
10005IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10006{
10007 /* The lazy approach for now... */
10008 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
10009 if (RT_UNLIKELY(GCPtrMem & 15))
10010 return iemRaiseGeneralProtectionFault0(pVCpu);
10011
10012 uint64_t const *pu64Src;
10013 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10014 if (rc == VINF_SUCCESS)
10015 {
10016 *pu64Dst = *pu64Src;
10017 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
10018 }
10019 return rc;
10020}
10021
10022
10023#ifdef IEM_WITH_SETJMP
10024/**
10025 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
10026 *
10027 * @returns The qword.
10028 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10029 * @param iSegReg The index of the segment register to use for
10030 * this access. The base and limits are checked.
10031 * @param GCPtrMem The address of the guest memory.
10032 */
10033DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
10034{
10035 /* The lazy approach for now... */
10036 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
10037 if (RT_LIKELY(!(GCPtrMem & 15)))
10038 {
10039 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10040 uint64_t const u64Ret = *pu64Src;
10041 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
10042 return u64Ret;
10043 }
10044
10045 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
10046 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
10047}
10048#endif
10049
10050
10051/**
10052 * Fetches a data tword.
10053 *
10054 * @returns Strict VBox status code.
10055 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10056 * @param pr80Dst Where to return the tword.
10057 * @param iSegReg The index of the segment register to use for
10058 * this access. The base and limits are checked.
10059 * @param GCPtrMem The address of the guest memory.
10060 */
10061IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10062{
10063 /* The lazy approach for now... */
10064 PCRTFLOAT80U pr80Src;
10065 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10066 if (rc == VINF_SUCCESS)
10067 {
10068 *pr80Dst = *pr80Src;
10069 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
10070 }
10071 return rc;
10072}
10073
10074
10075#ifdef IEM_WITH_SETJMP
10076/**
10077 * Fetches a data tword, longjmp on error.
10078 *
10079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10080 * @param pr80Dst Where to return the tword.
10081 * @param iSegReg The index of the segment register to use for
10082 * this access. The base and limits are checked.
10083 * @param GCPtrMem The address of the guest memory.
10084 */
10085DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10086{
10087 /* The lazy approach for now... */
10088 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10089 *pr80Dst = *pr80Src;
10090 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
10091}
10092#endif
10093
10094
10095/**
10096 * Fetches a data dqword (double qword), generally SSE related.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10100 * @param pu128Dst Where to return the dqword.
10101 * @param iSegReg The index of the segment register to use for
10102 * this access. The base and limits are checked.
10103 * @param GCPtrMem The address of the guest memory.
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10106{
10107 /* The lazy approach for now... */
10108 PCRTUINT128U pu128Src;
10109 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10110 if (rc == VINF_SUCCESS)
10111 {
10112 pu128Dst->au64[0] = pu128Src->au64[0];
10113 pu128Dst->au64[1] = pu128Src->au64[1];
10114 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
10115 }
10116 return rc;
10117}
10118
10119
10120#ifdef IEM_WITH_SETJMP
10121/**
10122 * Fetches a data dqword (double qword), generally SSE related.
10123 *
10124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10125 * @param pu128Dst Where to return the dqword.
10126 * @param iSegReg The index of the segment register to use for
10127 * this access. The base and limits are checked.
10128 * @param GCPtrMem The address of the guest memory.
10129 */
10130IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10131{
10132 /* The lazy approach for now... */
10133 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10134 pu128Dst->au64[0] = pu128Src->au64[0];
10135 pu128Dst->au64[1] = pu128Src->au64[1];
10136 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
10137}
10138#endif
10139
10140
10141/**
10142 * Fetches a data dqword (double qword) at an aligned address, generally SSE
10143 * related.
10144 *
10145 * Raises \#GP(0) if not aligned.
10146 *
10147 * @returns Strict VBox status code.
10148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10149 * @param pu128Dst Where to return the dqword.
10150 * @param iSegReg The index of the segment register to use for
10151 * this access. The base and limits are checked.
10152 * @param GCPtrMem The address of the guest memory.
10153 */
10154IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10155{
10156 /* The lazy approach for now... */
10157 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
10158 if ( (GCPtrMem & 15)
10159 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10160 return iemRaiseGeneralProtectionFault0(pVCpu);
10161
10162 PCRTUINT128U pu128Src;
10163 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10164 if (rc == VINF_SUCCESS)
10165 {
10166 pu128Dst->au64[0] = pu128Src->au64[0];
10167 pu128Dst->au64[1] = pu128Src->au64[1];
10168 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
10169 }
10170 return rc;
10171}
10172
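/*
 * Illustrative note: with GCPtrMem = 0x1008, (GCPtrMem & 15) is 8, so the
 * fetch above raises \#GP(0) - unless MXCSR.MM is set (our reading: AMD's
 * misaligned SSE mode), in which case the access simply proceeds like an
 * unaligned one.
 */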
10173
10174#ifdef IEM_WITH_SETJMP
10175/**
10176 * Fetches a data dqword (double qword) at an aligned address, generally SSE
10177 * related, longjmp on error.
10178 *
10179 * Raises \#GP(0) if not aligned.
10180 *
10181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10182 * @param pu128Dst Where to return the dqword.
10183 * @param iSegReg The index of the segment register to use for
10184 * this access. The base and limits are checked.
10185 * @param GCPtrMem The address of the guest memory.
10186 */
10187DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10188{
10189 /* The lazy approach for now... */
10190 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
10191 if ( (GCPtrMem & 15) == 0
10192 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10193 {
10194 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10195 pu128Dst->au64[0] = pu128Src->au64[0];
10196 pu128Dst->au64[1] = pu128Src->au64[1];
10197 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
10198 return;
10199 }
10200
10201 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10203}
10204#endif
10205
10206
10207/**
10208 * Fetches a data oword (octo word), generally AVX related.
10209 *
10210 * @returns Strict VBox status code.
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param pu256Dst Where to return the oword.
10213 * @param iSegReg The index of the segment register to use for
10214 * this access. The base and limits are checked.
10215 * @param GCPtrMem The address of the guest memory.
10216 */
10217IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10218{
10219 /* The lazy approach for now... */
10220 PCRTUINT256U pu256Src;
10221 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10222 if (rc == VINF_SUCCESS)
10223 {
10224 pu256Dst->au64[0] = pu256Src->au64[0];
10225 pu256Dst->au64[1] = pu256Src->au64[1];
10226 pu256Dst->au64[2] = pu256Src->au64[2];
10227 pu256Dst->au64[3] = pu256Src->au64[3];
10228 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10229 }
10230 return rc;
10231}
10232
10233
10234#ifdef IEM_WITH_SETJMP
10235/**
10236 * Fetches a data oword (octo word), generally AVX related.
10237 *
10238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10239 * @param pu256Dst Where to return the oword.
10240 * @param iSegReg The index of the segment register to use for
10241 * this access. The base and limits are checked.
10242 * @param GCPtrMem The address of the guest memory.
10243 */
10244IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10245{
10246 /* The lazy approach for now... */
10247 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10248 pu256Dst->au64[0] = pu256Src->au64[0];
10249 pu256Dst->au64[1] = pu256Src->au64[1];
10250 pu256Dst->au64[2] = pu256Src->au64[2];
10251 pu256Dst->au64[3] = pu256Src->au64[3];
10252 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10253}
10254#endif
10255
10256
10257/**
10258 * Fetches a data oword (octo word) at an aligned address, generally AVX
10259 * related.
10260 *
10261 * Raises \#GP(0) if not aligned.
10262 *
10263 * @returns Strict VBox status code.
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param pu256Dst Where to return the oword.
10266 * @param iSegReg The index of the segment register to use for
10267 * this access. The base and limits are checked.
10268 * @param GCPtrMem The address of the guest memory.
10269 */
10270IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10271{
10272 /* The lazy approach for now... */
10273 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
10274 if (GCPtrMem & 31)
10275 return iemRaiseGeneralProtectionFault0(pVCpu);
10276
10277 PCRTUINT256U pu256Src;
10278 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10279 if (rc == VINF_SUCCESS)
10280 {
10281 pu256Dst->au64[0] = pu256Src->au64[0];
10282 pu256Dst->au64[1] = pu256Src->au64[1];
10283 pu256Dst->au64[2] = pu256Src->au64[2];
10284 pu256Dst->au64[3] = pu256Src->au64[3];
10285 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10286 }
10287 return rc;
10288}
10289
10290
10291#ifdef IEM_WITH_SETJMP
10292/**
10293 * Fetches a data oword (octo word) at an aligned address, generally AVX
10294 * related, longjmp on error.
10295 *
10296 * Raises \#GP(0) if not aligned.
10297 *
10298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10299 * @param pu256Dst Where to return the oword.
10300 * @param iSegReg The index of the segment register to use for
10301 * this access. The base and limits are checked.
10302 * @param GCPtrMem The address of the guest memory.
10303 */
10304DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10305{
10306 /* The lazy approach for now... */
10307 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
10308 if ((GCPtrMem & 31) == 0)
10309 {
10310 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
10311 pu256Dst->au64[0] = pu256Src->au64[0];
10312 pu256Dst->au64[1] = pu256Src->au64[1];
10313 pu256Dst->au64[2] = pu256Src->au64[2];
10314 pu256Dst->au64[3] = pu256Src->au64[3];
10315 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
10316 return;
10317 }
10318
10319 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10320 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10321}
10322#endif
10323
10324
10325
10326/**
10327 * Fetches a descriptor register (lgdt, lidt).
10328 *
10329 * @returns Strict VBox status code.
10330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10331 * @param pcbLimit Where to return the limit.
10332 * @param pGCPtrBase Where to return the base.
10333 * @param iSegReg The index of the segment register to use for
10334 * this access. The base and limits are checked.
10335 * @param GCPtrMem The address of the guest memory.
10336 * @param enmOpSize The effective operand size.
10337 */
10338IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
10339 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
10340{
10341 /*
10342 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
10343 * little special:
10344 * - The two reads are done separately.
10345 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
10346 * - We suspect the 386 to actually commit the limit before the base in
10347 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
10348 * don't try emulate this eccentric behavior, because it's not well
10349 * enough understood and rather hard to trigger.
10350 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
10351 */
10352 VBOXSTRICTRC rcStrict;
10353 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
10354 {
10355 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10356 if (rcStrict == VINF_SUCCESS)
10357 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
10358 }
10359 else
10360 {
10361 uint32_t uTmp = 0; /* (Keeps Visual C++ from complaining about a potentially uninitialized variable.) */
10362 if (enmOpSize == IEMMODE_32BIT)
10363 {
10364 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
10365 {
10366 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10367 if (rcStrict == VINF_SUCCESS)
10368 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10369 }
10370 else
10371 {
10372 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
10373 if (rcStrict == VINF_SUCCESS)
10374 {
10375 *pcbLimit = (uint16_t)uTmp;
10376 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10377 }
10378 }
10379 if (rcStrict == VINF_SUCCESS)
10380 *pGCPtrBase = uTmp;
10381 }
10382 else
10383 {
10384 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
10385 if (rcStrict == VINF_SUCCESS)
10386 {
10387 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
10388 if (rcStrict == VINF_SUCCESS)
10389 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
10390 }
10391 }
10392 }
10393 return rcStrict;
10394}
10395
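/*
 * Worked example (illustrative): with a 16-bit operand size and the six
 * bytes 34 12 00 00 0c ff at GCPtrMem, the code above returns
 * *pcbLimit = 0x1234 and *pGCPtrBase = 0xff0c0000 & 0x00ffffff = 0x000c0000,
 * i.e. the top byte of the dword read at GCPtrMem+2 is discarded.
 */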
10396
10397
10398/**
10399 * Stores a data byte.
10400 *
10401 * @returns Strict VBox status code.
10402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10403 * @param iSegReg The index of the segment register to use for
10404 * this access. The base and limits are checked.
10405 * @param GCPtrMem The address of the guest memory.
10406 * @param u8Value The value to store.
10407 */
10408IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
10409{
10410 /* The lazy approach for now... */
10411 uint8_t *pu8Dst;
10412 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10413 if (rc == VINF_SUCCESS)
10414 {
10415 *pu8Dst = u8Value;
10416 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
10417 }
10418 return rc;
10419}
10420
10421
10422#ifdef IEM_WITH_SETJMP
10423/**
10424 * Stores a data byte, longjmp on error.
10425 *
10426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10427 * @param iSegReg The index of the segment register to use for
10428 * this access. The base and limits are checked.
10429 * @param GCPtrMem The address of the guest memory.
10430 * @param u8Value The value to store.
10431 */
10432IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
10433{
10434 /* The lazy approach for now... */
10435 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10436 *pu8Dst = u8Value;
10437 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
10438}
10439#endif
10440
10441
10442/**
10443 * Stores a data word.
10444 *
10445 * @returns Strict VBox status code.
10446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10447 * @param iSegReg The index of the segment register to use for
10448 * this access. The base and limits are checked.
10449 * @param GCPtrMem The address of the guest memory.
10450 * @param u16Value The value to store.
10451 */
10452IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10453{
10454 /* The lazy approach for now... */
10455 uint16_t *pu16Dst;
10456 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10457 if (rc == VINF_SUCCESS)
10458 {
10459 *pu16Dst = u16Value;
10460 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10461 }
10462 return rc;
10463}
10464
10465
10466#ifdef IEM_WITH_SETJMP
10467/**
10468 * Stores a data word, longjmp on error.
10469 *
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param iSegReg The index of the segment register to use for
10472 * this access. The base and limits are checked.
10473 * @param GCPtrMem The address of the guest memory.
10474 * @param u16Value The value to store.
10475 */
10476IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10477{
10478 /* The lazy approach for now... */
10479 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10480 *pu16Dst = u16Value;
10481 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10482}
10483#endif
10484
10485
10486/**
10487 * Stores a data dword.
10488 *
10489 * @returns Strict VBox status code.
10490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10491 * @param iSegReg The index of the segment register to use for
10492 * this access. The base and limits are checked.
10493 * @param GCPtrMem The address of the guest memory.
10494 * @param u32Value The value to store.
10495 */
10496IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10497{
10498 /* The lazy approach for now... */
10499 uint32_t *pu32Dst;
10500 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10501 if (rc == VINF_SUCCESS)
10502 {
10503 *pu32Dst = u32Value;
10504 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10505 }
10506 return rc;
10507}
10508
10509
10510#ifdef IEM_WITH_SETJMP
10511/**
10512 * Stores a data dword, longjmp on error.
10513 *
10515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10516 * @param iSegReg The index of the segment register to use for
10517 * this access. The base and limits are checked.
10518 * @param GCPtrMem The address of the guest memory.
10519 * @param u32Value The value to store.
10520 */
10521IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10522{
10523 /* The lazy approach for now... */
10524 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10525 *pu32Dst = u32Value;
10526 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10527}
10528#endif
10529
10530
10531/**
10532 * Stores a data qword.
10533 *
10534 * @returns Strict VBox status code.
10535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10536 * @param iSegReg The index of the segment register to use for
10537 * this access. The base and limits are checked.
10538 * @param GCPtrMem The address of the guest memory.
10539 * @param u64Value The value to store.
10540 */
10541IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10542{
10543 /* The lazy approach for now... */
10544 uint64_t *pu64Dst;
10545 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10546 if (rc == VINF_SUCCESS)
10547 {
10548 *pu64Dst = u64Value;
10549 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10550 }
10551 return rc;
10552}
10553
10554
10555#ifdef IEM_WITH_SETJMP
10556/**
10557 * Stores a data qword, longjmp on error.
10558 *
10559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10560 * @param iSegReg The index of the segment register to use for
10561 * this access. The base and limits are checked.
10562 * @param GCPtrMem The address of the guest memory.
10563 * @param u64Value The value to store.
10564 */
10565IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10566{
10567 /* The lazy approach for now... */
10568 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10569 *pu64Dst = u64Value;
10570 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10571}
10572#endif
10573
10574
10575/**
10576 * Stores a data dqword.
10577 *
10578 * @returns Strict VBox status code.
10579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10580 * @param iSegReg The index of the segment register to use for
10581 * this access. The base and limits are checked.
10582 * @param GCPtrMem The address of the guest memory.
10583 * @param u128Value The value to store.
10584 */
10585IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10586{
10587 /* The lazy approach for now... */
10588 PRTUINT128U pu128Dst;
10589 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10590 if (rc == VINF_SUCCESS)
10591 {
10592 pu128Dst->au64[0] = u128Value.au64[0];
10593 pu128Dst->au64[1] = u128Value.au64[1];
10594 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10595 }
10596 return rc;
10597}
10598
10599
10600#ifdef IEM_WITH_SETJMP
10601/**
10602 * Stores a data dqword, longjmp on error.
10603 *
10604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10605 * @param iSegReg The index of the segment register to use for
10606 * this access. The base and limits are checked.
10607 * @param GCPtrMem The address of the guest memory.
10608 * @param u128Value The value to store.
10609 */
10610IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10611{
10612 /* The lazy approach for now... */
10613 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10614 pu128Dst->au64[0] = u128Value.au64[0];
10615 pu128Dst->au64[1] = u128Value.au64[1];
10616 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10617}
10618#endif
10619
10620
10621/**
10622 * Stores a data dqword, SSE aligned.
10623 *
10624 * @returns Strict VBox status code.
10625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10626 * @param iSegReg The index of the segment register to use for
10627 * this access. The base and limits are checked.
10628 * @param GCPtrMem The address of the guest memory.
10629 * @param u128Value The value to store.
10630 */
10631IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10632{
10633 /* The lazy approach for now... */
10634 if ( (GCPtrMem & 15)
10635 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10636 return iemRaiseGeneralProtectionFault0(pVCpu);
10637
10638 PRTUINT128U pu128Dst;
10639 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10640 if (rc == VINF_SUCCESS)
10641 {
10642 pu128Dst->au64[0] = u128Value.au64[0];
10643 pu128Dst->au64[1] = u128Value.au64[1];
10644 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10645 }
10646 return rc;
10647}
10648
10649
10650#ifdef IEM_WITH_SETJMP
10651/**
10652 * Stores a data dqword, SSE aligned, longjmp on error.
10653 *
10655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10656 * @param iSegReg The index of the segment register to use for
10657 * this access. The base and limits are checked.
10658 * @param GCPtrMem The address of the guest memory.
10659 * @param u128Value The value to store.
10660 */
10661DECL_NO_INLINE(IEM_STATIC, void)
10662iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10663{
10664 /* The lazy approach for now... */
10665 if ( (GCPtrMem & 15) == 0
10666 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10667 {
10668 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10669 pu128Dst->au64[0] = u128Value.au64[0];
10670 pu128Dst->au64[1] = u128Value.au64[1];
10671 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10672 return;
10673 }
10674
10675 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10676 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10677}
10678#endif
10679
10680
10681/**
10682 * Stores a data oword (octo word).
10683 *
10684 * @returns Strict VBox status code.
10685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10686 * @param iSegReg The index of the segment register to use for
10687 * this access. The base and limits are checked.
10688 * @param GCPtrMem The address of the guest memory.
10689 * @param pu256Value Pointer to the value to store.
10690 */
10691IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10692{
10693 /* The lazy approach for now... */
10694 PRTUINT256U pu256Dst;
10695 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10696 if (rc == VINF_SUCCESS)
10697 {
10698 pu256Dst->au64[0] = pu256Value->au64[0];
10699 pu256Dst->au64[1] = pu256Value->au64[1];
10700 pu256Dst->au64[2] = pu256Value->au64[2];
10701 pu256Dst->au64[3] = pu256Value->au64[3];
10702 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10703 }
10704 return rc;
10705}
10706
10707
10708#ifdef IEM_WITH_SETJMP
10709/**
10710 * Stores a data oword (octo word), longjmp on error.
10711 *
10712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10713 * @param iSegReg The index of the segment register to use for
10714 * this access. The base and limits are checked.
10715 * @param GCPtrMem The address of the guest memory.
10716 * @param pu256Value Pointer to the value to store.
10717 */
10718IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10719{
10720 /* The lazy approach for now... */
10721 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10722 pu256Dst->au64[0] = pu256Value->au64[0];
10723 pu256Dst->au64[1] = pu256Value->au64[1];
10724 pu256Dst->au64[2] = pu256Value->au64[2];
10725 pu256Dst->au64[3] = pu256Value->au64[3];
10726 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10727}
10728#endif
10729
10730
10731/**
10732 * Stores a data oword (octo word), AVX aligned.
10733 *
10734 * @returns Strict VBox status code.
10735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10736 * @param iSegReg The index of the segment register to use for
10737 * this access. The base and limits are checked.
10738 * @param GCPtrMem The address of the guest memory.
10739 * @param pu256Value Pointer to the value to store.
10740 */
10741IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10742{
10743 /* The lazy approach for now... */
10744 if (GCPtrMem & 31)
10745 return iemRaiseGeneralProtectionFault0(pVCpu);
10746
10747 PRTUINT256U pu256Dst;
10748 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10749 if (rc == VINF_SUCCESS)
10750 {
10751 pu256Dst->au64[0] = pu256Value->au64[0];
10752 pu256Dst->au64[1] = pu256Value->au64[1];
10753 pu256Dst->au64[2] = pu256Value->au64[2];
10754 pu256Dst->au64[3] = pu256Value->au64[3];
10755 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10756 }
10757 return rc;
10758}
10759
10760
10761#ifdef IEM_WITH_SETJMP
10762/**
10763 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10764 *
10766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10767 * @param iSegReg The index of the segment register to use for
10768 * this access. The base and limits are checked.
10769 * @param GCPtrMem The address of the guest memory.
10770 * @param pu256Value Pointer to the value to store.
10771 */
10772DECL_NO_INLINE(IEM_STATIC, void)
10773iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10774{
10775 /* The lazy approach for now... */
10776 if ((GCPtrMem & 31) == 0)
10777 {
10778 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10779 pu256Dst->au64[0] = pu256Value->au64[0];
10780 pu256Dst->au64[1] = pu256Value->au64[1];
10781 pu256Dst->au64[2] = pu256Value->au64[2];
10782 pu256Dst->au64[3] = pu256Value->au64[3];
10783 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10784 return;
10785 }
10786
10787 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10788 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10789}
10790#endif
10791
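/*
 * The aligned variants above implement the 32-byte alignment check that
 * aligned 256-bit stores (e.g. VMOVDQA/VMOVAPS with a YMM source) require,
 * raising #GP(0) on a misaligned address, while unaligned stores (VMOVDQU and
 * friends) would presumably go via plain iemMemStoreDataU256.  Rough caller
 * sketch (hypothetical; the segment/effective address variable names are
 * illustrative only):
 *
 *     RTUINT256U uSrc;   // filled in by the instruction implementation
 *     VBOXSTRICTRC rcStrict = iemMemStoreDataU256AlignedAvx(pVCpu, iEffSeg, GCPtrEffDst, &uSrc);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */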
10792
10793/**
10794 * Stores a descriptor register (sgdt, sidt).
10795 *
10796 * @returns Strict VBox status code.
10797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10798 * @param cbLimit The limit.
10799 * @param GCPtrBase The base address.
10800 * @param iSegReg The index of the segment register to use for
10801 * this access. The base and limits are checked.
10802 * @param GCPtrMem The address of the guest memory.
10803 */
10804IEM_STATIC VBOXSTRICTRC
10805iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10806{
10807 /*
10808 * The SIDT and SGDT instructions actually store the data using two
10809 * independent writes. The instructions do not respond to opsize prefixes.
10810 */
10811 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10812 if (rcStrict == VINF_SUCCESS)
10813 {
10814 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10815 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10816 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10817 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10818 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10819 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10820 else
10821 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10822 }
10823 return rcStrict;
10824}
10825
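/*
 * Memory layout written by iemMemStoreDataXdtr, spelled out as an example
 * (illustrative): GCPtrMem+0 gets the 16-bit limit, GCPtrMem+2 gets the base
 * (32 bits in 16/32-bit mode, 64 bits in 64-bit mode).  On a 286-level target
 * in 16-bit mode the top byte of the 32-bit base field is stored as 0xff,
 * matching the undefined/0xff byte those CPUs produce for their 24-bit base.
 * So SGDT with cbLimit=0x0027 and GCPtrBase=0x00012345 on such a target would
 * store the bytes 27 00 45 23 01 ff at GCPtrMem.
 */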
10826
10827/**
10828 * Pushes a word onto the stack.
10829 *
10830 * @returns Strict VBox status code.
10831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10832 * @param u16Value The value to push.
10833 */
10834IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10835{
10836 /* Decrement the stack pointer. */
10837 uint64_t uNewRsp;
10838 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10839
10840 /* Write the word the lazy way. */
10841 uint16_t *pu16Dst;
10842 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10843 if (rc == VINF_SUCCESS)
10844 {
10845 *pu16Dst = u16Value;
10846 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10847 }
10848
10849 /* Commit the new RSP value unless an access handler made trouble. */
10850 if (rc == VINF_SUCCESS)
10851 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10852
10853 return rc;
10854}
10855
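/*
 * Note on the stack push/pop helpers in this area: the mapped access may be
 * backed by a bounce buffer or may fail (e.g. #SS/#PF or an informational
 * status from an access handler), so the new RSP is only committed after
 * iemMemCommitAndUnmap succeeds.  A faulting push/pop therefore leaves the
 * guest RSP unchanged, as the exception semantics require.
 */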
10856
10857/**
10858 * Pushes a dword onto the stack.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param u32Value The value to push.
10863 */
10864IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10865{
10866 /* Decrement the stack pointer. */
10867 uint64_t uNewRsp;
10868 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10869
10870 /* Write the dword the lazy way. */
10871 uint32_t *pu32Dst;
10872 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10873 if (rc == VINF_SUCCESS)
10874 {
10875 *pu32Dst = u32Value;
10876 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10877 }
10878
10879 /* Commit the new RSP value unless an access handler made trouble. */
10880 if (rc == VINF_SUCCESS)
10881 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10882
10883 return rc;
10884}
10885
10886
10887/**
10888 * Pushes a dword segment register value onto the stack.
10889 *
10890 * @returns Strict VBox status code.
10891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10892 * @param u32Value The value to push.
10893 */
10894IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10895{
10896 /* Decrement the stack pointer. */
10897 uint64_t uNewRsp;
10898 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10899
10900 /* The Intel docs talk about zero extending the selector register
10901 value. My actual Intel CPU here might be zero extending the value,
10902 but it still only writes the lower word... */
10903 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10904 * happens when crossing an electric page boundary: is the high word checked
10905 * for write accessibility or not? Probably it is. What about segment limits?
10906 * It appears this behavior is also shared with trap error codes.
10907 *
10908 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10909 * Check ancient hardware to find out when it actually did change. */
10910 uint16_t *pu16Dst;
10911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10912 if (rc == VINF_SUCCESS)
10913 {
10914 *pu16Dst = (uint16_t)u32Value;
10915 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10916 }
10917
10918 /* Commit the new RSP value unless an access handler made trouble. */
10919 if (rc == VINF_SUCCESS)
10920 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10921
10922 return rc;
10923}
10924
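/*
 * Concrete example of the behaviour implemented above: a 32-bit "push ds"
 * reserves a full dword on the stack (ESP is decremented by 4) and the whole
 * dword is mapped, but only the low word at [ESP] receives the selector; the
 * upper word keeps whatever was in that stack slot before.  The mapping is
 * read-write, presumably so that a bounce-buffered access preserves that
 * untouched upper word.
 */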
10925
10926/**
10927 * Pushes a qword onto the stack.
10928 *
10929 * @returns Strict VBox status code.
10930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10931 * @param u64Value The value to push.
10932 */
10933IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10934{
10935 /* Decrement the stack pointer. */
10936 uint64_t uNewRsp;
10937 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10938
10939 /* Write the qword the lazy way. */
10940 uint64_t *pu64Dst;
10941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10942 if (rc == VINF_SUCCESS)
10943 {
10944 *pu64Dst = u64Value;
10945 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10946 }
10947
10948 /* Commit the new RSP value unless an access handler made trouble. */
10949 if (rc == VINF_SUCCESS)
10950 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10951
10952 return rc;
10953}
10954
10955
10956/**
10957 * Pops a word from the stack.
10958 *
10959 * @returns Strict VBox status code.
10960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10961 * @param pu16Value Where to store the popped value.
10962 */
10963IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10964{
10965 /* Increment the stack pointer. */
10966 uint64_t uNewRsp;
10967 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10968
10969 /* Fetch the word the lazy way. */
10970 uint16_t const *pu16Src;
10971 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10972 if (rc == VINF_SUCCESS)
10973 {
10974 *pu16Value = *pu16Src;
10975 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10976
10977 /* Commit the new RSP value. */
10978 if (rc == VINF_SUCCESS)
10979 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10980 }
10981
10982 return rc;
10983}
10984
10985
10986/**
10987 * Pops a dword from the stack.
10988 *
10989 * @returns Strict VBox status code.
10990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10991 * @param pu32Value Where to store the popped value.
10992 */
10993IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10994{
10995 /* Increment the stack pointer. */
10996 uint64_t uNewRsp;
10997 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10998
10999 /* Fetch the dword the lazy way. */
11000 uint32_t const *pu32Src;
11001 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11002 if (rc == VINF_SUCCESS)
11003 {
11004 *pu32Value = *pu32Src;
11005 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
11006
11007 /* Commit the new RSP value. */
11008 if (rc == VINF_SUCCESS)
11009 pVCpu->cpum.GstCtx.rsp = uNewRsp;
11010 }
11011
11012 return rc;
11013}
11014
11015
11016/**
11017 * Pops a qword from the stack.
11018 *
11019 * @returns Strict VBox status code.
11020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11021 * @param pu64Value Where to store the popped value.
11022 */
11023IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
11024{
11025 /* Increment the stack pointer. */
11026 uint64_t uNewRsp;
11027 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
11028
11029 /* Fetch the qword the lazy way. */
11030 uint64_t const *pu64Src;
11031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11032 if (rc == VINF_SUCCESS)
11033 {
11034 *pu64Value = *pu64Src;
11035 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
11036
11037 /* Commit the new RSP value. */
11038 if (rc == VINF_SUCCESS)
11039 pVCpu->cpum.GstCtx.rsp = uNewRsp;
11040 }
11041
11042 return rc;
11043}
11044
11045
11046/**
11047 * Pushes a word onto the stack, using a temporary stack pointer.
11048 *
11049 * @returns Strict VBox status code.
11050 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11051 * @param u16Value The value to push.
11052 * @param pTmpRsp Pointer to the temporary stack pointer.
11053 */
11054IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
11055{
11056 /* Decrement the stack pointer. */
11057 RTUINT64U NewRsp = *pTmpRsp;
11058 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
11059
11060 /* Write the word the lazy way. */
11061 uint16_t *pu16Dst;
11062 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
11063 if (rc == VINF_SUCCESS)
11064 {
11065 *pu16Dst = u16Value;
11066 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
11067 }
11068
11069 /* Commit the new RSP value unless an access handler made trouble. */
11070 if (rc == VINF_SUCCESS)
11071 *pTmpRsp = NewRsp;
11072
11073 return rc;
11074}
11075
11076
11077/**
11078 * Pushes a dword onto the stack, using a temporary stack pointer.
11079 *
11080 * @returns Strict VBox status code.
11081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11082 * @param u32Value The value to push.
11083 * @param pTmpRsp Pointer to the temporary stack pointer.
11084 */
11085IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
11086{
11087 /* Decrement the stack pointer. */
11088 RTUINT64U NewRsp = *pTmpRsp;
11089 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
11090
11091 /* Write the dword the lazy way. */
11092 uint32_t *pu32Dst;
11093 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
11094 if (rc == VINF_SUCCESS)
11095 {
11096 *pu32Dst = u32Value;
11097 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
11098 }
11099
11100 /* Commit the new RSP value unless an access handler made trouble. */
11101 if (rc == VINF_SUCCESS)
11102 *pTmpRsp = NewRsp;
11103
11104 return rc;
11105}
11106
11107
11108/**
11109 * Pushes a qword onto the stack, using a temporary stack pointer.
11110 *
11111 * @returns Strict VBox status code.
11112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11113 * @param u64Value The value to push.
11114 * @param pTmpRsp Pointer to the temporary stack pointer.
11115 */
11116IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
11117{
11118 /* Decrement the stack pointer. */
11119 RTUINT64U NewRsp = *pTmpRsp;
11120 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
11121
11122 /* Write the qword the lazy way. */
11123 uint64_t *pu64Dst;
11124 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
11125 if (rc == VINF_SUCCESS)
11126 {
11127 *pu64Dst = u64Value;
11128 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
11129 }
11130
11131 /* Commit the new RSP value unless an access handler made trouble. */
11132 if (rc == VINF_SUCCESS)
11133 *pTmpRsp = NewRsp;
11134
11135 return rc;
11136}
11137
11138
11139/**
11140 * Pops a word from the stack, using a temporary stack pointer.
11141 *
11142 * @returns Strict VBox status code.
11143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11144 * @param pu16Value Where to store the popped value.
11145 * @param pTmpRsp Pointer to the temporary stack pointer.
11146 */
11147IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
11148{
11149 /* Increment the stack pointer. */
11150 RTUINT64U NewRsp = *pTmpRsp;
11151 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
11152
11153 /* Fetch the word the lazy way. */
11154 uint16_t const *pu16Src;
11155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11156 if (rc == VINF_SUCCESS)
11157 {
11158 *pu16Value = *pu16Src;
11159 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
11160
11161 /* Commit the new RSP value. */
11162 if (rc == VINF_SUCCESS)
11163 *pTmpRsp = NewRsp;
11164 }
11165
11166 return rc;
11167}
11168
11169
11170/**
11171 * Pops a dword from the stack, using a temporary stack pointer.
11172 *
11173 * @returns Strict VBox status code.
11174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11175 * @param pu32Value Where to store the popped value.
11176 * @param pTmpRsp Pointer to the temporary stack pointer.
11177 */
11178IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
11179{
11180 /* Increment the stack pointer. */
11181 RTUINT64U NewRsp = *pTmpRsp;
11182 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
11183
11184 /* Fetch the dword the lazy way. */
11185 uint32_t const *pu32Src;
11186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11187 if (rc == VINF_SUCCESS)
11188 {
11189 *pu32Value = *pu32Src;
11190 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
11191
11192 /* Commit the new RSP value. */
11193 if (rc == VINF_SUCCESS)
11194 *pTmpRsp = NewRsp;
11195 }
11196
11197 return rc;
11198}
11199
11200
11201/**
11202 * Pops a qword from the stack, using a temporary stack pointer.
11203 *
11204 * @returns Strict VBox status code.
11205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11206 * @param pu64Value Where to store the popped value.
11207 * @param pTmpRsp Pointer to the temporary stack pointer.
11208 */
11209IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
11210{
11211 /* Increment the stack pointer. */
11212 RTUINT64U NewRsp = *pTmpRsp;
11213 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
11214
11215 /* Fetch the qword the lazy way. */
11216 uint64_t const *pu64Src;
11217 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11218 if (rcStrict == VINF_SUCCESS)
11219 {
11220 *pu64Value = *pu64Src;
11221 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
11222
11223 /* Commit the new RSP value. */
11224 if (rcStrict == VINF_SUCCESS)
11225 *pTmpRsp = NewRsp;
11226 }
11227
11228 return rcStrict;
11229}
11230
11231
11232/**
11233 * Begin a special stack push (used by interrupts, exceptions and such).
11234 *
11235 * This will raise \#SS or \#PF if appropriate.
11236 *
11237 * @returns Strict VBox status code.
11238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11239 * @param cbMem The number of bytes to push onto the stack.
11240 * @param ppvMem Where to return the pointer to the stack memory.
11241 * As with the other memory functions this could be
11242 * direct access or bounce buffered access, so
11243 * don't commit the register update until the commit call
11244 * succeeds.
11245 * @param puNewRsp Where to return the new RSP value. This must be
11246 * passed unchanged to
11247 * iemMemStackPushCommitSpecial().
11248 */
11249IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
11250{
11251 Assert(cbMem < UINT8_MAX);
11252 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
11253 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
11254}
11255
11256
11257/**
11258 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
11259 *
11260 * This will update the rSP.
11261 *
11262 * @returns Strict VBox status code.
11263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11264 * @param pvMem The pointer returned by
11265 * iemMemStackPushBeginSpecial().
11266 * @param uNewRsp The new RSP value returned by
11267 * iemMemStackPushBeginSpecial().
11268 */
11269IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
11270{
11271 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
11272 if (rcStrict == VINF_SUCCESS)
11273 pVCpu->cpum.GstCtx.rsp = uNewRsp;
11274 return rcStrict;
11275}
11276
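/*
 * Rough usage sketch for the special push pair above (hypothetical and
 * simplified; the frame layout and variable names are illustrative, not taken
 * from an actual caller):
 *
 *     uint64_t     uNewRsp;
 *     uint16_t    *pu16Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pu16Frame[2] = uOldFlags;   // fill in the frame while it is mapped
 *     pu16Frame[1] = uOldCs;
 *     pu16Frame[0] = uOldIp;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */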
11277
11278/**
11279 * Begin a special stack pop (used by iret, retf and such).
11280 *
11281 * This will raise \#SS or \#PF if appropriate.
11282 *
11283 * @returns Strict VBox status code.
11284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11285 * @param cbMem The number of bytes to pop from the stack.
11286 * @param ppvMem Where to return the pointer to the stack memory.
11287 * @param puNewRsp Where to return the new RSP value. This must be
11288 * assigned to CPUMCTX::rsp manually some time
11289 * after iemMemStackPopDoneSpecial() has been
11290 * called.
11291 */
11292IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
11293{
11294 Assert(cbMem < UINT8_MAX);
11295 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
11296 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11297}
11298
11299
11300/**
11301 * Continue a special stack pop (used by iret and retf).
11302 *
11303 * This will raise \#SS or \#PF if appropriate.
11304 *
11305 * @returns Strict VBox status code.
11306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11307 * @param cbMem The number of bytes to pop from the stack.
11308 * @param ppvMem Where to return the pointer to the stack memory.
11309 * @param puNewRsp Where to return the new RSP value. This must be
11310 * assigned to CPUMCTX::rsp manually some time
11311 * after iemMemStackPopDoneSpecial() has been
11312 * called.
11313 */
11314IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
11315{
11316 Assert(cbMem < UINT8_MAX);
11317 RTUINT64U NewRsp;
11318 NewRsp.u = *puNewRsp;
11319 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
11320 *puNewRsp = NewRsp.u;
11321 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
11322}
11323
11324
11325/**
11326 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
11327 * iemMemStackPopContinueSpecial).
11328 *
11329 * The caller will manually commit the rSP.
11330 *
11331 * @returns Strict VBox status code.
11332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11333 * @param pvMem The pointer returned by
11334 * iemMemStackPopBeginSpecial() or
11335 * iemMemStackPopContinueSpecial().
11336 */
11337IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
11338{
11339 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
11340}
11341
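/*
 * Rough usage sketch for the special pop helpers above (hypothetical and
 * simplified from an iret/retf style frame read; names are illustrative):
 *
 *     uint64_t        uNewRsp;
 *     uint32_t const *pu32Frame;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 12, (void const **)&pu32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip   = pu32Frame[0];
 *     uint32_t const uNewCs    = pu32Frame[1];
 *     uint32_t const uNewFlags = pu32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... validate and load the new state, then assign uNewRsp to
 *     // pVCpu->cpum.GstCtx.rsp manually, as described in the doc comments.
 */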
11342
11343/**
11344 * Fetches a system table byte.
11345 *
11346 * @returns Strict VBox status code.
11347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11348 * @param pbDst Where to return the byte.
11349 * @param iSegReg The index of the segment register to use for
11350 * this access. The base and limits are checked.
11351 * @param GCPtrMem The address of the guest memory.
11352 */
11353IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11354{
11355 /* The lazy approach for now... */
11356 uint8_t const *pbSrc;
11357 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11358 if (rc == VINF_SUCCESS)
11359 {
11360 *pbDst = *pbSrc;
11361 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
11362 }
11363 return rc;
11364}
11365
11366
11367/**
11368 * Fetches a system table word.
11369 *
11370 * @returns Strict VBox status code.
11371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11372 * @param pu16Dst Where to return the word.
11373 * @param iSegReg The index of the segment register to use for
11374 * this access. The base and limits are checked.
11375 * @param GCPtrMem The address of the guest memory.
11376 */
11377IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11378{
11379 /* The lazy approach for now... */
11380 uint16_t const *pu16Src;
11381 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11382 if (rc == VINF_SUCCESS)
11383 {
11384 *pu16Dst = *pu16Src;
11385 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
11386 }
11387 return rc;
11388}
11389
11390
11391/**
11392 * Fetches a system table dword.
11393 *
11394 * @returns Strict VBox status code.
11395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11396 * @param pu32Dst Where to return the dword.
11397 * @param iSegReg The index of the segment register to use for
11398 * this access. The base and limits are checked.
11399 * @param GCPtrMem The address of the guest memory.
11400 */
11401IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11402{
11403 /* The lazy approach for now... */
11404 uint32_t const *pu32Src;
11405 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11406 if (rc == VINF_SUCCESS)
11407 {
11408 *pu32Dst = *pu32Src;
11409 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
11410 }
11411 return rc;
11412}
11413
11414
11415/**
11416 * Fetches a system table qword.
11417 *
11418 * @returns Strict VBox status code.
11419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11420 * @param pu64Dst Where to return the qword.
11421 * @param iSegReg The index of the segment register to use for
11422 * this access. The base and limits are checked.
11423 * @param GCPtrMem The address of the guest memory.
11424 */
11425IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
11426{
11427 /* The lazy approach for now... */
11428 uint64_t const *pu64Src;
11429 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
11430 if (rc == VINF_SUCCESS)
11431 {
11432 *pu64Dst = *pu64Src;
11433 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
11434 }
11435 return rc;
11436}
11437
11438
11439/**
11440 * Fetches a descriptor table entry with caller specified error code.
11441 *
11442 * @returns Strict VBox status code.
11443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11444 * @param pDesc Where to return the descriptor table entry.
11445 * @param uSel The selector which table entry to fetch.
11446 * @param uXcpt The exception to raise on table lookup error.
11447 * @param uErrorCode The error code associated with the exception.
11448 */
11449IEM_STATIC VBOXSTRICTRC
11450iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
11451{
11452 AssertPtr(pDesc);
11453 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
11454
11455 /** @todo did the 286 require all 8 bytes to be accessible? */
11456 /*
11457 * Get the selector table base and check bounds.
11458 */
11459 RTGCPTR GCPtrBase;
11460 if (uSel & X86_SEL_LDT)
11461 {
11462 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
11463 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
11464 {
11465 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
11466 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
11467 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11468 uErrorCode, 0);
11469 }
11470
11471 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
11472 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
11473 }
11474 else
11475 {
11476 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11477 {
11478 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11479 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11480 uErrorCode, 0);
11481 }
11482 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11483 }
11484
11485 /*
11486 * Read the legacy descriptor and maybe the long mode extensions if
11487 * required.
11488 */
11489 VBOXSTRICTRC rcStrict;
11490 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11491 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11492 else
11493 {
11494 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11495 if (rcStrict == VINF_SUCCESS)
11496 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11497 if (rcStrict == VINF_SUCCESS)
11498 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11499 if (rcStrict == VINF_SUCCESS)
11500 pDesc->Legacy.au16[3] = 0;
11501 else
11502 return rcStrict;
11503 }
11504
11505 if (rcStrict == VINF_SUCCESS)
11506 {
11507 if ( !IEM_IS_LONG_MODE(pVCpu)
11508 || pDesc->Legacy.Gen.u1DescType)
11509 pDesc->Long.au64[1] = 0;
11510 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11511 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11512 else
11513 {
11514 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11515 /** @todo is this the right exception? */
11516 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11517 }
11518 }
11519 return rcStrict;
11520}
11521
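/*
 * Worked example of the bounds check and address calculation above
 * (illustrative): for uSel=0x001b (GDT, RPL=3) the limit check uses
 * (0x001b | X86_SEL_RPL_LDT) = 0x001f, i.e. the last byte of the 8-byte
 * descriptor, against gdtr.cbGdt, and the descriptor is read from
 * gdtr.pGdt + (0x001b & X86_SEL_MASK) = gdtr.pGdt + 0x18.  In long mode a
 * system descriptor additionally pulls in the high 8 bytes at offset +8.
 */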
11522
11523/**
11524 * Fetches a descriptor table entry.
11525 *
11526 * @returns Strict VBox status code.
11527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11528 * @param pDesc Where to return the descriptor table entry.
11529 * @param uSel The selector which table entry to fetch.
11530 * @param uXcpt The exception to raise on table lookup error.
11531 */
11532IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11533{
11534 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11535}
11536
11537
11538/**
11539 * Fakes a long mode stack segment descriptor for SS = 0.
11540 *
11541 * @param pDescSs Where to return the fake stack descriptor.
11542 * @param uDpl The DPL we want.
11543 */
11544IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11545{
11546 pDescSs->Long.au64[0] = 0;
11547 pDescSs->Long.au64[1] = 0;
11548 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11549 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11550 pDescSs->Long.Gen.u2Dpl = uDpl;
11551 pDescSs->Long.Gen.u1Present = 1;
11552 pDescSs->Long.Gen.u1Long = 1;
11553}
11554
11555
11556/**
11557 * Marks the selector descriptor as accessed (only non-system descriptors).
11558 *
11559 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11560 * will therefore skip the limit checks.
11561 *
11562 * @returns Strict VBox status code.
11563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11564 * @param uSel The selector.
11565 */
11566IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11567{
11568 /*
11569 * Get the selector table base and calculate the entry address.
11570 */
11571 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11572 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11573 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11574 GCPtr += uSel & X86_SEL_MASK;
11575
11576 /*
11577 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11578 * ugly stuff to avoid this. This will make sure it's an atomic access
11579 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11580 */
11581 VBOXSTRICTRC rcStrict;
11582 uint32_t volatile *pu32;
11583 if ((GCPtr & 3) == 0)
11584 {
11585 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11586 GCPtr += 2 + 2;
11587 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11588 if (rcStrict != VINF_SUCCESS)
11589 return rcStrict;
11590 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11591 }
11592 else
11593 {
11594 /* The misaligned GDT/LDT case, map the whole thing. */
11595 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11596 if (rcStrict != VINF_SUCCESS)
11597 return rcStrict;
11598 switch ((uintptr_t)pu32 & 3)
11599 {
11600 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11601 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11602 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11603 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11604 }
11605 }
11606
11607 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11608}
11609
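/*
 * The offset arithmetic above, spelled out: the accessed flag is type bit 0,
 * i.e. bit 40 of the 8-byte descriptor.  In the aligned case the mapping
 * starts at descriptor offset 4, so the flag becomes bit 40 - 32 = 8 of the
 * mapped dword.  In the misaligned case the whole descriptor is mapped and
 * the pointer/bit-index pair is rebased so that ASMAtomicBitSet always gets a
 * 32-bit aligned base address covering bit 40.
 */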
11610/** @} */
11611
11612
11613/*
11614 * Include the C/C++ implementation of instruction.
11615 */
11616#include "IEMAllCImpl.cpp.h"
11617
11618
11619
11620/** @name "Microcode" macros.
11621 *
11622 * The idea is that we should be able to use the same code to interpret
11623 * instructions as well as to feed a recompiler. Thus this obfuscation.
11624 *
11625 * @{
11626 */
11627#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11628#define IEM_MC_END() }
11629#define IEM_MC_PAUSE() do {} while (0)
11630#define IEM_MC_CONTINUE() do {} while (0)
11631
11632/** Internal macro. */
11633#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11634 do \
11635 { \
11636 VBOXSTRICTRC rcStrict2 = a_Expr; \
11637 if (rcStrict2 != VINF_SUCCESS) \
11638 return rcStrict2; \
11639 } while (0)
11640
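/*
 * Illustrative use of the IEM_MC_* microcode macros (a made-up instruction
 * body, not copied from the real decoder; the register indices and the
 * surrounding decode glue are assumed):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Tmp);
 *     IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xCX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Tmp);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 *
 * When interpreting, the macros expand to the inline C defined below; a
 * recompiler could instead translate the same macro stream into generated
 * code, which is the point of the obfuscation mentioned above.
 */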
11641
11642#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11643#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11644#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11645#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11646#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11647#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11648#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11649#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11650#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11651 do { \
11652 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11653 return iemRaiseDeviceNotAvailable(pVCpu); \
11654 } while (0)
11655#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11656 do { \
11657 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11658 return iemRaiseDeviceNotAvailable(pVCpu); \
11659 } while (0)
11660#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11661 do { \
11662 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11663 return iemRaiseMathFault(pVCpu); \
11664 } while (0)
11665#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11666 do { \
11667 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11668 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11669 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11670 return iemRaiseUndefinedOpcode(pVCpu); \
11671 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11672 return iemRaiseDeviceNotAvailable(pVCpu); \
11673 } while (0)
11674#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11675 do { \
11676 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11677 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11678 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11679 return iemRaiseUndefinedOpcode(pVCpu); \
11680 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11681 return iemRaiseDeviceNotAvailable(pVCpu); \
11682 } while (0)
11683#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11684 do { \
11685 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11686 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11687 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11688 return iemRaiseUndefinedOpcode(pVCpu); \
11689 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11690 return iemRaiseDeviceNotAvailable(pVCpu); \
11691 } while (0)
11692#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11693 do { \
11694 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11695 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11696 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11697 return iemRaiseUndefinedOpcode(pVCpu); \
11698 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11699 return iemRaiseDeviceNotAvailable(pVCpu); \
11700 } while (0)
11701#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11702 do { \
11703 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11704 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11705 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11706 return iemRaiseUndefinedOpcode(pVCpu); \
11707 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11708 return iemRaiseDeviceNotAvailable(pVCpu); \
11709 } while (0)
11710#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11711 do { \
11712 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11713 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11714 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11715 return iemRaiseUndefinedOpcode(pVCpu); \
11716 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11717 return iemRaiseDeviceNotAvailable(pVCpu); \
11718 } while (0)
11719#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11720 do { \
11721 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11722 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11723 return iemRaiseUndefinedOpcode(pVCpu); \
11724 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11725 return iemRaiseDeviceNotAvailable(pVCpu); \
11726 } while (0)
11727#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11728 do { \
11729 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11730 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11731 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11732 return iemRaiseUndefinedOpcode(pVCpu); \
11733 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11734 return iemRaiseDeviceNotAvailable(pVCpu); \
11735 } while (0)
11736#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11737 do { \
11738 if (pVCpu->iem.s.uCpl != 0) \
11739 return iemRaiseGeneralProtectionFault0(pVCpu); \
11740 } while (0)
11741#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11742 do { \
11743 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11744 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11745 } while (0)
11746#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11747 do { \
11748 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11749 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11750 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11751 return iemRaiseUndefinedOpcode(pVCpu); \
11752 } while (0)
11753#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11754 do { \
11755 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11756 return iemRaiseGeneralProtectionFault0(pVCpu); \
11757 } while (0)
11758
11759
11760#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11761#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11762#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11763#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11764#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11765#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11766#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11767 uint32_t a_Name; \
11768 uint32_t *a_pName = &a_Name
11769#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11770 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11771
11772#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11773#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11774
11775#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11776#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11777#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11778#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11779#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11780#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11781#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11782#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11783#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11784#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11785#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11786#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11787#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11788#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11789#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11790#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11791#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11792#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11793 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11794 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11795 } while (0)
11796#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11797 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11798 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11799 } while (0)
11800#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11801 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11802 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11803 } while (0)
11804/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11805#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11806 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11807 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11808 } while (0)
11809#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11810 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11811 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11812 } while (0)
11813/** @note Not for IOPL or IF testing or modification. */
11814#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11815#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11816#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11817#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11818
11819#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11820#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11821#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11822#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11823#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11824#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11825#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11826#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11827#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11828#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11829/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11830#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11831 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11832 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11833 } while (0)
11834#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11835 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11836 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11837 } while (0)
11838#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11839 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11840
11841
11842#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11843#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11844/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11845 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11846#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11847#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11848/** @note Not for IOPL or IF testing or modification. */
11849#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11850
11851#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11852#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11853#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11854 do { \
11855 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11856 *pu32Reg += (a_u32Value); \
11857 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11858 } while (0)
11859#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11860
11861#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11862#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11863#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11864 do { \
11865 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11866 *pu32Reg -= (a_u32Value); \
11867 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11868 } while (0)
11869#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11870#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11871
11872#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11873#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11874#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11875#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11876#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11877#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11878#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11879
11880#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11881#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11882#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11883#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11884
11885#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11886#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11887#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11888
11889#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11890#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11891#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11892
11893#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11894#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11895#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11896
11897#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11898#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11899#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11900
11901#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11902
11903#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11904
11905#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11906#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11907#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11908 do { \
11909 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11910 *pu32Reg &= (a_u32Value); \
11911 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11912 } while (0)
11913#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11914
11915#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11916#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11917#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11918 do { \
11919 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11920 *pu32Reg |= (a_u32Value); \
11921 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11922 } while (0)
11923#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11924
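/*
 * The pu32Reg[1] = 0 stores in the 32-bit GREG arithmetic/logic macros above
 * reproduce the x86-64 rule that writing a 32-bit general purpose register
 * zero extends the result into the full 64-bit register, whereas the 8-bit
 * and 16-bit variants leave the upper bits untouched, matching the hardware.
 */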
11925
11926/** @note Not for IOPL or IF modification. */
11927#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11928/** @note Not for IOPL or IF modification. */
11929#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11930/** @note Not for IOPL or IF modification. */
11931#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11932
11933#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11934
11935/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11936#define IEM_MC_FPU_TO_MMX_MODE() do { \
11937 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11938 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11939 } while (0)
11940
11941/** Switches the FPU state from MMX mode (FTW=0xffff). */
11942#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11943 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11944 } while (0)
11945
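/*
 * Note that the FTW field used above is the abridged FXSAVE/XSAVE tag byte
 * (one bit per register, 1 = valid), so 0xff corresponds to the architectural
 * "all tags valid" state (full-format FTW=0) that MMX instructions establish,
 * and 0 corresponds to "all tags empty" (full-format FTW=0xffff) after EMMS.
 */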
11946#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11947 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11948#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11949 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11950#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11951 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11952 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11953 } while (0)
11954#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11955 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11956 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11957 } while (0)
11958#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11959 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11960#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11961 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11962#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11963 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11964
11965#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11966 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11967 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11968 } while (0)
11969#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11970 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11971#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11972 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11973#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11974 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11975#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11976 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11977 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11978 } while (0)
11979#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11980 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11981#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11982 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11983 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11984 } while (0)
11985#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11986 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11987#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11988 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11989 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11990 } while (0)
11991#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11992 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11993#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11994 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11995#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11996 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11997#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11998 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11999#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
12000 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
12001 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
12002 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
12003 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
12004 } while (0)
12005
12006#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
12007 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12008 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
12009 } while (0)
12010#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
12011 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12012 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12013 } while (0)
12014#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
12015 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12016 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12017 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
12018 } while (0)
12019#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
12020 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12021 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12022 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
12023 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
12024 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
12025 } while (0)
12026
12027#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
12028#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
12029 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12030 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
12031 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
12032 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
12033 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12034 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12035 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12036 } while (0)
12037#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
12038 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12039 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
12040 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
12041 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12042 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12043 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12044 } while (0)
12045#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
12046 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12047 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
12048 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
12049 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12050 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12051 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12052 } while (0)
12053#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
12054 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12055 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
12056 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
12057 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
12058 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
12059 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12060 } while (0)
12061
12062#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
12063 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
12064#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
12065 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
12066#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
12067 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
12068#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
12069 do { uintptr_t const iYRegTmp = (a_iYReg); \
12070 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
12071 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
12072 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
12073 } while (0)
12074
12075#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
12076 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12077 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12078 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12079 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
12080 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
12081 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
12082 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12083 } while (0)
12084#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
12085 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12086 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12087 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12088 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
12089 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12090 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12091 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12092 } while (0)
12093#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
12094 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12095 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
12096 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
12097 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
12098 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12099 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12100 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12101 } while (0)
12102
12103#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
12104 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12105 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
12106 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
12107 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
12108 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
12109 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
12110 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12111 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12112 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12113 } while (0)
12114#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
12115 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12116 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
12117 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
12118 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
12119 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
12120 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12121 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12122 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12123 } while (0)
12124#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
12125 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12126 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
12127 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
12128 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
12129 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
12130 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12131 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12132 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12133 } while (0)
12134#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
12135 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
12136 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
12137 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
12138 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
12139 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
12140 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
12141 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
12142 } while (0)
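/*
 * Illustrative sketch only: the register form of a VMOVSS-like instruction
 * merges a 32-bit value with the upper 96 bits of a second source and zeroes
 * the rest of the destination, which is what the merge helper above expresses.
 * The surrounding checks may differ in the real decoders; the register indices
 * are assumed to come from the VEX/ModR/M decoding.
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *     IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE();
 *     IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(iYRegDst, iYRegSrc32, iYRegSrcHx);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */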
12143
12144#ifndef IEM_WITH_SETJMP
12145# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
12146 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
12147# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
12148 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
12149# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
12150 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
12151#else
12152# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
12153 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12154# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
12155 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
12156# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
12157 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
12158#endif
12159
12160#ifndef IEM_WITH_SETJMP
12161# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12162 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
12163# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12164 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12165# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
12166 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
12167#else
12168# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12169 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12170# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12171 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12172# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
12173 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12174#endif
12175
12176#ifndef IEM_WITH_SETJMP
12177# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12178 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
12179# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12180 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12181# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
12182 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
12183#else
12184# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12185 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12186# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12187 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12188# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
12189 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12190#endif
12191
12192#ifdef SOME_UNUSED_FUNCTION
12193# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12194 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
12195#endif
12196
12197#ifndef IEM_WITH_SETJMP
12198# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12199 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
12200# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12201 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12202# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
12203 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
12204# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
12205 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
12206#else
12207# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12208 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12209# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
12210 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
12211# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
12212 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12213# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
12214 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12215#endif
12216
12217#ifndef IEM_WITH_SETJMP
12218# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
12219 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
12220# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
12221 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
12222# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
12223 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
12224#else
12225# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
12226 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12227# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
12228 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12229# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
12230 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
12231#endif
12232
12233#ifndef IEM_WITH_SETJMP
12234# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
12235 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
12236# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
12237 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
12238#else
12239# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
12240 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
12241# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
12242 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
12243#endif
12244
12245#ifndef IEM_WITH_SETJMP
12246# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
12247 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
12248# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
12249 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
12250#else
12251# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
12252 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
12253# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
12254 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
12255#endif
12256
12257
12258
12259#ifndef IEM_WITH_SETJMP
12260# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12261 do { \
12262 uint8_t u8Tmp; \
12263 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12264 (a_u16Dst) = u8Tmp; \
12265 } while (0)
12266# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12267 do { \
12268 uint8_t u8Tmp; \
12269 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12270 (a_u32Dst) = u8Tmp; \
12271 } while (0)
12272# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12273 do { \
12274 uint8_t u8Tmp; \
12275 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12276 (a_u64Dst) = u8Tmp; \
12277 } while (0)
12278# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12279 do { \
12280 uint16_t u16Tmp; \
12281 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12282 (a_u32Dst) = u16Tmp; \
12283 } while (0)
12284# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12285 do { \
12286 uint16_t u16Tmp; \
12287 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12288 (a_u64Dst) = u16Tmp; \
12289 } while (0)
12290# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12291 do { \
12292 uint32_t u32Tmp; \
12293 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
12294 (a_u64Dst) = u32Tmp; \
12295 } while (0)
12296#else /* IEM_WITH_SETJMP */
12297# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12298 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12299# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12300 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12301# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12302 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12303# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12304 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12305# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12306 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12307# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12308 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12309#endif /* IEM_WITH_SETJMP */
12310
12311#ifndef IEM_WITH_SETJMP
12312# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12313 do { \
12314 uint8_t u8Tmp; \
12315 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12316 (a_u16Dst) = (int8_t)u8Tmp; \
12317 } while (0)
12318# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12319 do { \
12320 uint8_t u8Tmp; \
12321 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12322 (a_u32Dst) = (int8_t)u8Tmp; \
12323 } while (0)
12324# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12325 do { \
12326 uint8_t u8Tmp; \
12327 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
12328 (a_u64Dst) = (int8_t)u8Tmp; \
12329 } while (0)
12330# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12331 do { \
12332 uint16_t u16Tmp; \
12333 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12334 (a_u32Dst) = (int16_t)u16Tmp; \
12335 } while (0)
12336# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12337 do { \
12338 uint16_t u16Tmp; \
12339 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
12340 (a_u64Dst) = (int16_t)u16Tmp; \
12341 } while (0)
12342# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12343 do { \
12344 uint32_t u32Tmp; \
12345 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
12346 (a_u64Dst) = (int32_t)u32Tmp; \
12347 } while (0)
12348#else /* IEM_WITH_SETJMP */
12349# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
12350 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12351# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12352 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12353# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12354 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12355# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
12356 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12357# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12358 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12359# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
12360 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
12361#endif /* IEM_WITH_SETJMP */
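/*
 * Illustrative sketch only: a MOVSX-like memory form typically pairs one of the
 * sign-extending fetch macros above with a general register store.  bRm and the
 * destination register index iGRegDst are assumed to have been decoded by the
 * surrounding decoder function; GCPtrEffSrc is produced by
 * IEM_MC_CALC_RM_EFF_ADDR (defined further down).
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *     IEM_MC_STORE_GREG_U32(iGRegDst, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */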
12362
12363#ifndef IEM_WITH_SETJMP
12364# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
12365 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
12366# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
12367 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
12368# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
12369 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
12370# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
12371 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
12372#else
12373# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
12374 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
12375# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
12376 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
12377# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
12378 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
12379# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
12380 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
12381#endif
12382
12383#ifndef IEM_WITH_SETJMP
12384# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
12385 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
12386# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
12387 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
12388# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
12389 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
12390# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
12391 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
12392#else
12393# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
12394 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
12395# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
12396 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
12397# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
12398 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
12399# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
12400 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
12401#endif
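/*
 * Illustrative sketch only: storing a general register to memory (the memory
 * form of a MOV-like instruction) uses the store macros above.  bRm and the
 * source register index iGRegSrc are assumed to come from the decoder.
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_FETCH_GREG_U32(u32Value, iGRegSrc);
 *     IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */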
12402
12403#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
12404#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
12405#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
12406#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
12407#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
12408#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
12409#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
12410 do { \
12411 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
12412 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
12413 } while (0)
12414#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
12415 do { \
12416 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
12417 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
12418 } while (0)
12419
12420#ifndef IEM_WITH_SETJMP
12421# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
12422 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
12423# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
12424 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
12425#else
12426# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
12427 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
12428# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
12429 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
12430#endif
12431
12432#ifndef IEM_WITH_SETJMP
12433# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
12434 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
12435# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
12436 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
12437#else
12438# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
12439 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
12440# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
12441 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
12442#endif
12443
12444
12445#define IEM_MC_PUSH_U16(a_u16Value) \
12446 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
12447#define IEM_MC_PUSH_U32(a_u32Value) \
12448 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
12449#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
12450 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
12451#define IEM_MC_PUSH_U64(a_u64Value) \
12452 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
12453
12454#define IEM_MC_POP_U16(a_pu16Value) \
12455 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
12456#define IEM_MC_POP_U32(a_pu32Value) \
12457 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
12458#define IEM_MC_POP_U64(a_pu64Value) \
12459 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
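/*
 * Illustrative sketch only: a 16-bit PUSH of a general register is a fetch
 * followed by one of the push macros above; the register index iGReg is
 * assumed to have been decoded from the opcode byte.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGReg);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */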
12460
12461/** Maps guest memory for direct or bounce buffered access.
12462 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12463 * @remarks May return.
12464 */
12465#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12466 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12467
12468/** Maps guest memory for direct or bounce buffered access.
12469 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12470 * @remarks May return.
12471 */
12472#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12473 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12474
12475/** Commits the memory and unmaps the guest memory.
12476 * @remarks May return.
12477 */
12478#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12479 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
12480
12481/** Commits the memory and unmaps the guest memory, unless the FPU status word
12482 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
12483 * exception that would cause the store not to take place.
12484 *
12485 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12486 * store, while \#P will not.
12487 *
12488 * @remarks May in theory return - for now.
12489 */
12490#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12491 do { \
12492 if ( !(a_u16FSW & X86_FSW_ES) \
12493 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12494 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
12495 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12496 } while (0)
12497
12498/** Calculate efficient address from R/M. */
12499#ifndef IEM_WITH_SETJMP
12500# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12501 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12502#else
12503# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12504 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12505#endif
12506
12507#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12508#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12509#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12510#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12511#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12512#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12513#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
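/*
 * Illustrative sketch only: a read-modify-write memory operand combines
 * IEM_MC_CALC_RM_EFF_ADDR, IEM_MC_MEM_MAP and IEM_MC_MEM_COMMIT_AND_UNMAP with
 * one of the IEM_MC_CALL_*_AIMPL_* macros above.  A binary-operator worker like
 * iemAImpl_add_u32 takes the mapped destination pointer, the source value and
 * an EFLAGS pointer; bRm and iGRegSrc are assumed to be provided by the decoder
 * and lock-prefix handling is left out here.
 *
 *     IEM_MC_BEGIN(3, 2);
 *     IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *     IEM_MC_ARG(uint32_t,   u32Src,  1);
 *     IEM_MC_LOCAL(uint32_t, EFlags);
 *     IEM_MC_ARG_LOCAL_REF(uint32_t *, pEFlags, EFlags, 2);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
 *     IEM_MC_FETCH_GREG_U32(u32Src, iGRegSrc);
 *     IEM_MC_FETCH_EFLAGS(EFlags);
 *     IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *     IEM_MC_COMMIT_EFLAGS(EFlags);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */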
12514
12515/**
12516 * Defers the rest of the instruction emulation to a C implementation routine
12517 * and returns, only taking the standard parameters.
12518 *
12519 * @param a_pfnCImpl The pointer to the C routine.
12520 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12521 */
12522#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12523
12524/**
12525 * Defers the rest of instruction emulation to a C implementation routine and
12526 * returns, taking one argument in addition to the standard ones.
12527 *
12528 * @param a_pfnCImpl The pointer to the C routine.
12529 * @param a0 The argument.
12530 */
12531#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12532
12533/**
12534 * Defers the rest of the instruction emulation to a C implementation routine
12535 * and returns, taking two arguments in addition to the standard ones.
12536 *
12537 * @param a_pfnCImpl The pointer to the C routine.
12538 * @param a0 The first extra argument.
12539 * @param a1 The second extra argument.
12540 */
12541#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12542
12543/**
12544 * Defers the rest of the instruction emulation to a C implementation routine
12545 * and returns, taking three arguments in addition to the standard ones.
12546 *
12547 * @param a_pfnCImpl The pointer to the C routine.
12548 * @param a0 The first extra argument.
12549 * @param a1 The second extra argument.
12550 * @param a2 The third extra argument.
12551 */
12552#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12553
12554/**
12555 * Defers the rest of the instruction emulation to a C implementation routine
12556 * and returns, taking four arguments in addition to the standard ones.
12557 *
12558 * @param a_pfnCImpl The pointer to the C routine.
12559 * @param a0 The first extra argument.
12560 * @param a1 The second extra argument.
12561 * @param a2 The third extra argument.
12562 * @param a3 The fourth extra argument.
12563 */
12564#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12565
12566/**
12567 * Defers the rest of the instruction emulation to a C implementation routine
12568 * and returns, taking two arguments in addition to the standard ones.
12569 *
12570 * @param a_pfnCImpl The pointer to the C routine.
12571 * @param a0 The first extra argument.
12572 * @param a1 The second extra argument.
12573 * @param a2 The third extra argument.
12574 * @param a3 The fourth extra argument.
12575 * @param a4 The fifth extra argument.
12576 */
12577#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12578
12579/**
12580 * Defers the entire instruction emulation to a C implementation routine and
12581 * returns, only taking the standard parameters.
12582 *
12583 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12584 *
12585 * @param a_pfnCImpl The pointer to the C routine.
12586 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12587 */
12588#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12589
12590/**
12591 * Defers the entire instruction emulation to a C implementation routine and
12592 * returns, taking one argument in addition to the standard ones.
12593 *
12594 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12595 *
12596 * @param a_pfnCImpl The pointer to the C routine.
12597 * @param a0 The argument.
12598 */
12599#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12600
12601/**
12602 * Defers the entire instruction emulation to a C implementation routine and
12603 * returns, taking two arguments in addition to the standard ones.
12604 *
12605 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12606 *
12607 * @param a_pfnCImpl The pointer to the C routine.
12608 * @param a0 The first extra argument.
12609 * @param a1 The second extra argument.
12610 */
12611#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12612
12613/**
12614 * Defers the entire instruction emulation to a C implementation routine and
12615 * returns, taking three arguments in addition to the standard ones.
12616 *
12617 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12618 *
12619 * @param a_pfnCImpl The pointer to the C routine.
12620 * @param a0 The first extra argument.
12621 * @param a1 The second extra argument.
12622 * @param a2 The third extra argument.
12623 */
12624#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
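/*
 * Illustrative sketch only: instructions that are emulated entirely in C are
 * one-liners in the decoder, roughly along these lines for HLT (the exact
 * decoder function body may differ):
 *
 *     FNIEMOP_DEF(iemOp_hlt)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *     }
 */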
12625
12626/**
12627 * Calls an FPU assembly implementation taking one visible argument.
12628 *
12629 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12630 * @param a0 The first extra argument.
12631 */
12632#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12633 do { \
12634 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12635 } while (0)
12636
12637/**
12638 * Calls an FPU assembly implementation taking two visible arguments.
12639 *
12640 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12641 * @param a0 The first extra argument.
12642 * @param a1 The second extra argument.
12643 */
12644#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12645 do { \
12646 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12647 } while (0)
12648
12649/**
12650 * Calls an FPU assembly implementation taking three visible arguments.
12651 *
12652 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12653 * @param a0 The first extra argument.
12654 * @param a1 The second extra argument.
12655 * @param a2 The third extra argument.
12656 */
12657#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12658 do { \
12659 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12660 } while (0)
12661
12662#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12663 do { \
12664 (a_FpuData).FSW = (a_FSW); \
12665 (a_FpuData).r80Result = *(a_pr80Value); \
12666 } while (0)
12667
12668/** Pushes FPU result onto the stack. */
12669#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12670 iemFpuPushResult(pVCpu, &a_FpuData)
12671/** Pushes FPU result onto the stack and sets the FPUDP. */
12672#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12673 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12674
12675/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12676#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12677 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12678
12679/** Stores FPU result in a stack register. */
12680#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12681 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12682/** Stores FPU result in a stack register and pops the stack. */
12683#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12684 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12685/** Stores FPU result in a stack register and sets the FPUDP. */
12686#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12687 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12688/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12689 * stack. */
12690#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12691 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
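/*
 * Illustrative sketch only: a typical ST(0),ST(i) arithmetic instruction wires
 * the FPU call and result-store macros together as below.  pfnAImpl stands for
 * a worker such as iemAImpl_fadd_r80_by_r80 and iStReg for the decoded stack
 * register index; both are placeholders here.
 *
 *     IEM_MC_BEGIN(3, 1);
 *     IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *     IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *     IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *     IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg) {
 *         IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     } IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */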
12692
12693/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12694#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12695 iemFpuUpdateOpcodeAndIp(pVCpu)
12696/** Free a stack register (for FFREE and FFREEP). */
12697#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12698 iemFpuStackFree(pVCpu, a_iStReg)
12699/** Increment the FPU stack pointer. */
12700#define IEM_MC_FPU_STACK_INC_TOP() \
12701 iemFpuStackIncTop(pVCpu)
12702/** Decrement the FPU stack pointer. */
12703#define IEM_MC_FPU_STACK_DEC_TOP() \
12704 iemFpuStackDecTop(pVCpu)
12705
12706/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12707#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12708 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12709/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12710#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12711 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12712/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12713#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12714 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12715/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12716#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12717 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12718/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12719 * stack. */
12720#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12721 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12722/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12723#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12724 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12725
12726/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12727#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12728 iemFpuStackUnderflow(pVCpu, a_iStDst)
12729/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12730 * stack. */
12731#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12732 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12733/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12734 * FPUDS. */
12735#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12736 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12737/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12738 * FPUDS. Pops stack. */
12739#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12740 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12741/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12742 * stack twice. */
12743#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12744 iemFpuStackUnderflowThenPopPop(pVCpu)
12745/** Raises a FPU stack underflow exception for an instruction pushing a result
12746 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12747#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12748 iemFpuStackPushUnderflow(pVCpu)
12749/** Raises a FPU stack underflow exception for an instruction pushing a result
12750 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12751#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12752 iemFpuStackPushUnderflowTwo(pVCpu)
12753
12754/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12755 * FPUIP, FPUCS and FOP. */
12756#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12757 iemFpuStackPushOverflow(pVCpu)
12758/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12759 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12760#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12761 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12762/** Prepares for using the FPU state.
12763 * Ensures that we can use the host FPU in the current context (RC+R0).
12764 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12765#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12766/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12767#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12768/** Actualizes the guest FPU state so it can be accessed and modified. */
12769#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12770
12771/** Prepares for using the SSE state.
12772 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12773 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12774#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12775/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12776#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12777/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12778#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12779
12780/** Prepares for using the AVX state.
12781 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12782 * Ensures the guest AVX state in the CPUMCTX is up to date.
12783 * @note This will include the AVX512 state too when support for it is added,
12784 * due to the zero-extending behaviour of VEX-encoded instructions. */
12785#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12786/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12787#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12788/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12789#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12790
12791/**
12792 * Calls an MMX assembly implementation taking two visible arguments.
12793 *
12794 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12795 * @param a0 The first extra argument.
12796 * @param a1 The second extra argument.
12797 */
12798#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12799 do { \
12800 IEM_MC_PREPARE_FPU_USAGE(); \
12801 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12802 } while (0)
12803
12804/**
12805 * Calls an MMX assembly implementation taking three visible arguments.
12806 *
12807 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12808 * @param a0 The first extra argument.
12809 * @param a1 The second extra argument.
12810 * @param a2 The third extra argument.
12811 */
12812#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12813 do { \
12814 IEM_MC_PREPARE_FPU_USAGE(); \
12815 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12816 } while (0)
12817
12818
12819/**
12820 * Calls an SSE assembly implementation taking two visible arguments.
12821 *
12822 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12823 * @param a0 The first extra argument.
12824 * @param a1 The second extra argument.
12825 */
12826#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12827 do { \
12828 IEM_MC_PREPARE_SSE_USAGE(); \
12829 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12830 } while (0)
12831
12832/**
12833 * Calls a SSE assembly implementation taking three visible arguments.
12834 *
12835 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12836 * @param a0 The first extra argument.
12837 * @param a1 The second extra argument.
12838 * @param a2 The third extra argument.
12839 */
12840#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12841 do { \
12842 IEM_MC_PREPARE_SSE_USAGE(); \
12843 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12844 } while (0)
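/*
 * Illustrative sketch only: a two-operand, register-form SSE instruction such
 * as PXOR references the XMM registers and calls an assembly worker (here
 * assumed to be iemAImpl_pxor_u128) via the macro above.  iXRegDst and iXRegSrc
 * stand in for indices decoded from the ModR/M byte and REX prefix.
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *     IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *     IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *     IEM_MC_PREPARE_SSE_USAGE();
 *     IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *     IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *     IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */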
12845
12846
12847/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12848 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12849#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12850 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12851
12852/**
12853 * Calls an AVX assembly implementation taking two visible arguments.
12854 *
12855 * There is one implicit zero'th argument, a pointer to the extended state.
12856 *
12857 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12858 * @param a1 The first extra argument.
12859 * @param a2 The second extra argument.
12860 */
12861#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12862 do { \
12863 IEM_MC_PREPARE_AVX_USAGE(); \
12864 a_pfnAImpl(pXState, (a1), (a2)); \
12865 } while (0)
12866
12867/**
12868 * Calls an AVX assembly implementation taking three visible arguments.
12869 *
12870 * There is one implicit zero'th argument, a pointer to the extended state.
12871 *
12872 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12873 * @param a1 The first extra argument.
12874 * @param a2 The second extra argument.
12875 * @param a3 The third extra argument.
12876 */
12877#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12878 do { \
12879 IEM_MC_PREPARE_AVX_USAGE(); \
12880 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12881 } while (0)
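/*
 * Illustrative sketch only: a VEX-encoded three-operand instruction would pass
 * the decoded register indices to an AVX worker roughly as below.  The index
 * values (derived from VEX.vvvv and ModR/M) and the worker pointer pfnAImplU256
 * are placeholders, not names taken from this file.
 *
 *     IEM_MC_BEGIN(4, 0);
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG_CONST(uint8_t, iYRegDst,  iYRegDstValue,  1);
 *     IEM_MC_ARG_CONST(uint8_t, iYRegSrc1, iYRegSrc1Value, 2);
 *     IEM_MC_ARG_CONST(uint8_t, iYRegSrc2, iYRegSrc2Value, 3);
 *     IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *     IEM_MC_PREPARE_AVX_USAGE();
 *     IEM_MC_CALL_AVX_AIMPL_3(pfnAImplU256, iYRegDst, iYRegSrc1, iYRegSrc2);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */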
12882
12883/** @note Not for IOPL or IF testing. */
12884#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12885/** @note Not for IOPL or IF testing. */
12886#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12887/** @note Not for IOPL or IF testing. */
12888#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12889/** @note Not for IOPL or IF testing. */
12890#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12891/** @note Not for IOPL or IF testing. */
12892#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12893 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12894 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12895/** @note Not for IOPL or IF testing. */
12896#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12897 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12898 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12899/** @note Not for IOPL or IF testing. */
12900#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12901 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12902 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12903 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12904/** @note Not for IOPL or IF testing. */
12905#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12906 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12907 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12908 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12909#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12910#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12911#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12912/** @note Not for IOPL or IF testing. */
12913#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12914 if ( pVCpu->cpum.GstCtx.cx != 0 \
12915 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12916/** @note Not for IOPL or IF testing. */
12917#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12918 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12919 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12920/** @note Not for IOPL or IF testing. */
12921#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12922 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12923 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12924/** @note Not for IOPL or IF testing. */
12925#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12926 if ( pVCpu->cpum.GstCtx.cx != 0 \
12927 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12928/** @note Not for IOPL or IF testing. */
12929#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12930 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12931 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12932/** @note Not for IOPL or IF testing. */
12933#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12934 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12935 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12936#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12937#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12938
12939#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12940 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12941#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12942 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12943#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12944 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12945#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12946 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12947#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12948 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12949#define IEM_MC_IF_FCW_IM() \
12950 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12951
12952#define IEM_MC_ELSE() } else {
12953#define IEM_MC_ENDIF() } do {} while (0)
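/*
 * Illustrative sketch only: the conditional macros above expand to plain
 * if/else blocks, so a Jcc-style instruction reads like this (i8Imm being the
 * already fetched displacement):
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_ADVANCE_RIP();
 *     } IEM_MC_ENDIF();
 *     IEM_MC_END();
 */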
12954
12955/** @} */
12956
12957
12958/** @name Opcode Debug Helpers.
12959 * @{
12960 */
12961#ifdef VBOX_WITH_STATISTICS
12962# ifdef IN_RING3
12963# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12964# else
12965# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12966# endif
12967#else
12968# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12969#endif
12970
12971#ifdef DEBUG
12972# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12973 do { \
12974 IEMOP_INC_STATS(a_Stats); \
12975 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12976 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12977 } while (0)
12978
12979# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12980 do { \
12981 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12982 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12983 (void)RT_CONCAT(OP_,a_Upper); \
12984 (void)(a_fDisHints); \
12985 (void)(a_fIemHints); \
12986 } while (0)
12987
12988# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12989 do { \
12990 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12991 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12992 (void)RT_CONCAT(OP_,a_Upper); \
12993 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12994 (void)(a_fDisHints); \
12995 (void)(a_fIemHints); \
12996 } while (0)
12997
12998# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12999 do { \
13000 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
13001 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
13002 (void)RT_CONCAT(OP_,a_Upper); \
13003 (void)RT_CONCAT(OP_PARM_,a_Op1); \
13004 (void)RT_CONCAT(OP_PARM_,a_Op2); \
13005 (void)(a_fDisHints); \
13006 (void)(a_fIemHints); \
13007 } while (0)
13008
13009# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
13010 do { \
13011 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
13012 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
13013 (void)RT_CONCAT(OP_,a_Upper); \
13014 (void)RT_CONCAT(OP_PARM_,a_Op1); \
13015 (void)RT_CONCAT(OP_PARM_,a_Op2); \
13016 (void)RT_CONCAT(OP_PARM_,a_Op3); \
13017 (void)(a_fDisHints); \
13018 (void)(a_fIemHints); \
13019 } while (0)
13020
13021# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
13022 do { \
13023 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
13024 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
13025 (void)RT_CONCAT(OP_,a_Upper); \
13026 (void)RT_CONCAT(OP_PARM_,a_Op1); \
13027 (void)RT_CONCAT(OP_PARM_,a_Op2); \
13028 (void)RT_CONCAT(OP_PARM_,a_Op3); \
13029 (void)RT_CONCAT(OP_PARM_,a_Op4); \
13030 (void)(a_fDisHints); \
13031 (void)(a_fIemHints); \
13032 } while (0)
13033
13034#else
13035# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
13036
13037# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
13038 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
13039# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
13040 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
13041# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
13042 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
13043# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
13044 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
13045# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
13046 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
13047
13048#endif
13049
13050#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
13051 IEMOP_MNEMONIC0EX(a_Lower, \
13052 #a_Lower, \
13053 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
13054#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
13055 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
13056 #a_Lower " " #a_Op1, \
13057 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
13058#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
13059 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
13060 #a_Lower " " #a_Op1 "," #a_Op2, \
13061 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
13062#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
13063 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
13064 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
13065 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
13066#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
13067 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
13068 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
13069 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
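/*
 * Illustrative sketch only: a decoder function typically starts with one of the
 * IEMOP_MNEMONIC* macros, which bumps the per-instruction statistics counter
 * and, in debug builds, logs the mnemonic.  The operand form and hint values
 * below are typical ones, not quoted from a specific decoder:
 *
 *     IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 */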
13070
13071/** @} */
13072
13073
13074/** @name Opcode Helpers.
13075 * @{
13076 */
13077
13078#ifdef IN_RING3
13079# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
13080 do { \
13081 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
13082 else \
13083 { \
13084 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
13085 return IEMOP_RAISE_INVALID_OPCODE(); \
13086 } \
13087 } while (0)
13088#else
13089# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
13090 do { \
13091 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
13092 else return IEMOP_RAISE_INVALID_OPCODE(); \
13093 } while (0)
13094#endif
13095
13096/** The instruction requires a 186 or later. */
13097#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
13098# define IEMOP_HLP_MIN_186() do { } while (0)
13099#else
13100# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
13101#endif
13102
13103/** The instruction requires a 286 or later. */
13104#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
13105# define IEMOP_HLP_MIN_286() do { } while (0)
13106#else
13107# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
13108#endif
13109
13110/** The instruction requires a 386 or later. */
13111#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
13112# define IEMOP_HLP_MIN_386() do { } while (0)
13113#else
13114# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
13115#endif
13116
13117/** The instruction requires a 386 or later if the given expression is true. */
13118#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
13119# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
13120#else
13121# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
13122#endif
13123
13124/** The instruction requires a 486 or later. */
13125#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
13126# define IEMOP_HLP_MIN_486() do { } while (0)
13127#else
13128# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
13129#endif
13130
13131/** The instruction requires a Pentium (586) or later. */
13132#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
13133# define IEMOP_HLP_MIN_586() do { } while (0)
13134#else
13135# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
13136#endif
13137
13138/** The instruction requires a PentiumPro (686) or later. */
13139#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
13140# define IEMOP_HLP_MIN_686() do { } while (0)
13141#else
13142# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
13143#endif
13144
13145
13146/** The instruction raises an \#UD in real and V8086 mode. */
13147#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
13148 do \
13149 { \
13150 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
13151 else return IEMOP_RAISE_INVALID_OPCODE(); \
13152 } while (0)
13153
13154#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13155/** The instruction raises an \#UD in real and V8086 mode, or in long mode when
13156 * not using a 64-bit code segment (applicable to all VMX instructions
13157 * except VMCALL).
13158 */
13159#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
13160 do \
13161 { \
13162 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
13163 && ( !IEM_IS_LONG_MODE(pVCpu) \
13164 || IEM_IS_64BIT_CODE(pVCpu))) \
13165 { /* likely */ } \
13166 else \
13167 { \
13168 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
13169 { \
13170 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
13171 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
13172 return IEMOP_RAISE_INVALID_OPCODE(); \
13173 } \
13174 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
13175 { \
13176 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
13177 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
13178 return IEMOP_RAISE_INVALID_OPCODE(); \
13179 } \
13180 } \
13181 } while (0)
13182
13183/** The instruction can only be executed in VMX operation (VMX root mode and
13184 * non-root mode).
13185 *
13186 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
13187 */
13188# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
13189 do \
13190 { \
13191 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
13192 else \
13193 { \
13194 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
13195 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
13196 return IEMOP_RAISE_INVALID_OPCODE(); \
13197 } \
13198 } while (0)
13199#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13200
13201/** The instruction is not available in 64-bit mode; it throws \#UD if we're in
13202 * 64-bit mode. */
13203#define IEMOP_HLP_NO_64BIT() \
13204 do \
13205 { \
13206 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
13207 return IEMOP_RAISE_INVALID_OPCODE(); \
13208 } while (0)
13209
13210/** The instruction is only available in 64-bit mode; it throws \#UD if we're
13211 * not in 64-bit mode. */
13212#define IEMOP_HLP_ONLY_64BIT() \
13213 do \
13214 { \
13215 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
13216 return IEMOP_RAISE_INVALID_OPCODE(); \
13217 } while (0)
13218
13219/** The instruction defaults to 64-bit operand size if in 64-bit mode. */
13220#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
13221 do \
13222 { \
13223 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
13224 iemRecalEffOpSize64Default(pVCpu); \
13225 } while (0)
13226
13227/** The instruction has a 64-bit operand size if in 64-bit mode. */
13228#define IEMOP_HLP_64BIT_OP_SIZE() \
13229 do \
13230 { \
13231 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
13232 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
13233 } while (0)
13234
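/*
 * Illustrative usage sketch (not part of the build): near branches and the
 * push/pop family default to a 64-bit operand size in long mode, which is
 * what IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() expresses - a 66h prefix can still
 * select 16-bit, but 32-bit is not encodable there.  The handler name is
 * hypothetical and FNIEMOP_DEF is assumed from the instruction decoder headers.
 *
 * @code
 *  FNIEMOP_DEF(iemOpExample_push_rAX)      // hypothetical handler name
 *  {
 *      IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();  // 64-bit default operand size in long mode
 *      ...                                 // IEM_MC_* body goes here
 *  }
 * @endcode
 */
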
13235/** Only a REX prefix immediately preceding the first opcode byte takes
13236 * effect. This macro helps ensure this and logs bad guest code. */
13237#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
13238 do \
13239 { \
13240 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
13241 { \
13242 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
13243 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
13244 pVCpu->iem.s.uRexB = 0; \
13245 pVCpu->iem.s.uRexIndex = 0; \
13246 pVCpu->iem.s.uRexReg = 0; \
13247 iemRecalEffOpSize(pVCpu); \
13248 } \
13249 } while (0)
13250
13251/**
13252 * Done decoding.
13253 */
13254#define IEMOP_HLP_DONE_DECODING() \
13255 do \
13256 { \
13257 /*nothing for now, maybe later... */ \
13258 } while (0)
13259
13260/**
13261 * Done decoding, raise \#UD exception if lock prefix present.
13262 */
13263#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
13264 do \
13265 { \
13266 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13267 { /* likely */ } \
13268 else \
13269 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13270 } while (0)
13271
13272
13273/**
13274 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13275 * repnz or size prefixes are present, or if in real or v8086 mode.
13276 */
13277#define IEMOP_HLP_DONE_VEX_DECODING() \
13278 do \
13279 { \
13280 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13281 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13282 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
13283 { /* likely */ } \
13284 else \
13285 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13286 } while (0)
13287
13288/**
13289 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13290 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
13291 */
13292#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
13293 do \
13294 { \
13295 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13296 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13297 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
13298 && pVCpu->iem.s.uVexLength == 0)) \
13299 { /* likely */ } \
13300 else \
13301 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13302 } while (0)
13303
13304
13305/**
13306 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
13307 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
13308 * register 0, or if in real or v8086 mode.
13309 */
13310#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
13311 do \
13312 { \
13313 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13314 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
13315 && !pVCpu->iem.s.uVex3rdReg \
13316 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
13317 { /* likely */ } \
13318 else \
13319 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13320 } while (0)
13321
13322/**
13323 * Done decoding VEX, no V, L=0.
13324 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
13325 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
13326 */
13327#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
13328 do \
13329 { \
13330 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
13331 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
13332 && pVCpu->iem.s.uVexLength == 0 \
13333 && pVCpu->iem.s.uVex3rdReg == 0 \
13334 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
13335 { /* likely */ } \
13336 else \
13337 return IEMOP_RAISE_INVALID_OPCODE(); \
13338 } while (0)
13339
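/*
 * Illustrative usage sketch (not part of the build): picking the right
 * IEMOP_HLP_DONE_VEX_DECODING* variant.  A VEX-encoded instruction that takes
 * no third register operand must check that VEX.vvvv selects register 0 (the
 * field is stored inverted, so 1111b on the wire), which is what the _NO_VVVV
 * variants verify via uVex3rdReg.  The handler name is hypothetical and
 * FNIEMOP_DEF is assumed from the instruction decoder headers.
 *
 * @code
 *  FNIEMOP_DEF(iemOpExample_vmovd_Vd_Ed)   // hypothetical handler name
 *  {
 *      // #UD on lock/rep/66/REX prefixes, real/v86 mode, VEX.L != 0 or VEX.vvvv != 0:
 *      IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV();
 *      ...                                 // IEM_MC_* body goes here
 *  }
 * @endcode
 */
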
13340#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
13341 do \
13342 { \
13343 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13344 { /* likely */ } \
13345 else \
13346 { \
13347 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
13348 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13349 } \
13350 } while (0)
13351#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
13352 do \
13353 { \
13354 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
13355 { /* likely */ } \
13356 else \
13357 { \
13358 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
13359 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
13360 } \
13361 } while (0)
13362
13363/**
13364 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
13365 * are present.
13366 */
13367#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
13368 do \
13369 { \
13370 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
13371 { /* likely */ } \
13372 else \
13373 return IEMOP_RAISE_INVALID_OPCODE(); \
13374 } while (0)
13375
13376/**
13377 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
13378 * prefixes are present.
13379 */
13380#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
13381 do \
13382 { \
13383 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
13384 { /* likely */ } \
13385 else \
13386 return IEMOP_RAISE_INVALID_OPCODE(); \
13387 } while (0)
13388
13389
13390/**
13391 * Calculates the effective address of a ModR/M memory operand.
13392 *
13393 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13394 *
13395 * @return Strict VBox status code.
13396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13397 * @param bRm The ModRM byte.
13398 * @param cbImm The size of any immediate following the
13399 * effective address opcode bytes. Important for
13400 * RIP relative addressing.
13401 * @param pGCPtrEff Where to return the effective address.
13402 */
13403IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
13404{
13405 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
13406# define SET_SS_DEF() \
13407 do \
13408 { \
13409 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13410 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13411 } while (0)
13412
13413 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13414 {
13415/** @todo Check the effective address size crap! */
13416 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13417 {
13418 uint16_t u16EffAddr;
13419
13420 /* Handle the disp16 form with no registers first. */
13421 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13422 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13423 else
13424 {
13425                /* Get the displacement. */
13426 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13427 {
13428 case 0: u16EffAddr = 0; break;
13429 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13430 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13431 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13432 }
13433
13434 /* Add the base and index registers to the disp. */
13435 switch (bRm & X86_MODRM_RM_MASK)
13436 {
13437 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13438 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13439 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13440 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13441 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13442 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13443 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13444 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13445 }
13446 }
13447
13448 *pGCPtrEff = u16EffAddr;
13449 }
13450 else
13451 {
13452 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13453 uint32_t u32EffAddr;
13454
13455 /* Handle the disp32 form with no registers first. */
13456 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13457 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13458 else
13459 {
13460 /* Get the register (or SIB) value. */
13461 switch ((bRm & X86_MODRM_RM_MASK))
13462 {
13463 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13464 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13465 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13466 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13467 case 4: /* SIB */
13468 {
13469 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13470
13471 /* Get the index and scale it. */
13472 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13473 {
13474 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13475 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13476 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13477 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13478 case 4: u32EffAddr = 0; /*none */ break;
13479 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13480 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13481 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13483 }
13484 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13485
13486 /* add base */
13487 switch (bSib & X86_SIB_BASE_MASK)
13488 {
13489 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13490 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13491 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13492 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13493 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13494 case 5:
13495 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13496 {
13497 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13498 SET_SS_DEF();
13499 }
13500 else
13501 {
13502 uint32_t u32Disp;
13503 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13504 u32EffAddr += u32Disp;
13505 }
13506 break;
13507 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13508 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13509 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13510 }
13511 break;
13512 }
13513 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13514 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13515 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13516 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13517 }
13518
13519 /* Get and add the displacement. */
13520 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13521 {
13522 case 0:
13523 break;
13524 case 1:
13525 {
13526 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13527 u32EffAddr += i8Disp;
13528 break;
13529 }
13530 case 2:
13531 {
13532 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13533 u32EffAddr += u32Disp;
13534 break;
13535 }
13536 default:
13537 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13538 }
13539
13540 }
13541 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13542 *pGCPtrEff = u32EffAddr;
13543 else
13544 {
13545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13546 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13547 }
13548 }
13549 }
13550 else
13551 {
13552 uint64_t u64EffAddr;
13553
13554 /* Handle the rip+disp32 form with no registers first. */
13555 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13556 {
13557 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13558 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13559 }
13560 else
13561 {
13562 /* Get the register (or SIB) value. */
13563 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13564 {
13565 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13566 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13567 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13568 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13569 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13570 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13571 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13572 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13573 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13574 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13575 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13576 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13577 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13578 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13579 /* SIB */
13580 case 4:
13581 case 12:
13582 {
13583 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13584
13585 /* Get the index and scale it. */
13586 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13587 {
13588 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13589 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13590 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13591 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13592 case 4: u64EffAddr = 0; /*none */ break;
13593 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13594 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13595 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13596 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13597 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13598 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13599 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13600 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13601 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13602 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13603 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13604 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13605 }
13606 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13607
13608 /* add base */
13609 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13610 {
13611 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13612 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13613 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13614 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13615 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13616 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13617 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13618 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13619 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13620 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13621 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13622 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13623 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13624 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13625 /* complicated encodings */
13626 case 5:
13627 case 13:
13628 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13629 {
13630 if (!pVCpu->iem.s.uRexB)
13631 {
13632 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13633 SET_SS_DEF();
13634 }
13635 else
13636 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13637 }
13638 else
13639 {
13640 uint32_t u32Disp;
13641 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13642 u64EffAddr += (int32_t)u32Disp;
13643 }
13644 break;
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13646 }
13647 break;
13648 }
13649 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13650 }
13651
13652 /* Get and add the displacement. */
13653 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13654 {
13655 case 0:
13656 break;
13657 case 1:
13658 {
13659 int8_t i8Disp;
13660 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13661 u64EffAddr += i8Disp;
13662 break;
13663 }
13664 case 2:
13665 {
13666 uint32_t u32Disp;
13667 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13668 u64EffAddr += (int32_t)u32Disp;
13669 break;
13670 }
13671 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13672 }
13673
13674 }
13675
13676 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13677 *pGCPtrEff = u64EffAddr;
13678 else
13679 {
13680 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13681 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13682 }
13683 }
13684
13685 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13686 return VINF_SUCCESS;
13687}
13688
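/*
 * Illustrative sketch (not part of the build): the 16-bit ModR/M addressing
 * table walked by the code above, written out as a tiny standalone helper so
 * the base/index pairs are explicit.  The helper name is hypothetical, the
 * caller is assumed to pass uDisp already fetched (0, sign-extended disp8 or
 * disp16 depending on mod), and segment selection is ignored entirely.
 *
 * @code
 *  static uint16_t iemExampleCalc16BitEa(uint8_t bRm, uint16_t uDisp,
 *                                        uint16_t bx, uint16_t bp, uint16_t si, uint16_t di)
 *  {
 *      static const struct { uint8_t fBx, fBp, fSi, fDi; } s_aTab[8] =
 *      {   // BX+SI      BX+DI      BP+SI      BP+DI      SI         DI         BP         BX
 *          {1,0,1,0}, {1,0,0,1}, {0,1,1,0}, {0,1,0,1}, {0,0,1,0}, {0,0,0,1}, {0,1,0,0}, {1,0,0,0}
 *      };
 *      if (   ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK) == 0
 *          && (bRm & X86_MODRM_RM_MASK) == 6)
 *          return uDisp;                       // the mod=0, r/m=6 [disp16] special case
 *      uint8_t const iRm = bRm & X86_MODRM_RM_MASK;
 *      return (uint16_t)(  uDisp
 *                        + (s_aTab[iRm].fBx ? bx : 0) + (s_aTab[iRm].fBp ? bp : 0)
 *                        + (s_aTab[iRm].fSi ? si : 0) + (s_aTab[iRm].fDi ? di : 0));
 *  }
 * @endcode
 */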
13689
13690/**
13691 * Calculates the effective address of a ModR/M memory operand.
13692 *
13693 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13694 *
13695 * @return Strict VBox status code.
13696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13697 * @param bRm The ModRM byte.
13698 * @param cbImm The size of any immediate following the
13699 * effective address opcode bytes. Important for
13700 * RIP relative addressing.
13701 * @param pGCPtrEff Where to return the effective address.
13702 * @param offRsp RSP displacement.
13703 */
13704IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13705{
13706     Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13707# define SET_SS_DEF() \
13708 do \
13709 { \
13710 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13711 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13712 } while (0)
13713
13714 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13715 {
13716/** @todo Check the effective address size crap! */
13717 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13718 {
13719 uint16_t u16EffAddr;
13720
13721 /* Handle the disp16 form with no registers first. */
13722 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13723 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13724 else
13725 {
13726                /* Get the displacement. */
13727 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13728 {
13729 case 0: u16EffAddr = 0; break;
13730 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13731 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13732 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13733 }
13734
13735 /* Add the base and index registers to the disp. */
13736 switch (bRm & X86_MODRM_RM_MASK)
13737 {
13738 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13739 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13740 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13741 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13742 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13743 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13744 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13745 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13746 }
13747 }
13748
13749 *pGCPtrEff = u16EffAddr;
13750 }
13751 else
13752 {
13753 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13754 uint32_t u32EffAddr;
13755
13756 /* Handle the disp32 form with no registers first. */
13757 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13758 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13759 else
13760 {
13761 /* Get the register (or SIB) value. */
13762 switch ((bRm & X86_MODRM_RM_MASK))
13763 {
13764 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13765 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13766 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13767 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13768 case 4: /* SIB */
13769 {
13770 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13771
13772 /* Get the index and scale it. */
13773 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13774 {
13775 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13776 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13777 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13778 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13779 case 4: u32EffAddr = 0; /*none */ break;
13780 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13781 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13782 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13783 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13784 }
13785 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13786
13787 /* add base */
13788 switch (bSib & X86_SIB_BASE_MASK)
13789 {
13790 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13791 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13792 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13793 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13794 case 4:
13795 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13796 SET_SS_DEF();
13797 break;
13798 case 5:
13799 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13800 {
13801 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13802 SET_SS_DEF();
13803 }
13804 else
13805 {
13806 uint32_t u32Disp;
13807 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13808 u32EffAddr += u32Disp;
13809 }
13810 break;
13811 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13812 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13813 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13814 }
13815 break;
13816 }
13817 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13818 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13819 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13820 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13821 }
13822
13823 /* Get and add the displacement. */
13824 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13825 {
13826 case 0:
13827 break;
13828 case 1:
13829 {
13830 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13831 u32EffAddr += i8Disp;
13832 break;
13833 }
13834 case 2:
13835 {
13836 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13837 u32EffAddr += u32Disp;
13838 break;
13839 }
13840 default:
13841 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13842 }
13843
13844 }
13845 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13846 *pGCPtrEff = u32EffAddr;
13847 else
13848 {
13849 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13850 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13851 }
13852 }
13853 }
13854 else
13855 {
13856 uint64_t u64EffAddr;
13857
13858 /* Handle the rip+disp32 form with no registers first. */
13859 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13860 {
13861 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13862 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13863 }
13864 else
13865 {
13866 /* Get the register (or SIB) value. */
13867 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13868 {
13869 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13870 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13871 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13872 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13873 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13874 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13875 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13876 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13877 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13878 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13879 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13880 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13881 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13882 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13883 /* SIB */
13884 case 4:
13885 case 12:
13886 {
13887 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13888
13889 /* Get the index and scale it. */
13890 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13891 {
13892 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13893 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13894 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13895 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13896 case 4: u64EffAddr = 0; /*none */ break;
13897 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13898 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13899 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13900 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13901 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13902 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13903 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13904 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13905 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13906 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13907 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13908 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13909 }
13910 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13911
13912 /* add base */
13913 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13914 {
13915 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13916 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13917 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13918 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13919 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13920 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13921 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13922 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13923 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13924 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13925 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13926 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13927 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13928 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13929 /* complicated encodings */
13930 case 5:
13931 case 13:
13932 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13933 {
13934 if (!pVCpu->iem.s.uRexB)
13935 {
13936 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13937 SET_SS_DEF();
13938 }
13939 else
13940 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13941 }
13942 else
13943 {
13944 uint32_t u32Disp;
13945 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13946 u64EffAddr += (int32_t)u32Disp;
13947 }
13948 break;
13949 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13950 }
13951 break;
13952 }
13953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13954 }
13955
13956 /* Get and add the displacement. */
13957 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13958 {
13959 case 0:
13960 break;
13961 case 1:
13962 {
13963 int8_t i8Disp;
13964 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13965 u64EffAddr += i8Disp;
13966 break;
13967 }
13968 case 2:
13969 {
13970 uint32_t u32Disp;
13971 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13972 u64EffAddr += (int32_t)u32Disp;
13973 break;
13974 }
13975 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13976 }
13977
13978 }
13979
13980 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13981 *pGCPtrEff = u64EffAddr;
13982 else
13983 {
13984 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13985 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13986 }
13987 }
13988
13989     Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13990 return VINF_SUCCESS;
13991}
13992
13993
13994#ifdef IEM_WITH_SETJMP
13995/**
13996 * Calculates the effective address of a ModR/M memory operand.
13997 *
13998 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13999 *
14000 * May longjmp on internal error.
14001 *
14002 * @return The effective address.
14003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14004 * @param bRm The ModRM byte.
14005 * @param cbImm The size of any immediate following the
14006 * effective address opcode bytes. Important for
14007 * RIP relative addressing.
14008 */
14009IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
14010{
14011 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
14012# define SET_SS_DEF() \
14013 do \
14014 { \
14015 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
14016 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
14017 } while (0)
14018
14019 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
14020 {
14021/** @todo Check the effective address size crap! */
14022 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
14023 {
14024 uint16_t u16EffAddr;
14025
14026 /* Handle the disp16 form with no registers first. */
14027 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
14028 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
14029 else
14030 {
14031                /* Get the displacement. */
14032 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
14033 {
14034 case 0: u16EffAddr = 0; break;
14035 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
14036 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
14037 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
14038 }
14039
14040 /* Add the base and index registers to the disp. */
14041 switch (bRm & X86_MODRM_RM_MASK)
14042 {
14043 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
14044 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
14045 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
14046 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
14047 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
14048 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
14049 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
14050 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
14051 }
14052 }
14053
14054 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
14055 return u16EffAddr;
14056 }
14057
14058 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
14059 uint32_t u32EffAddr;
14060
14061 /* Handle the disp32 form with no registers first. */
14062 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
14063 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
14064 else
14065 {
14066 /* Get the register (or SIB) value. */
14067 switch ((bRm & X86_MODRM_RM_MASK))
14068 {
14069 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
14070 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
14071 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
14072 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
14073 case 4: /* SIB */
14074 {
14075 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
14076
14077 /* Get the index and scale it. */
14078 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
14079 {
14080 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
14081 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
14082 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
14083 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
14084 case 4: u32EffAddr = 0; /*none */ break;
14085 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
14086 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
14087 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
14088 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14089 }
14090 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
14091
14092 /* add base */
14093 switch (bSib & X86_SIB_BASE_MASK)
14094 {
14095 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
14096 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
14097 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
14098 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
14099 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
14100 case 5:
14101 if ((bRm & X86_MODRM_MOD_MASK) != 0)
14102 {
14103 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
14104 SET_SS_DEF();
14105 }
14106 else
14107 {
14108 uint32_t u32Disp;
14109 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14110 u32EffAddr += u32Disp;
14111 }
14112 break;
14113 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
14114 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
14115 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14116 }
14117 break;
14118 }
14119 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
14120 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
14121 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
14122 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14123 }
14124
14125 /* Get and add the displacement. */
14126 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
14127 {
14128 case 0:
14129 break;
14130 case 1:
14131 {
14132 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
14133 u32EffAddr += i8Disp;
14134 break;
14135 }
14136 case 2:
14137 {
14138 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14139 u32EffAddr += u32Disp;
14140 break;
14141 }
14142 default:
14143 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
14144 }
14145 }
14146
14147 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
14148 {
14149 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
14150 return u32EffAddr;
14151 }
14152 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
14153 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
14154 return u32EffAddr & UINT16_MAX;
14155 }
14156
14157 uint64_t u64EffAddr;
14158
14159 /* Handle the rip+disp32 form with no registers first. */
14160 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
14161 {
14162 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
14163 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
14164 }
14165 else
14166 {
14167 /* Get the register (or SIB) value. */
14168 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
14169 {
14170 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
14171 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
14172 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
14173 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
14174 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
14175 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
14176 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
14177 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
14178 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
14179 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
14180 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
14181 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
14182 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
14183 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
14184 /* SIB */
14185 case 4:
14186 case 12:
14187 {
14188 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
14189
14190 /* Get the index and scale it. */
14191 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
14192 {
14193 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
14194 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
14195 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
14196 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
14197 case 4: u64EffAddr = 0; /*none */ break;
14198 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
14199 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
14200 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
14201 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
14202 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
14203 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
14204 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
14205 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
14206 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
14207 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
14208 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
14209 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14210 }
14211 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
14212
14213 /* add base */
14214 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
14215 {
14216 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
14217 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
14218 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
14219 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
14220 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
14221 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
14222 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
14223 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
14224 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
14225 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
14226 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
14227 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
14228 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
14229 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
14230 /* complicated encodings */
14231 case 5:
14232 case 13:
14233 if ((bRm & X86_MODRM_MOD_MASK) != 0)
14234 {
14235 if (!pVCpu->iem.s.uRexB)
14236 {
14237 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
14238 SET_SS_DEF();
14239 }
14240 else
14241 u64EffAddr += pVCpu->cpum.GstCtx.r13;
14242 }
14243 else
14244 {
14245 uint32_t u32Disp;
14246 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14247 u64EffAddr += (int32_t)u32Disp;
14248 }
14249 break;
14250 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14251 }
14252 break;
14253 }
14254 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
14255 }
14256
14257 /* Get and add the displacement. */
14258 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
14259 {
14260 case 0:
14261 break;
14262 case 1:
14263 {
14264 int8_t i8Disp;
14265 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
14266 u64EffAddr += i8Disp;
14267 break;
14268 }
14269 case 2:
14270 {
14271 uint32_t u32Disp;
14272 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
14273 u64EffAddr += (int32_t)u32Disp;
14274 break;
14275 }
14276 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
14277 }
14278
14279 }
14280
14281 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
14282 {
14283 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
14284 return u64EffAddr;
14285 }
14286 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
14287 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
14288 return u64EffAddr & UINT32_MAX;
14289}
14290#endif /* IEM_WITH_SETJMP */
14291
14292/** @} */
14293
14294
14295
14296/*
14297 * Include the instructions
14298 */
14299#include "IEMAllInstructions.cpp.h"
14300
14301
14302
14303#ifdef LOG_ENABLED
14304/**
14305 * Logs the current instruction.
14306 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14307 * @param fSameCtx Set if we have the same context information as the VMM,
14308 * clear if we may have already executed an instruction in
14309 * our debug context. When clear, we assume IEMCPU holds
14310 * valid CPU mode info.
14311 *
14312 * The @a fSameCtx parameter is now misleading and obsolete.
14313 * @param pszFunction The IEM function doing the execution.
14314 */
14315IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
14316{
14317# ifdef IN_RING3
14318 if (LogIs2Enabled())
14319 {
14320 char szInstr[256];
14321 uint32_t cbInstr = 0;
14322 if (fSameCtx)
14323 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14324 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14325 szInstr, sizeof(szInstr), &cbInstr);
14326 else
14327 {
14328 uint32_t fFlags = 0;
14329 switch (pVCpu->iem.s.enmCpuMode)
14330 {
14331 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14332 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14333 case IEMMODE_16BIT:
14334 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
14335 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14336 else
14337 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14338 break;
14339 }
14340 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
14341 szInstr, sizeof(szInstr), &cbInstr);
14342 }
14343
14344 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
14345 Log2(("**** %s\n"
14346 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14347 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14348 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14349 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14350 " %s\n"
14351 , pszFunction,
14352 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
14353 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
14354 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
14355 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
14356 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14357 szInstr));
14358
14359 if (LogIs3Enabled())
14360 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14361 }
14362 else
14363# endif
14364 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
14365 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
14366 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
14367}
14368#endif /* LOG_ENABLED */
14369
14370
14371#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14372/**
14373 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
14374 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
14375 *
14376 * @returns Modified rcStrict.
14377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14378 * @param rcStrict The instruction execution status.
14379 */
14380static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14381{
14382 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
14383 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
14384 {
14385 /* VMX preemption timer takes priority over NMI-window exits. */
14386 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
14387 {
14388 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
14389 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
14390 }
14391 /*
14392 * Check remaining intercepts.
14393 *
14394 * NMI-window and Interrupt-window VM-exits.
14395 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
14396 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
14397 *
14398 * See Intel spec. 26.7.6 "NMI-Window Exiting".
14399 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
14400 */
14401 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
14402 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14403 && !TRPMHasTrap(pVCpu))
14404 {
14405 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
14406 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
14407 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
14408 {
14409 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
14410 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
14411 }
14412 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
14413 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
14414 {
14415 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
14416 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
14417 }
14418 }
14419 }
14420 /* TPR-below threshold/APIC write has the highest priority. */
14421 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
14422 {
14423 rcStrict = iemVmxApicWriteEmulation(pVCpu);
14424 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14425 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
14426 }
14427 /* MTF takes priority over VMX-preemption timer. */
14428 else
14429 {
14430 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
14431 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
14432 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
14433 }
14434 return rcStrict;
14435}
14436#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
14437
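/*
 * Illustrative sketch (not part of the build): the priority order implemented
 * by iemHandleNestedInstructionBoundraryFFs above, spelled out as a table.
 * The enum and its names are hypothetical; only the ordering is taken from
 * the code and the Intel SDM sections it cites.
 *
 * @code
 *  typedef enum IEMEXAMPLEVMXBOUNDARYEVENT          // hypothetical
 *  {
 *      kIemExampleVmxEvent_ApicWrite = 0,           // VMCPU_FF_VMX_APIC_WRITE    - highest priority
 *      kIemExampleVmxEvent_Mtf,                     // VMCPU_FF_VMX_MTF
 *      kIemExampleVmxEvent_PreemptTimer,            // VMCPU_FF_VMX_PREEMPT_TIMER
 *      kIemExampleVmxEvent_NmiWindow,               // VMCPU_FF_VMX_NMI_WINDOW
 *      kIemExampleVmxEvent_IntWindow                // VMCPU_FF_VMX_INT_WINDOW    - lowest priority
 *  } IEMEXAMPLEVMXBOUNDARYEVENT;
 * @endcode
 */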
14438
14439/**
14440 * Makes status code adjustments (pass up from I/O and access handlers)
14441 * as well as maintaining statistics.
14442 *
14443 * @returns Strict VBox status code to pass up.
14444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14445 * @param rcStrict The status from executing an instruction.
14446 */
14447DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14448{
14449 if (rcStrict != VINF_SUCCESS)
14450 {
14451 if (RT_SUCCESS(rcStrict))
14452 {
14453 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14454 || rcStrict == VINF_IOM_R3_IOPORT_READ
14455 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14456 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14457 || rcStrict == VINF_IOM_R3_MMIO_READ
14458 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14459 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14460 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14461 || rcStrict == VINF_CPUM_R3_MSR_READ
14462 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14463 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14464 || rcStrict == VINF_EM_RAW_TO_R3
14465 || rcStrict == VINF_EM_TRIPLE_FAULT
14466 || rcStrict == VINF_GIM_R3_HYPERCALL
14467 /* raw-mode / virt handlers only: */
14468 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14469 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14470 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14471 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14472 || rcStrict == VINF_SELM_SYNC_GDT
14473 || rcStrict == VINF_CSAM_PENDING_ACTION
14474 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14475 /* nested hw.virt codes: */
14476 || rcStrict == VINF_VMX_VMEXIT
14477 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
14478 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
14479 || rcStrict == VINF_SVM_VMEXIT
14480 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14481/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
14482 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14483#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14484 if ( rcStrict == VINF_VMX_VMEXIT
14485 && rcPassUp == VINF_SUCCESS)
14486 rcStrict = VINF_SUCCESS;
14487 else
14488#endif
14489#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14490 if ( rcStrict == VINF_SVM_VMEXIT
14491 && rcPassUp == VINF_SUCCESS)
14492 rcStrict = VINF_SUCCESS;
14493 else
14494#endif
14495 if (rcPassUp == VINF_SUCCESS)
14496 pVCpu->iem.s.cRetInfStatuses++;
14497 else if ( rcPassUp < VINF_EM_FIRST
14498 || rcPassUp > VINF_EM_LAST
14499 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14500 {
14501 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14502 pVCpu->iem.s.cRetPassUpStatus++;
14503 rcStrict = rcPassUp;
14504 }
14505 else
14506 {
14507 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14508 pVCpu->iem.s.cRetInfStatuses++;
14509 }
14510 }
14511 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14512 pVCpu->iem.s.cRetAspectNotImplemented++;
14513 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14514 pVCpu->iem.s.cRetInstrNotImplemented++;
14515 else
14516 pVCpu->iem.s.cRetErrStatuses++;
14517 }
14518 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14519 {
14520 pVCpu->iem.s.cRetPassUpStatus++;
14521 rcStrict = pVCpu->iem.s.rcPassUp;
14522 }
14523
14524 return rcStrict;
14525}
14526
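/*
 * Illustrative sketch (not part of the build): the pass-up priority rule that
 * iemExecStatusCodeFiddling applies above, isolated into a tiny predicate.
 * Anything outside the VINF_EM_FIRST..VINF_EM_LAST range is always passed up,
 * and among EM statuses the lower (more urgent) value wins.  The function name
 * is hypothetical and rcStrict is the plain int32_t value (VBOXSTRICTRC_VAL).
 *
 * @code
 *  static bool iemExampleShouldTakePassUp(int32_t rcPassUp, int32_t rcStrict)  // hypothetical
 *  {
 *      if (rcPassUp == VINF_SUCCESS)
 *          return false;                   // nothing pending, keep rcStrict
 *      return rcPassUp < VINF_EM_FIRST     // non-EM informational status: always passed up
 *          || rcPassUp > VINF_EM_LAST
 *          || rcPassUp < rcStrict;         // lower EM status value = higher priority
 *  }
 * @endcode
 */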
14527
14528/**
14529 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14530 * IEMExecOneWithPrefetchedByPC.
14531 *
14532 * Similar code is found in IEMExecLots.
14533 *
14534 * @return Strict VBox status code.
14535 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14536 * @param fExecuteInhibit If set, execute the instruction following CLI,
14537 * POP SS and MOV SS,GR.
14538 * @param pszFunction The calling function name.
14539 */
14540DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
14541{
14542 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14543 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14544 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14545 RT_NOREF_PV(pszFunction);
14546
14547#ifdef IEM_WITH_SETJMP
14548 VBOXSTRICTRC rcStrict;
14549 jmp_buf JmpBuf;
14550 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14551 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14552 if ((rcStrict = setjmp(JmpBuf)) == 0)
14553 {
14554 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14555 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14556 }
14557 else
14558 pVCpu->iem.s.cLongJumps++;
14559 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14560#else
14561 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14562 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14563#endif
14564 if (rcStrict == VINF_SUCCESS)
14565 pVCpu->iem.s.cInstructions++;
14566 if (pVCpu->iem.s.cActiveMappings > 0)
14567 {
14568 Assert(rcStrict != VINF_SUCCESS);
14569 iemMemRollback(pVCpu);
14570 }
14571 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14572 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14573 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14574
14575//#ifdef DEBUG
14576// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14577//#endif
14578
14579#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14580 /*
14581 * Perform any VMX nested-guest instruction boundary actions.
14582 *
14583 * If any of these causes a VM-exit, we must skip executing the next
14584 * instruction (would run into stale page tables). A VM-exit makes sure
14585 * there is no interrupt-inhibition, so that should ensure we don't go on
14586 * to try executing the next instruction. Clearing fExecuteInhibit is
14587 * problematic because of the setjmp/longjmp clobbering above.
14588 */
14589 if ( rcStrict == VINF_SUCCESS
14590 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14591 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14592 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14593#endif
14594
14595 /* Execute the next instruction as well if a cli, pop ss or
14596 mov ss, Gr has just completed successfully. */
14597 if ( fExecuteInhibit
14598 && rcStrict == VINF_SUCCESS
14599 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14600 && EMIsInhibitInterruptsActive(pVCpu))
14601 {
14602 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14603 if (rcStrict == VINF_SUCCESS)
14604 {
14605#ifdef LOG_ENABLED
14606 iemLogCurInstr(pVCpu, false, pszFunction);
14607#endif
14608#ifdef IEM_WITH_SETJMP
14609 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14610 if ((rcStrict = setjmp(JmpBuf)) == 0)
14611 {
14612 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14613 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14614 }
14615 else
14616 pVCpu->iem.s.cLongJumps++;
14617 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14618#else
14619 IEM_OPCODE_GET_NEXT_U8(&b);
14620 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14621#endif
14622 if (rcStrict == VINF_SUCCESS)
14623 pVCpu->iem.s.cInstructions++;
14624 if (pVCpu->iem.s.cActiveMappings > 0)
14625 {
14626 Assert(rcStrict != VINF_SUCCESS);
14627 iemMemRollback(pVCpu);
14628 }
14629 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14630 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14631 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14632 }
14633 else if (pVCpu->iem.s.cActiveMappings > 0)
14634 iemMemRollback(pVCpu);
14635 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14636 }
14637
14638 /*
14639 * Return value fiddling, statistics and sanity assertions.
14640 */
14641 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14642
14643 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14644 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14645 return rcStrict;
14646}
14647
14648
14649/**
14650 * Execute one instruction.
14651 *
14652 * @return Strict VBox status code.
14653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14654 */
14655VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14656{
14657     AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14658#ifdef LOG_ENABLED
14659 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14660#endif
14661
14662 /*
14663 * Do the decoding and emulation.
14664 */
14665 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14666 if (rcStrict == VINF_SUCCESS)
14667 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14668 else if (pVCpu->iem.s.cActiveMappings > 0)
14669 iemMemRollback(pVCpu);
14670
14671 if (rcStrict != VINF_SUCCESS)
14672 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14673 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14674 return rcStrict;
14675}
14676
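/*
 * Illustrative sketch (not part of the build): a minimal caller loop on top
 * of IEMExecOne.  This is not how EM actually drives IEM; it merely shows
 * that the strict status must be checked after every instruction and that
 * informational statuses terminate the loop as well.  The function name and
 * the instruction budget parameter are hypothetical.
 *
 * @code
 *  static VBOXSTRICTRC iemExampleRunSome(PVMCPUCC pVCpu, uint32_t cMaxInstructions)
 *  {
 *      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *      while (cMaxInstructions-- > 0)
 *      {
 *          rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)   // includes VINF_IOM_R3_*, VINF_EM_* and real errors
 *              break;
 *      }
 *      return rcStrict;
 *  }
 * @endcode
 */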
14677
14678VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14679{
14680 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14681
14682 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14683 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14684 if (rcStrict == VINF_SUCCESS)
14685 {
14686 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14687 if (pcbWritten)
14688 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14689 }
14690 else if (pVCpu->iem.s.cActiveMappings > 0)
14691 iemMemRollback(pVCpu);
14692
14693 return rcStrict;
14694}
14695
14696
14697VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14698 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14699{
14700 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14701
14702 VBOXSTRICTRC rcStrict;
14703 if ( cbOpcodeBytes
14704 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14705 {
14706 iemInitDecoder(pVCpu, false, false);
14707#ifdef IEM_WITH_CODE_TLB
14708 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14709 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14710 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14711 pVCpu->iem.s.offCurInstrStart = 0;
14712 pVCpu->iem.s.offInstrNextByte = 0;
14713#else
14714 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14715 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14716#endif
14717 rcStrict = VINF_SUCCESS;
14718 }
14719 else
14720 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14721 if (rcStrict == VINF_SUCCESS)
14722 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14723 else if (pVCpu->iem.s.cActiveMappings > 0)
14724 iemMemRollback(pVCpu);
14725
14726 return rcStrict;
14727}
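

/*
 * Illustrative sketch, not part of the original source: feeding opcode bytes
 * that were already fetched elsewhere (e.g. copied out of an exit info
 * structure) to the interpreter so the opcode prefetch can be skipped when
 * RIP still matches.  The helper name and buffer parameters are hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC emExampleReplayFetchedBytes(PVMCPUCC pVCpu, uint8_t const *pabInstr, size_t cbInstr)
{
    /* The context core pointer must be the VCPU's own context core (asserted by the API). */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, pabInstr, cbInstr);
}
#endif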
14728
14729
14730VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14731{
14732 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14733
14734 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14735 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14736 if (rcStrict == VINF_SUCCESS)
14737 {
14738 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14739 if (pcbWritten)
14740 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14741 }
14742 else if (pVCpu->iem.s.cActiveMappings > 0)
14743 iemMemRollback(pVCpu);
14744
14745 return rcStrict;
14746}
14747
14748
14749VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14750 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14751{
14752 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14753
14754 VBOXSTRICTRC rcStrict;
14755 if ( cbOpcodeBytes
14756 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14757 {
14758 iemInitDecoder(pVCpu, true, false);
14759#ifdef IEM_WITH_CODE_TLB
14760 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14761 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14762 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14763 pVCpu->iem.s.offCurInstrStart = 0;
14764 pVCpu->iem.s.offInstrNextByte = 0;
14765#else
14766 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14767 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14768#endif
14769 rcStrict = VINF_SUCCESS;
14770 }
14771 else
14772 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14773 if (rcStrict == VINF_SUCCESS)
14774 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14775 else if (pVCpu->iem.s.cActiveMappings > 0)
14776 iemMemRollback(pVCpu);
14777
14778 return rcStrict;
14779}
14780
14781
14782/**
14783 * May come in handy for debugging DISGetParamSize.
14784 *
14785 * @returns Strict VBox status code.
14786 * @param pVCpu The cross context virtual CPU structure of the
14787 * calling EMT.
14788 * @param pCtxCore The context core structure.
14789 * @param OpcodeBytesPC The PC of the opcode bytes.
14790 * @param pvOpcodeBytes Prefetched opcode bytes.
14791 * @param cbOpcodeBytes Number of prefetched bytes.
14792 * @param pcbWritten Where to return the number of bytes written.
14793 * Optional.
14794 */
14795VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14796 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14797 uint32_t *pcbWritten)
14798{
14799 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14800
14801 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14802 VBOXSTRICTRC rcStrict;
14803 if ( cbOpcodeBytes
14804 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14805 {
14806 iemInitDecoder(pVCpu, true, false);
14807#ifdef IEM_WITH_CODE_TLB
14808 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14809 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14810 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14811 pVCpu->iem.s.offCurInstrStart = 0;
14812 pVCpu->iem.s.offInstrNextByte = 0;
14813#else
14814 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14815 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14816#endif
14817 rcStrict = VINF_SUCCESS;
14818 }
14819 else
14820 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14821 if (rcStrict == VINF_SUCCESS)
14822 {
14823 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14824 if (pcbWritten)
14825 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14826 }
14827 else if (pVCpu->iem.s.cActiveMappings > 0)
14828 iemMemRollback(pVCpu);
14829
14830 return rcStrict;
14831}
14832
14833
14834/**
14835 * For handling split cacheline lock operations when the host has split-lock
14836 * detection enabled.
14837 *
14838 * This will cause the interpreter to disregard the lock prefix and implicit
14839 * locking (xchg).
14840 *
14841 * @returns Strict VBox status code.
14842 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14843 */
14844VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14845{
14846 /*
14847 * Do the decoding and emulation.
14848 */
14849 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14850 if (rcStrict == VINF_SUCCESS)
14851 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14852 else if (pVCpu->iem.s.cActiveMappings > 0)
14853 iemMemRollback(pVCpu);
14854
14855 if (rcStrict != VINF_SUCCESS)
14856 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14857 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14858 return rcStrict;
14859}
14860
14861
14862VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14863{
14864 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14865 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14866
14867 /*
14868 * See if there is an interrupt pending in TRPM, inject it if we can.
14869 */
14870 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14871#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14872 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14873 if (fIntrEnabled)
14874 {
14875 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14876 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14877 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14878 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14879 else
14880 {
14881 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14882 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14883 }
14884 }
14885#else
14886 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14887#endif
14888
14889 /** @todo What if we are injecting an exception and not an interrupt? Is that
14890 * possible here? For now we assert it is indeed only an interrupt. */
14891 if ( fIntrEnabled
14892 && TRPMHasTrap(pVCpu)
14893 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14894 {
14895 uint8_t u8TrapNo;
14896 TRPMEVENT enmType;
14897 uint32_t uErrCode;
14898 RTGCPTR uCr2;
14899 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14900 AssertRC(rc2);
14901 Assert(enmType == TRPM_HARDWARE_INT);
14902 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14903 TRPMResetTrap(pVCpu);
14904#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14905 /* Injecting an event may cause a VM-exit. */
14906 if ( rcStrict != VINF_SUCCESS
14907 && rcStrict != VINF_IEM_RAISED_XCPT)
14908 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14909#else
14910 NOREF(rcStrict);
14911#endif
14912 }
14913
14914 /*
14915 * Initial decoder init w/ prefetch, then setup setjmp.
14916 */
14917 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14918 if (rcStrict == VINF_SUCCESS)
14919 {
14920#ifdef IEM_WITH_SETJMP
14921 jmp_buf JmpBuf;
14922 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14923 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14924 pVCpu->iem.s.cActiveMappings = 0;
14925 if ((rcStrict = setjmp(JmpBuf)) == 0)
14926#endif
14927 {
14928 /*
14929 * The run loop. We limit ourselves to the caller specified cMaxInstructions.
14930 */
14931 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14932 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14933 for (;;)
14934 {
14935 /*
14936 * Log the state.
14937 */
14938#ifdef LOG_ENABLED
14939 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14940#endif
14941
14942 /*
14943 * Do the decoding and emulation.
14944 */
14945 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14946 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14947 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14948 {
14949 Assert(pVCpu->iem.s.cActiveMappings == 0);
14950 pVCpu->iem.s.cInstructions++;
14951 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14952 {
14953 uint64_t fCpu = pVCpu->fLocalForcedActions
14954 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14955 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14956 | VMCPU_FF_TLB_FLUSH
14957 | VMCPU_FF_INHIBIT_INTERRUPTS
14958 | VMCPU_FF_BLOCK_NMIS
14959 | VMCPU_FF_UNHALT ));
14960
14961 if (RT_LIKELY( ( !fCpu
14962 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14963 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14964 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14965 {
14966 if (cMaxInstructionsGccStupidity-- > 0)
14967 {
14968 /* Poll timers every now and then according to the caller's specs. */
14969 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14970 || !TMTimerPollBool(pVM, pVCpu))
14971 {
14972 Assert(pVCpu->iem.s.cActiveMappings == 0);
14973 iemReInitDecoder(pVCpu);
14974 continue;
14975 }
14976 }
14977 }
14978 }
14979 Assert(pVCpu->iem.s.cActiveMappings == 0);
14980 }
14981 else if (pVCpu->iem.s.cActiveMappings > 0)
14982 iemMemRollback(pVCpu);
14983 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14984 break;
14985 }
14986 }
14987#ifdef IEM_WITH_SETJMP
14988 else
14989 {
14990 if (pVCpu->iem.s.cActiveMappings > 0)
14991 iemMemRollback(pVCpu);
14992# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14993 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14994# endif
14995 pVCpu->iem.s.cLongJumps++;
14996 }
14997 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14998#endif
14999
15000 /*
15001 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15002 */
15003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
15004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
15005 }
15006 else
15007 {
15008 if (pVCpu->iem.s.cActiveMappings > 0)
15009 iemMemRollback(pVCpu);
15010
15011#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
15012 /*
15013 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15014 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15015 */
15016 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15017#endif
15018 }
15019
15020 /*
15021 * Log the status on failure and hand back the instruction count.
15022 */
15023 if (rcStrict != VINF_SUCCESS)
15024 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15025 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15026 if (pcInstructions)
15027 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15028 return rcStrict;
15029}
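

/*
 * Illustrative sketch, not part of the original source: a possible IEMExecLots
 * call.  Note that cPollRate is used as a mask and must be a power of two
 * minus one, as asserted at the top of IEMExecLots; the counts below are
 * made-up values and the helper name is hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC emExampleRunBurst(PVMCPUCC pVCpu)
{
    uint32_t cInstructions = 0;
    /* Up to 2048 instructions, polling timers roughly every 512 instructions. */
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 2048 /*cMaxInstructions*/, 511 /*cPollRate*/, &cInstructions);
    Log(("emExampleRunBurst: executed %u instructions, rcStrict=%Rrc\n",
         cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif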
15030
15031
15032/**
15033 * Interface used by EMExecuteExec, does exit statistics and limits.
15034 *
15035 * @returns Strict VBox status code.
15036 * @param pVCpu The cross context virtual CPU structure.
15037 * @param fWillExit To be defined.
15038 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
15039 * @param cMaxInstructions Maximum number of instructions to execute.
15040 * @param cMaxInstructionsWithoutExits
15041 * The max number of instructions without exits.
15042 * @param pStats Where to return statistics.
15043 */
15044VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
15045 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
15046{
15047 NOREF(fWillExit); /** @todo define flexible exit crits */
15048
15049 /*
15050 * Initialize return stats.
15051 */
15052 pStats->cInstructions = 0;
15053 pStats->cExits = 0;
15054 pStats->cMaxExitDistance = 0;
15055 pStats->cReserved = 0;
15056
15057 /*
15058 * Initial decoder init w/ prefetch, then setup setjmp.
15059 */
15060 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
15061 if (rcStrict == VINF_SUCCESS)
15062 {
15063#ifdef IEM_WITH_SETJMP
15064 jmp_buf JmpBuf;
15065 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15066 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15067 pVCpu->iem.s.cActiveMappings = 0;
15068 if ((rcStrict = setjmp(JmpBuf)) == 0)
15069#endif
15070 {
15071#ifdef IN_RING0
15072 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
15073#endif
15074 uint32_t cInstructionSinceLastExit = 0;
15075
15076 /*
15077 * The run loop. We limit ourselves to the caller specified instruction counts.
15078 */
15079 PVM pVM = pVCpu->CTX_SUFF(pVM);
15080 for (;;)
15081 {
15082 /*
15083 * Log the state.
15084 */
15085#ifdef LOG_ENABLED
15086 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
15087#endif
15088
15089 /*
15090 * Do the decoding and emulation.
15091 */
15092 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
15093
15094 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15095 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15096
15097 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
15098 && cInstructionSinceLastExit > 0 /* don't count the first */ )
15099 {
15100 pStats->cExits += 1;
15101 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
15102 pStats->cMaxExitDistance = cInstructionSinceLastExit;
15103 cInstructionSinceLastExit = 0;
15104 }
15105
15106 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15107 {
15108 Assert(pVCpu->iem.s.cActiveMappings == 0);
15109 pVCpu->iem.s.cInstructions++;
15110 pStats->cInstructions++;
15111 cInstructionSinceLastExit++;
15112 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15113 {
15114 uint64_t fCpu = pVCpu->fLocalForcedActions
15115 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15116 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15117 | VMCPU_FF_TLB_FLUSH
15118 | VMCPU_FF_INHIBIT_INTERRUPTS
15119 | VMCPU_FF_BLOCK_NMIS
15120 | VMCPU_FF_UNHALT ));
15121
15122 if (RT_LIKELY( ( ( !fCpu
15123 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15124 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
15125 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
15126 || pStats->cInstructions < cMinInstructions))
15127 {
15128 if (pStats->cInstructions < cMaxInstructions)
15129 {
15130 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
15131 {
15132#ifdef IN_RING0
15133 if ( !fCheckPreemptionPending
15134 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
15135#endif
15136 {
15137 Assert(pVCpu->iem.s.cActiveMappings == 0);
15138 iemReInitDecoder(pVCpu);
15139 continue;
15140 }
15141#ifdef IN_RING0
15142 rcStrict = VINF_EM_RAW_INTERRUPT;
15143 break;
15144#endif
15145 }
15146 }
15147 }
15148 Assert(!(fCpu & VMCPU_FF_IEM));
15149 }
15150 Assert(pVCpu->iem.s.cActiveMappings == 0);
15151 }
15152 else if (pVCpu->iem.s.cActiveMappings > 0)
15153 iemMemRollback(pVCpu);
15154 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15155 break;
15156 }
15157 }
15158#ifdef IEM_WITH_SETJMP
15159 else
15160 {
15161 if (pVCpu->iem.s.cActiveMappings > 0)
15162 iemMemRollback(pVCpu);
15163 pVCpu->iem.s.cLongJumps++;
15164 }
15165 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15166#endif
15167
15168 /*
15169 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15170 */
15171 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
15172 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
15173 }
15174 else
15175 {
15176 if (pVCpu->iem.s.cActiveMappings > 0)
15177 iemMemRollback(pVCpu);
15178
15179#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
15180 /*
15181 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
15182 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
15183 */
15184 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15185#endif
15186 }
15187
15188 /*
15189 * Log the status on failure.
15190 */
15191 if (rcStrict != VINF_SUCCESS)
15192 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
15193 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
15194 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
15195 return rcStrict;
15196}
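

/*
 * Illustrative sketch, not part of the original source: a possible
 * IEMExecForExits call and how the returned statistics might be consumed.
 * The instruction counts and the helper name are hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC emExampleRunUntilExitsDry(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    /* Execute at least 32 and at most 1024 instructions, giving up after 64
       instructions in a row without a potential exit. */
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 32 /*cMinInstructions*/,
                                            1024 /*cMaxInstructions*/, 64 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("emExampleRunUntilExitsDry: %u instructions, %u exits, max exit distance %u, rcStrict=%Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif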
15197
15198
15199/**
15200 * Injects a trap, fault, abort, software interrupt or external interrupt.
15201 *
15202 * The parameter list matches TRPMQueryTrapAll pretty closely.
15203 *
15204 * @returns Strict VBox status code.
15205 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15206 * @param u8TrapNo The trap number.
15207 * @param enmType What type is it (trap/fault/abort), software
15208 * interrupt or hardware interrupt.
15209 * @param uErrCode The error code if applicable.
15210 * @param uCr2 The CR2 value if applicable.
15211 * @param cbInstr The instruction length (only relevant for
15212 * software interrupts).
15213 */
15214VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15215 uint8_t cbInstr)
15216{
15217 iemInitDecoder(pVCpu, false, false);
15218#ifdef DBGFTRACE_ENABLED
15219 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15220 u8TrapNo, enmType, uErrCode, uCr2);
15221#endif
15222
15223 uint32_t fFlags;
15224 switch (enmType)
15225 {
15226 case TRPM_HARDWARE_INT:
15227 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15228 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15229 uErrCode = uCr2 = 0;
15230 break;
15231
15232 case TRPM_SOFTWARE_INT:
15233 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15234 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15235 uErrCode = uCr2 = 0;
15236 break;
15237
15238 case TRPM_TRAP:
15239 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15240 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15241 if (u8TrapNo == X86_XCPT_PF)
15242 fFlags |= IEM_XCPT_FLAGS_CR2;
15243 switch (u8TrapNo)
15244 {
15245 case X86_XCPT_DF:
15246 case X86_XCPT_TS:
15247 case X86_XCPT_NP:
15248 case X86_XCPT_SS:
15249 case X86_XCPT_PF:
15250 case X86_XCPT_AC:
15251 case X86_XCPT_GP:
15252 fFlags |= IEM_XCPT_FLAGS_ERR;
15253 break;
15254 }
15255 break;
15256
15257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15258 }
15259
15260 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15261
15262 if (pVCpu->iem.s.cActiveMappings > 0)
15263 iemMemRollback(pVCpu);
15264
15265 return rcStrict;
15266}
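

/*
 * Illustrative sketch, not part of the original source: injecting a page fault
 * through IEMInjectTrap.  As the TRPM_TRAP handling above shows, #PF gets both
 * an error code and CR2, while cbInstr only matters for software interrupts.
 * The helper name and parameters are hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC emExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
}
#endif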
15267
15268
15269/**
15270 * Injects the active TRPM event.
15271 *
15272 * @returns Strict VBox status code.
15273 * @param pVCpu The cross context virtual CPU structure.
15274 */
15275VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
15276{
15277#ifndef IEM_IMPLEMENTS_TASKSWITCH
15278 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15279#else
15280 uint8_t u8TrapNo;
15281 TRPMEVENT enmType;
15282 uint32_t uErrCode;
15283 RTGCUINTPTR uCr2;
15284 uint8_t cbInstr;
15285 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
15286 if (RT_FAILURE(rc))
15287 return rc;
15288
15289 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
15290 * ICEBP \#DB injection as a special case. */
15291 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15292#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15293 if (rcStrict == VINF_SVM_VMEXIT)
15294 rcStrict = VINF_SUCCESS;
15295#endif
15296#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15297 if (rcStrict == VINF_VMX_VMEXIT)
15298 rcStrict = VINF_SUCCESS;
15299#endif
15300 /** @todo Are there any other codes that imply the event was successfully
15301 * delivered to the guest? See @bugref{6607}. */
15302 if ( rcStrict == VINF_SUCCESS
15303 || rcStrict == VINF_IEM_RAISED_XCPT)
15304 TRPMResetTrap(pVCpu);
15305
15306 return rcStrict;
15307#endif
15308}
15309
15310
15311VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15312{
15313 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15314 return VERR_NOT_IMPLEMENTED;
15315}
15316
15317
15318VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15319{
15320 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15321 return VERR_NOT_IMPLEMENTED;
15322}
15323
15324
15325#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15326/**
15327 * Executes an IRET instruction with the default operand size.
15328 *
15329 * This is for PATM.
15330 *
15331 * @returns VBox status code.
15332 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15333 * @param pCtxCore The register frame.
15334 */
15335VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
15336{
15337 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15338
15339 iemCtxCoreToCtx(pCtx, pCtxCore);
15340 iemInitDecoder(pVCpu);
15341 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15342 if (rcStrict == VINF_SUCCESS)
15343 iemCtxToCtxCore(pCtxCore, pCtx);
15344 else
15345 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15346 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15347 return rcStrict;
15348}
15349#endif
15350
15351
15352/**
15353 * Macro used by the IEMExec* methods to check the given instruction length.
15354 *
15355 * Will return on failure!
15356 *
15357 * @param a_cbInstr The given instruction length.
15358 * @param a_cbMin The minimum length.
15359 */
15360#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15361 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15362 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15363
15364
15365/**
15366 * Calls iemUninitExec and iemExecStatusCodeFiddling.
15367 *
15368 * The raw-mode only iemRCRawMaybeReenter step is no longer performed.
15369 *
15370 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
15371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15372 * @param rcStrict The status code to fiddle.
15373 */
15374DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
15375{
15376 iemUninitExec(pVCpu);
15377 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15378}
15379
15380
15381/**
15382 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15383 *
15384 * This API ASSUMES that the caller has already verified that the guest code is
15385 * allowed to access the I/O port. (The I/O port is in the DX register in the
15386 * guest state.)
15387 *
15388 * @returns Strict VBox status code.
15389 * @param pVCpu The cross context virtual CPU structure.
15390 * @param cbValue The size of the I/O port access (1, 2, or 4).
15391 * @param enmAddrMode The addressing mode.
15392 * @param fRepPrefix Indicates whether a repeat prefix is used
15393 * (doesn't matter which for this instruction).
15394 * @param cbInstr The instruction length in bytes.
15395 * @param iEffSeg The effective segment register.
15396 * @param fIoChecked Whether the access to the I/O port has been
15397 * checked or not. It's typically checked in the
15398 * HM scenario.
15399 */
15400VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15401 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15402{
15403 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15404 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15405
15406 /*
15407 * State init.
15408 */
15409 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15410
15411 /*
15412 * Switch orgy for getting to the right handler.
15413 */
15414 VBOXSTRICTRC rcStrict;
15415 if (fRepPrefix)
15416 {
15417 switch (enmAddrMode)
15418 {
15419 case IEMMODE_16BIT:
15420 switch (cbValue)
15421 {
15422 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15423 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15424 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15425 default:
15426 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15427 }
15428 break;
15429
15430 case IEMMODE_32BIT:
15431 switch (cbValue)
15432 {
15433 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15434 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15435 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15436 default:
15437 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15438 }
15439 break;
15440
15441 case IEMMODE_64BIT:
15442 switch (cbValue)
15443 {
15444 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15445 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15446 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15447 default:
15448 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15449 }
15450 break;
15451
15452 default:
15453 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15454 }
15455 }
15456 else
15457 {
15458 switch (enmAddrMode)
15459 {
15460 case IEMMODE_16BIT:
15461 switch (cbValue)
15462 {
15463 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15464 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15465 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15466 default:
15467 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15468 }
15469 break;
15470
15471 case IEMMODE_32BIT:
15472 switch (cbValue)
15473 {
15474 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15475 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15476 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15477 default:
15478 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15479 }
15480 break;
15481
15482 case IEMMODE_64BIT:
15483 switch (cbValue)
15484 {
15485 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15486 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15487 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15488 default:
15489 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15490 }
15491 break;
15492
15493 default:
15494 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15495 }
15496 }
15497
15498 if (pVCpu->iem.s.cActiveMappings)
15499 iemMemRollback(pVCpu);
15500
15501 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15502}
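

/*
 * Illustrative sketch, not part of the original source: emulating a REP OUTSB
 * with 16-bit addressing and the default DS segment via the API above.  The
 * helper name is hypothetical; cbInstr would normally come from the hardware
 * exit information.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC hmExampleEmulateRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_16BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif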
15503
15504
15505/**
15506 * Interface for HM and EM for executing string I/O IN (read) instructions.
15507 *
15508 * This API ASSUMES that the caller has already verified that the guest code is
15509 * allowed to access the I/O port. (The I/O port is in the DX register in the
15510 * guest state.)
15511 *
15512 * @returns Strict VBox status code.
15513 * @param pVCpu The cross context virtual CPU structure.
15514 * @param cbValue The size of the I/O port access (1, 2, or 4).
15515 * @param enmAddrMode The addressing mode.
15516 * @param fRepPrefix Indicates whether a repeat prefix is used
15517 * (doesn't matter which for this instruction).
15518 * @param cbInstr The instruction length in bytes.
15519 * @param fIoChecked Whether the access to the I/O port has been
15520 * checked or not. It's typically checked in the
15521 * HM scenario.
15522 */
15523VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15524 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15525{
15526 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15527
15528 /*
15529 * State init.
15530 */
15531 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15532
15533 /*
15534 * Switch orgy for getting to the right handler.
15535 */
15536 VBOXSTRICTRC rcStrict;
15537 if (fRepPrefix)
15538 {
15539 switch (enmAddrMode)
15540 {
15541 case IEMMODE_16BIT:
15542 switch (cbValue)
15543 {
15544 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15545 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15546 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15547 default:
15548 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15549 }
15550 break;
15551
15552 case IEMMODE_32BIT:
15553 switch (cbValue)
15554 {
15555 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15556 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15557 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15558 default:
15559 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15560 }
15561 break;
15562
15563 case IEMMODE_64BIT:
15564 switch (cbValue)
15565 {
15566 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15567 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15568 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15569 default:
15570 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15571 }
15572 break;
15573
15574 default:
15575 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15576 }
15577 }
15578 else
15579 {
15580 switch (enmAddrMode)
15581 {
15582 case IEMMODE_16BIT:
15583 switch (cbValue)
15584 {
15585 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15586 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15587 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15588 default:
15589 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15590 }
15591 break;
15592
15593 case IEMMODE_32BIT:
15594 switch (cbValue)
15595 {
15596 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15597 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15598 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15599 default:
15600 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15601 }
15602 break;
15603
15604 case IEMMODE_64BIT:
15605 switch (cbValue)
15606 {
15607 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15608 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15609 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15610 default:
15611 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15612 }
15613 break;
15614
15615 default:
15616 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15617 }
15618 }
15619
15620 if ( pVCpu->iem.s.cActiveMappings == 0
15621 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
15622 { /* likely */ }
15623 else
15624 {
15625 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
15626 iemMemRollback(pVCpu);
15627 }
15628 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15629}
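

/*
 * Illustrative sketch, not part of the original source: emulating a REP INSD
 * with 32-bit addressing via the API above.  INS always stores through
 * ES:(E)DI, hence no effective segment parameter.  The helper name is
 * hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC hmExampleEmulateRepInsd(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoRead(pVCpu, 4 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                               cbInstr, true /*fIoChecked*/);
}
#endif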
15630
15631
15632/**
15633 * Interface for rawmode to execute an OUT (write) instruction.
15634 *
15635 * @returns Strict VBox status code.
15636 * @param pVCpu The cross context virtual CPU structure.
15637 * @param cbInstr The instruction length in bytes.
15638 * @param u16Port The port to write to.
15639 * @param fImm Whether the port is specified using an immediate operand or
15640 * using the implicit DX register.
15641 * @param cbReg The register size.
15642 *
15643 * @remarks In ring-0 not all of the state needs to be synced in.
15644 */
15645VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15646{
15647 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15648 Assert(cbReg <= 4 && cbReg != 3);
15649
15650 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15651 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15652 Assert(!pVCpu->iem.s.cActiveMappings);
15653 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15654}
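

/*
 * Illustrative sketch, not part of the original source: emulating the one byte
 * OUT DX, AL instruction (opcode 0xEE) with the API above.  The helper name is
 * hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC hmExampleEmulateOutDxAl(PVMCPUCC pVCpu, uint16_t u16Port)
{
    /* No immediate operand (the port comes from DX) and a 1 byte register (AL). */
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif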
15655
15656
15657/**
15658 * Interface for rawmode to execute an IN (read) instruction.
15659 *
15660 * @returns Strict VBox status code.
15661 * @param pVCpu The cross context virtual CPU structure.
15662 * @param cbInstr The instruction length in bytes.
15663 * @param u16Port The port to read.
15664 * @param fImm Whether the port is specified using an immediate operand or
15665 * using the implicit DX register.
15666 * @param cbReg The register size.
15667 */
15668VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15669{
15670 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15671 Assert(cbReg <= 4 && cbReg != 3);
15672
15673 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15674 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15675 Assert(!pVCpu->iem.s.cActiveMappings);
15676 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15677}
15678
15679
15680/**
15681 * Interface for HM and EM to write to a CRx register.
15682 *
15683 * @returns Strict VBox status code.
15684 * @param pVCpu The cross context virtual CPU structure.
15685 * @param cbInstr The instruction length in bytes.
15686 * @param iCrReg The control register number (destination).
15687 * @param iGReg The general purpose register number (source).
15688 *
15689 * @remarks In ring-0 not all of the state needs to be synced in.
15690 */
15691VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15692{
15693 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15694 Assert(iCrReg < 16);
15695 Assert(iGReg < 16);
15696
15697 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15698 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15699 Assert(!pVCpu->iem.s.cActiveMappings);
15700 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15701}
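

/*
 * Illustrative sketch, not part of the original source: emulating MOV CR3, RAX
 * (encoded as 0F 22 D8, three bytes) with the API above.  The helper name is
 * hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC hmExampleEmulateMovCr3Rax(PVMCPUCC pVCpu)
{
    /* CR3 is the destination control register, general register 0 (RAX) the source. */
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg*/, 0 /*iGReg*/);
}
#endif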
15702
15703
15704/**
15705 * Interface for HM and EM to read from a CRx register.
15706 *
15707 * @returns Strict VBox status code.
15708 * @param pVCpu The cross context virtual CPU structure.
15709 * @param cbInstr The instruction length in bytes.
15710 * @param iGReg The general purpose register number (destination).
15711 * @param iCrReg The control register number (source).
15712 *
15713 * @remarks In ring-0 not all of the state needs to be synced in.
15714 */
15715VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15716{
15717 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15718 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15719 | CPUMCTX_EXTRN_APIC_TPR);
15720 Assert(iCrReg < 16);
15721 Assert(iGReg < 16);
15722
15723 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15724 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15725 Assert(!pVCpu->iem.s.cActiveMappings);
15726 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15727}
15728
15729
15730/**
15731 * Interface for HM and EM to clear the CR0[TS] bit.
15732 *
15733 * @returns Strict VBox status code.
15734 * @param pVCpu The cross context virtual CPU structure.
15735 * @param cbInstr The instruction length in bytes.
15736 *
15737 * @remarks In ring-0 not all of the state needs to be synced in.
15738 */
15739VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15740{
15741 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15742
15743 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15744 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15745 Assert(!pVCpu->iem.s.cActiveMappings);
15746 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15747}
15748
15749
15750/**
15751 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15752 *
15753 * @returns Strict VBox status code.
15754 * @param pVCpu The cross context virtual CPU structure.
15755 * @param cbInstr The instruction length in bytes.
15756 * @param uValue The value to load into CR0.
15757 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15758 * memory operand. Otherwise pass NIL_RTGCPTR.
15759 *
15760 * @remarks In ring-0 not all of the state needs to be synced in.
15761 */
15762VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15763{
15764 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15765
15766 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15767 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15768 Assert(!pVCpu->iem.s.cActiveMappings);
15769 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15770}
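

/*
 * Illustrative sketch, not part of the original source: emulating LMSW with a
 * register operand (0F 01 /6, three bytes) via the API above; NIL_RTGCPTR is
 * passed since there is no memory operand.  The helper name is hypothetical.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC hmExampleEmulateLmswFromReg(PVMCPUCC pVCpu, uint16_t uValue)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uValue, NIL_RTGCPTR);
}
#endif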
15771
15772
15773/**
15774 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15775 *
15776 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15777 *
15778 * @returns Strict VBox status code.
15779 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15780 * @param cbInstr The instruction length in bytes.
15781 * @remarks In ring-0 not all of the state needs to be synced in.
15782 * @thread EMT(pVCpu)
15783 */
15784VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15785{
15786 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15787
15788 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15789 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15790 Assert(!pVCpu->iem.s.cActiveMappings);
15791 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15792}
15793
15794
15795/**
15796 * Interface for HM and EM to emulate the WBINVD instruction.
15797 *
15798 * @returns Strict VBox status code.
15799 * @param pVCpu The cross context virtual CPU structure.
15800 * @param cbInstr The instruction length in bytes.
15801 *
15802 * @remarks In ring-0 not all of the state needs to be synced in.
15803 */
15804VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15805{
15806 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15807
15808 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15809 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15810 Assert(!pVCpu->iem.s.cActiveMappings);
15811 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15812}
15813
15814
15815/**
15816 * Interface for HM and EM to emulate the INVD instruction.
15817 *
15818 * @returns Strict VBox status code.
15819 * @param pVCpu The cross context virtual CPU structure.
15820 * @param cbInstr The instruction length in bytes.
15821 *
15822 * @remarks In ring-0 not all of the state needs to be synced in.
15823 */
15824VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15825{
15826 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15827
15828 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15829 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15830 Assert(!pVCpu->iem.s.cActiveMappings);
15831 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15832}
15833
15834
15835/**
15836 * Interface for HM and EM to emulate the INVLPG instruction.
15837 *
15838 * @returns Strict VBox status code.
15839 * @retval VINF_PGM_SYNC_CR3
15840 *
15841 * @param pVCpu The cross context virtual CPU structure.
15842 * @param cbInstr The instruction length in bytes.
15843 * @param GCPtrPage The effective address of the page to invalidate.
15844 *
15845 * @remarks In ring-0 not all of the state needs to be synced in.
15846 */
15847VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15848{
15849 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15850
15851 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15852 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15853 Assert(!pVCpu->iem.s.cActiveMappings);
15854 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15855}
15856
15857
15858/**
15859 * Interface for HM and EM to emulate the INVPCID instruction.
15860 *
15861 * @returns Strict VBox status code.
15862 * @retval VINF_PGM_SYNC_CR3
15863 *
15864 * @param pVCpu The cross context virtual CPU structure.
15865 * @param cbInstr The instruction length in bytes.
15866 * @param iEffSeg The effective segment register.
15867 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15868 * @param uType The invalidation type.
15869 *
15870 * @remarks In ring-0 not all of the state needs to be synced in.
15871 */
15872VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15873 uint64_t uType)
15874{
15875 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15876
15877 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15878 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15879 Assert(!pVCpu->iem.s.cActiveMappings);
15880 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15881}
15882
15883
15884/**
15885 * Interface for HM and EM to emulate the CPUID instruction.
15886 *
15887 * @returns Strict VBox status code.
15888 *
15889 * @param pVCpu The cross context virtual CPU structure.
15890 * @param cbInstr The instruction length in bytes.
15891 *
15892 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15893 */
15894VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15895{
15896 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15897 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15898
15899 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15900 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15901 Assert(!pVCpu->iem.s.cActiveMappings);
15902 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15903}
15904
15905
15906/**
15907 * Interface for HM and EM to emulate the RDPMC instruction.
15908 *
15909 * @returns Strict VBox status code.
15910 *
15911 * @param pVCpu The cross context virtual CPU structure.
15912 * @param cbInstr The instruction length in bytes.
15913 *
15914 * @remarks Not all of the state needs to be synced in.
15915 */
15916VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15917{
15918 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15919 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15920
15921 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15922 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15923 Assert(!pVCpu->iem.s.cActiveMappings);
15924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15925}
15926
15927
15928/**
15929 * Interface for HM and EM to emulate the RDTSC instruction.
15930 *
15931 * @returns Strict VBox status code.
15932 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15933 *
15934 * @param pVCpu The cross context virtual CPU structure.
15935 * @param cbInstr The instruction length in bytes.
15936 *
15937 * @remarks Not all of the state needs to be synced in.
15938 */
15939VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15940{
15941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15942 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15943
15944 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15945 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15946 Assert(!pVCpu->iem.s.cActiveMappings);
15947 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15948}
15949
15950
15951/**
15952 * Interface for HM and EM to emulate the RDTSCP instruction.
15953 *
15954 * @returns Strict VBox status code.
15955 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15956 *
15957 * @param pVCpu The cross context virtual CPU structure.
15958 * @param cbInstr The instruction length in bytes.
15959 *
15960 * @remarks Not all of the state needs to be synced in. Recommended
15961 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15962 */
15963VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15964{
15965 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15966 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15967
15968 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15969 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15970 Assert(!pVCpu->iem.s.cActiveMappings);
15971 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15972}
15973
15974
15975/**
15976 * Interface for HM and EM to emulate the RDMSR instruction.
15977 *
15978 * @returns Strict VBox status code.
15979 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15980 *
15981 * @param pVCpu The cross context virtual CPU structure.
15982 * @param cbInstr The instruction length in bytes.
15983 *
15984 * @remarks Not all of the state needs to be synced in. Requires RCX and
15985 * (currently) all MSRs.
15986 */
15987VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15988{
15989 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15990 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15991
15992 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15993 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15994 Assert(!pVCpu->iem.s.cActiveMappings);
15995 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15996}
15997
15998
15999/**
16000 * Interface for HM and EM to emulate the WRMSR instruction.
16001 *
16002 * @returns Strict VBox status code.
16003 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
16004 *
16005 * @param pVCpu The cross context virtual CPU structure.
16006 * @param cbInstr The instruction length in bytes.
16007 *
16008 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
16009 * and (currently) all MSRs.
16010 */
16011VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
16012{
16013 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
16014 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
16015 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
16016
16017 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16018 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
16019 Assert(!pVCpu->iem.s.cActiveMappings);
16020 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16021}
16022
16023
16024/**
16025 * Interface for HM and EM to emulate the MONITOR instruction.
16026 *
16027 * @returns Strict VBox status code.
16028 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
16029 *
16030 * @param pVCpu The cross context virtual CPU structure.
16031 * @param cbInstr The instruction length in bytes.
16032 *
16033 * @remarks Not all of the state needs to be synced in.
16034 * @remarks ASSUMES the default segment of DS and no segment override prefixes
16035 * are used.
16036 */
16037VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
16038{
16039 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16040 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
16041
16042 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16043 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
16044 Assert(!pVCpu->iem.s.cActiveMappings);
16045 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16046}
16047
16048
16049/**
16050 * Interface for HM and EM to emulate the MWAIT instruction.
16051 *
16052 * @returns Strict VBox status code.
16053 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
16054 *
16055 * @param pVCpu The cross context virtual CPU structure.
16056 * @param cbInstr The instruction length in bytes.
16057 *
16058 * @remarks Not all of the state needs to be synced in.
16059 */
16060VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
16061{
16062 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16063 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
16064
16065 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16066 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
16067 Assert(!pVCpu->iem.s.cActiveMappings);
16068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16069}
16070
16071
16072/**
16073 * Interface for HM and EM to emulate the HLT instruction.
16074 *
16075 * @returns Strict VBox status code.
16076 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
16077 *
16078 * @param pVCpu The cross context virtual CPU structure.
16079 * @param cbInstr The instruction length in bytes.
16080 *
16081 * @remarks Not all of the state needs to be synced in.
16082 */
16083VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
16084{
16085 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
16086
16087 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16088 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
16089 Assert(!pVCpu->iem.s.cActiveMappings);
16090 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16091}
16092
16093
16094/**
16095 * Checks if IEM is in the process of delivering an event (interrupt or
16096 * exception).
16097 *
16098 * @returns true if we're in the process of raising an interrupt or exception,
16099 * false otherwise.
16100 * @param pVCpu The cross context virtual CPU structure.
16101 * @param puVector Where to store the vector associated with the
16102 * currently delivered event, optional.
16103 * @param pfFlags Where to store the event delivery flags (see
16104 * IEM_XCPT_FLAGS_XXX), optional.
16105 * @param puErr Where to store the error code associated with the
16106 * event, optional.
16107 * @param puCr2 Where to store the CR2 associated with the event,
16108 * optional.
16109 * @remarks The caller should check the flags to determine if the error code and
16110 * CR2 are valid for the event.
16111 */
16112VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
16113{
16114 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
16115 if (fRaisingXcpt)
16116 {
16117 if (puVector)
16118 *puVector = pVCpu->iem.s.uCurXcpt;
16119 if (pfFlags)
16120 *pfFlags = pVCpu->iem.s.fCurXcpt;
16121 if (puErr)
16122 *puErr = pVCpu->iem.s.uCurXcptErr;
16123 if (puCr2)
16124 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
16125 }
16126 return fRaisingXcpt;
16127}
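

/*
 * Illustrative sketch, not part of the original source: querying the event
 * currently being delivered and honouring the flags before trusting the error
 * code and CR2, as the remark above advises.  The helper name is hypothetical.
 */
#if 0 /* example only, not built */
static void hmExampleLogPendingXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("delivering vector %#x err=%#x\n", uVector, uErr));
        else
            Log(("delivering vector %#x\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  cr2=%#RX64\n", uCr2));
    }
}
#endif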
16128
16129#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
16130
16131/**
16132 * Interface for HM and EM to emulate the CLGI instruction.
16133 *
16134 * @returns Strict VBox status code.
16135 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16136 * @param cbInstr The instruction length in bytes.
16137 * @thread EMT(pVCpu)
16138 */
16139VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
16140{
16141 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16142
16143 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16144 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
16145 Assert(!pVCpu->iem.s.cActiveMappings);
16146 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16147}
16148
16149
16150/**
16151 * Interface for HM and EM to emulate the STGI instruction.
16152 *
16153 * @returns Strict VBox status code.
16154 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16155 * @param cbInstr The instruction length in bytes.
16156 * @thread EMT(pVCpu)
16157 */
16158VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
16159{
16160 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16161
16162 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16163 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
16164 Assert(!pVCpu->iem.s.cActiveMappings);
16165 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16166}
16167
16168
16169/**
16170 * Interface for HM and EM to emulate the VMLOAD instruction.
16171 *
16172 * @returns Strict VBox status code.
16173 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16174 * @param cbInstr The instruction length in bytes.
16175 * @thread EMT(pVCpu)
16176 */
16177VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
16178{
16179 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16180
16181 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16182 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
16183 Assert(!pVCpu->iem.s.cActiveMappings);
16184 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16185}
16186
16187
16188/**
16189 * Interface for HM and EM to emulate the VMSAVE instruction.
16190 *
16191 * @returns Strict VBox status code.
16192 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16193 * @param cbInstr The instruction length in bytes.
16194 * @thread EMT(pVCpu)
16195 */
16196VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
16197{
16198 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16199
16200 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16201 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
16202 Assert(!pVCpu->iem.s.cActiveMappings);
16203 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16204}
16205
16206
16207/**
16208 * Interface for HM and EM to emulate the INVLPGA instruction.
16209 *
16210 * @returns Strict VBox status code.
16211 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16212 * @param cbInstr The instruction length in bytes.
16213 * @thread EMT(pVCpu)
16214 */
16215VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
16216{
16217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16218
16219 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16220 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
16221 Assert(!pVCpu->iem.s.cActiveMappings);
16222 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16223}
16224
16225
16226/**
16227 * Interface for HM and EM to emulate the VMRUN instruction.
16228 *
16229 * @returns Strict VBox status code.
16230 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16231 * @param cbInstr The instruction length in bytes.
16232 * @thread EMT(pVCpu)
16233 */
16234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
16235{
16236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16237 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
16238
16239 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16240 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
16241 Assert(!pVCpu->iem.s.cActiveMappings);
16242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16243}
16244
16245
16246/**
16247 * Interface for HM and EM to emulate \#VMEXIT.
16248 *
16249 * @returns Strict VBox status code.
16250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16251 * @param uExitCode The exit code.
16252 * @param uExitInfo1 The exit info. 1 field.
16253 * @param uExitInfo2 The exit info. 2 field.
16254 * @thread EMT(pVCpu)
16255 */
16256VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
16257{
16258 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
16259 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
16260 if (pVCpu->iem.s.cActiveMappings)
16261 iemMemRollback(pVCpu);
16262 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16263}
16264
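/*
 * Example (illustrative sketch only): how HM might hand a decoded VMRUN to IEM
 * and, separately, synthesize an SVM \#VMEXIT.  The helper name
 * hmExampleSvmForwarding, the 3-byte instruction length and the exit code used
 * below are assumptions for illustration, not taken from this file.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC hmExampleSvmForwarding(PVMCPUCC pVCpu)
{
    /* Emulate a VMRUN that HM has already decoded (VMRUN encodes in 3 bytes). */
    VBOXSTRICTRC rcStrict = IEMExecDecodedVmrun(pVCpu, 3 /*cbInstr*/);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Later, force a #VMEXIT on behalf of the nested guest (exit code and exit infos are placeholders). */
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif
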
16265#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
16266
16267#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
16268
16269/**
16270 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
16271 *
16272 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
16273 * are performed. Bounds checking is done in strict builds only.
16274 *
16275 * @param pVmcs Pointer to the virtual VMCS.
16276 * @param u64VmcsField The VMCS field.
16277 * @param pu64Dst Where to store the VMCS value.
16278 *
16279 * @remarks May be called with interrupts disabled.
16280 * @todo This should probably be moved to CPUM someday.
16281 */
16282VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
16283{
16284 AssertPtr(pVmcs);
16285 AssertPtr(pu64Dst);
16286 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
16287}
16288
16289
16290/**
16291 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
16292 *
16293 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
16294 * are performed. Bounds checking is done in strict builds only.
16295 *
16296 * @param pVmcs Pointer to the virtual VMCS.
16297 * @param u64VmcsField The VMCS field.
16298 * @param u64Val The value to write.
16299 *
16300 * @remarks May be called with interrupts disabled.
16301 * @todo This should probably be moved to CPUM someday.
16302 */
16303VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
16304{
16305 AssertPtr(pVmcs);
16306 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
16307}
16308
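/*
 * Example (illustrative sketch only): reading and writing a field of the
 * virtual VMCS via the two accessors above.  The field encoding constant
 * VMX_VMCS_GUEST_RIP and the surrounding helper are used here purely for
 * illustration.
 */
#if 0 /* illustrative only */
static void hmExampleTouchVirtVmcsRip(PVMXVVMCS pVmcs)
{
    uint64_t uGuestRip = 0;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &uGuestRip);          /* assumed field encoding name */
    IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, uGuestRip + 2);      /* e.g. skip a 2 byte instruction */
}
#endif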
16309
16310/**
16311 * Interface for HM and EM to virtualize x2APIC MSR accesses.
16312 *
16313 * @returns Strict VBox status code.
16314 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
16315 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
16316 * the x2APIC device.
16317 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
16318 *
16319 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16320 * @param idMsr The MSR being read or written.
16321 * @param pu64Value Pointer to the value being written or where to store the
16322 * value being read.
16323 * @param fWrite Whether this is an MSR write or read access.
16324 * @thread EMT(pVCpu)
16325 */
16326VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
16327{
16328 Assert(pu64Value);
16329
16330 VBOXSTRICTRC rcStrict;
16331 if (fWrite)
16332 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
16333 else
16334 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
16335 Assert(!pVCpu->iem.s.cActiveMappings);
16336 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16338}
16339
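/*
 * Example (illustrative sketch only): virtualizing an x2APIC MSR write through
 * IEMExecVmxVirtApicAccessMsr() and dispatching on its status codes.  The
 * surrounding helper and the forwarding status returned for the non-intercepted
 * case are illustrative assumptions.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC hmExampleVirtX2ApicWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t uValue)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &uValue, true /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;                /* The access was fully virtualized. */
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        return VINF_CPUM_R3_MSR_WRITE;      /* Hand it to the regular x2APIC device (assumed forwarding code). */
    return rcStrict;                        /* VERR_OUT_OF_RANGE: the caller raises #GP(0). */
}
#endif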
16340
16341/**
16342 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
16343 *
16344 * @returns Strict VBox status code.
16345 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
16346 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
16347 *
16348 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16349 * @param pExitInfo Pointer to the VM-exit information.
16350 * @param pExitEventInfo Pointer to the VM-exit event information.
16351 * @thread EMT(pVCpu)
16352 */
16353VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16354{
16355 Assert(pExitInfo);
16356 Assert(pExitEventInfo);
16357 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16358 Assert(!pVCpu->iem.s.cActiveMappings);
16359 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16361}
16362
16363
16364/**
16365 * Interface for HM and EM to perform an APIC-write emulation which may cause a
16366 * VM-exit.
16367 *
16368 * @returns Strict VBox status code.
16369 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16370 * @thread EMT(pVCpu)
16371 */
16372VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
16373{
16374 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
16375 Assert(!pVCpu->iem.s.cActiveMappings);
16376 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16377}
16378
16379
16380/**
16381 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
16382 *
16383 * @returns Strict VBox status code.
16384 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16385 * @thread EMT(pVCpu)
16386 */
16387VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
16388{
16389 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
16390 Assert(!pVCpu->iem.s.cActiveMappings);
16391 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16392}
16393
16394
16395/**
16396 * Interface for HM and EM to emulate VM-exit due to external interrupts.
16397 *
16398 * @returns Strict VBox status code.
16399 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16400 * @param uVector The external interrupt vector (pass 0 if the external
16401 * interrupt is still pending).
16402 * @param fIntPending Whether the external interrupt is pending or
16403 * acknowledged in the interrupt controller.
16404 * @thread EMT(pVCpu)
16405 */
16406VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
16407{
16408 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
16409 Assert(!pVCpu->iem.s.cActiveMappings);
16410 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16411}
16412
16413
16414/**
16415 * Interface for HM and EM to emulate VM-exit due to exceptions.
16416 *
16417 * Exceptions include NMIs, software exceptions (those generated by INT3 or
16418 * INTO), and privileged software exceptions (those generated by INT1/ICEBP).
16419 *
16420 * @returns Strict VBox status code.
16421 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16422 * @param pExitInfo Pointer to the VM-exit information.
16423 * @param pExitEventInfo Pointer to the VM-exit event information.
16424 * @thread EMT(pVCpu)
16425 */
16426VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16427{
16428 Assert(pExitInfo);
16429 Assert(pExitEventInfo);
16430 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16431 Assert(!pVCpu->iem.s.cActiveMappings);
16432 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16433}
16434
16435
16436/**
16437 * Interface for HM and EM to emulate VM-exit due to NMIs.
16438 *
16439 * @returns Strict VBox status code.
16440 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16441 * @thread EMT(pVCpu)
16442 */
16443VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
16444{
16445 VMXVEXITINFO ExitInfo;
16446 RT_ZERO(ExitInfo);
16447 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
16448
16449 VMXVEXITEVENTINFO ExitEventInfo;
16450 RT_ZERO(ExitEventInfo);
16451 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
16452 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
16453 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
16454
16455 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
16456 Assert(!pVCpu->iem.s.cActiveMappings);
16457 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16458}
16459
16460
16461/**
16462 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
16463 *
16464 * @returns Strict VBox status code.
16465 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16466 * @thread EMT(pVCpu)
16467 */
16468VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
16469{
16470 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
16471 Assert(!pVCpu->iem.s.cActiveMappings);
16472 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16473}
16474
16475
16476/**
16477 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
16478 *
16479 * @returns Strict VBox status code.
16480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16481 * @param uVector The SIPI vector.
16482 * @thread EMT(pVCpu)
16483 */
16484VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
16485{
16486 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
16487 Assert(!pVCpu->iem.s.cActiveMappings);
16488 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16489}
16490
16491
16492/**
16493 * Interface for HM and EM to emulate a VM-exit.
16494 *
16495 * If a specialized version of a VM-exit handler exists, that must be used instead.
16496 *
16497 * @returns Strict VBox status code.
16498 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16499 * @param uExitReason The VM-exit reason.
16500 * @param u64ExitQual The Exit qualification.
16501 * @thread EMT(pVCpu)
16502 */
16503VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
16504{
16505 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
16506 Assert(!pVCpu->iem.s.cActiveMappings);
16507 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16508}
16509
16510
16511/**
16512 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16513 *
16514 * This is meant to be used for those instructions for which VMX provides
16515 * additional decoding information beyond just the instruction length.
16516 *
16517 * @returns Strict VBox status code.
16518 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16519 * @param pExitInfo Pointer to the VM-exit information.
16520 * @thread EMT(pVCpu)
16521 */
16522VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16523{
16524 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
16525 Assert(!pVCpu->iem.s.cActiveMappings);
16526 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16527}
16528
16529
16530/**
16531 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16532 *
16533 * This is meant to be used for those instructions for which VMX provides only
16534 * the instruction length.
16535 *
16536 * @returns Strict VBox status code.
16537 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16538 * @param uExitReason The VM-exit reason.
16539 * @param cbInstr The instruction length in bytes.
16540 * @thread EMT(pVCpu)
16541 */
16542VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16543{
16544 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16545 Assert(!pVCpu->iem.s.cActiveMappings);
16546 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16547}
16548
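/*
 * Example (illustrative sketch only): forwarding a VM-exit that needs only the
 * instruction length, here an RDTSC exit (2 byte opcode).  The exit reason and
 * length are illustrative; a real caller takes both from its own exit state.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC hmExampleForwardRdtscExit(PVMCPUCC pVCpu)
{
    return IEMExecVmxVmexitInstr(pVCpu, VMX_EXIT_RDTSC, 2 /*cbInstr*/);
}
#endif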
16549
16550/**
16551 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
16552 * Virtualized-EOI, TPR below threshold).
16553 *
16554 * @returns Strict VBox status code.
16555 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16556 * @param pExitInfo Pointer to the VM-exit information.
16557 * @thread EMT(pVCpu)
16558 */
16559VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16560{
16561 Assert(pExitInfo);
16562 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
16563 Assert(!pVCpu->iem.s.cActiveMappings);
16564 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16565}
16566
16567
16568/**
16569 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16570 *
16571 * @returns Strict VBox status code.
16572 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16573 * @param pExitInfo Pointer to the VM-exit information.
16574 * @param pExitEventInfo Pointer to the VM-exit event information.
16575 * @thread EMT(pVCpu)
16576 */
16577VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16578{
16579 Assert(pExitInfo);
16580 Assert(pExitEventInfo);
16581 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16582 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16583 Assert(!pVCpu->iem.s.cActiveMappings);
16584 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16585}
16586
16587
16588/**
16589 * Interface for HM and EM to emulate the VMREAD instruction.
16590 *
16591 * @returns Strict VBox status code.
16592 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16593 * @param pExitInfo Pointer to the VM-exit information.
16594 * @thread EMT(pVCpu)
16595 */
16596VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16597{
16598 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16599 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16600 Assert(pExitInfo);
16601
16602 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16603
16604 VBOXSTRICTRC rcStrict;
16605 uint8_t const cbInstr = pExitInfo->cbInstr;
16606 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16607 uint64_t const u64FieldEnc = fIs64BitMode
16608 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16609 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16610 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16611 {
16612 if (fIs64BitMode)
16613 {
16614 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16615 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16616 }
16617 else
16618 {
16619 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16620 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16621 }
16622 }
16623 else
16624 {
16625 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16626 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16627 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16628 }
16629 Assert(!pVCpu->iem.s.cActiveMappings);
16630 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16631}
16632
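/*
 * Example (illustrative sketch only): how a caller could package the VM-exit
 * information it already has for a register-operand VMREAD and hand it to
 * IEMExecDecodedVmread().  The register indexes and instruction length are
 * placeholder values used for illustration.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC hmExampleForwardVmreadExit(PVMCPUCC pVCpu)
{
    VMXVEXITINFO ExitInfo;
    RT_ZERO(ExitInfo);
    ExitInfo.uReason                               = VMX_EXIT_VMREAD;
    ExitInfo.cbInstr                               = 4;    /* placeholder */
    ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 1;
    ExitInfo.InstrInfo.VmreadVmwrite.iReg1         = 0;    /* destination register (placeholder) */
    ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = 1;    /* register holding the field encoding (placeholder) */
    return IEMExecDecodedVmread(pVCpu, &ExitInfo);
}
#endif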
16633
16634/**
16635 * Interface for HM and EM to emulate the VMWRITE instruction.
16636 *
16637 * @returns Strict VBox status code.
16638 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16639 * @param pExitInfo Pointer to the VM-exit information.
16640 * @thread EMT(pVCpu)
16641 */
16642VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16643{
16644 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16645 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16646 Assert(pExitInfo);
16647
16648 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16649
16650 uint64_t u64Val;
16651 uint8_t iEffSeg;
16652 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16653 {
16654 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16655 iEffSeg = UINT8_MAX;
16656 }
16657 else
16658 {
16659 u64Val = pExitInfo->GCPtrEffAddr;
16660 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16661 }
16662 uint8_t const cbInstr = pExitInfo->cbInstr;
16663 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16664 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16665 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16666 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16667 Assert(!pVCpu->iem.s.cActiveMappings);
16668 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16669}
16670
16671
16672/**
16673 * Interface for HM and EM to emulate the VMPTRLD instruction.
16674 *
16675 * @returns Strict VBox status code.
16676 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16677 * @param pExitInfo Pointer to the VM-exit information.
16678 * @thread EMT(pVCpu)
16679 */
16680VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16681{
16682 Assert(pExitInfo);
16683 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16685
16686 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16687
16688 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16689 uint8_t const cbInstr = pExitInfo->cbInstr;
16690 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16691 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16692 Assert(!pVCpu->iem.s.cActiveMappings);
16693 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16694}
16695
16696
16697/**
16698 * Interface for HM and EM to emulate the VMPTRST instruction.
16699 *
16700 * @returns Strict VBox status code.
16701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16702 * @param pExitInfo Pointer to the VM-exit information.
16703 * @thread EMT(pVCpu)
16704 */
16705VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16706{
16707 Assert(pExitInfo);
16708 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16709 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16710
16711 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16712
16713 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16714 uint8_t const cbInstr = pExitInfo->cbInstr;
16715 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16716 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16717 Assert(!pVCpu->iem.s.cActiveMappings);
16718 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16719}
16720
16721
16722/**
16723 * Interface for HM and EM to emulate the VMCLEAR instruction.
16724 *
16725 * @returns Strict VBox status code.
16726 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16727 * @param pExitInfo Pointer to the VM-exit information.
16728 * @thread EMT(pVCpu)
16729 */
16730VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16731{
16732 Assert(pExitInfo);
16733 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16734 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16735
16736 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16737
16738 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16739 uint8_t const cbInstr = pExitInfo->cbInstr;
16740 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16741 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16742 Assert(!pVCpu->iem.s.cActiveMappings);
16743 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16744}
16745
16746
16747/**
16748 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16749 *
16750 * @returns Strict VBox status code.
16751 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16752 * @param cbInstr The instruction length in bytes.
16753 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16754 * VMXINSTRID_VMRESUME).
16755 * @thread EMT(pVCpu)
16756 */
16757VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16758{
16759 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16760 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16761
16762 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16763 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16764 Assert(!pVCpu->iem.s.cActiveMappings);
16765 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16766}
16767
16768
16769/**
16770 * Interface for HM and EM to emulate the VMXON instruction.
16771 *
16772 * @returns Strict VBox status code.
16773 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16774 * @param pExitInfo Pointer to the VM-exit information.
16775 * @thread EMT(pVCpu)
16776 */
16777VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16778{
16779 Assert(pExitInfo);
16780 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16781 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16782
16783 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16784
16785 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16786 uint8_t const cbInstr = pExitInfo->cbInstr;
16787 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16788 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16789 Assert(!pVCpu->iem.s.cActiveMappings);
16790 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16791}
16792
16793
16794/**
16795 * Interface for HM and EM to emulate the VMXOFF instruction.
16796 *
16797 * @returns Strict VBox status code.
16798 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16799 * @param cbInstr The instruction length in bytes.
16800 * @thread EMT(pVCpu)
16801 */
16802VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16803{
16804 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16805 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16806
16807 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16808 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16809 Assert(!pVCpu->iem.s.cActiveMappings);
16810 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16811}
16812
16813
16814/**
16815 * Interface for HM and EM to emulate the INVVPID instruction.
16816 *
16817 * @returns Strict VBox status code.
16818 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16819 * @param pExitInfo Pointer to the VM-exit information.
16820 * @thread EMT(pVCpu)
16821 */
16822VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16823{
16824 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16825 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16826 Assert(pExitInfo);
16827
16828 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16829
16830 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16831 uint8_t const cbInstr = pExitInfo->cbInstr;
16832 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16833 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16834 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16835 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16836 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16837 Assert(!pVCpu->iem.s.cActiveMappings);
16838 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16839}
16840
16841
16842# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
16843/**
16844 * Interface for HM and EM to emulate the INVEPT instruction.
16845 *
16846 * @returns Strict VBox status code.
16847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16848 * @param pExitInfo Pointer to the VM-exit information.
16849 * @thread EMT(pVCpu)
16850 */
16851VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16852{
16853 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16854 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16855 Assert(pExitInfo);
16856
16857 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16858
16859 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16860 uint8_t const cbInstr = pExitInfo->cbInstr;
16861 RTGCPTR const GCPtrInveptDesc = pExitInfo->GCPtrEffAddr;
16862 uint64_t const u64InveptType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16863 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16864 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16865 VBOXSTRICTRC rcStrict = iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, u64InveptType, pExitInfo);
16866 Assert(!pVCpu->iem.s.cActiveMappings);
16867 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16868}
16869
16870
16871/**
16872 * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
16873 *
16874 * @returns Strict VBox status code.
16875 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16876 * @param pExitInfo Pointer to the VM-exit information.
16877 * @param pExitEventInfo Pointer to the VM-exit event information.
16878 * @thread EMT(pVCpu)
16879 */
16880VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
16881 PCVMXVEXITEVENTINFO pExitEventInfo)
16882{
16883 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16884
16885 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16886 VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16887 Assert(!pVCpu->iem.s.cActiveMappings);
16888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16889}
16890
16891
16892/**
16893 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
16894 *
16895 * @returns Strict VBox status code.
16896 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16897 * @param GCPhysAddr The nested-guest physical address causing the EPT
16898 * misconfiguration.
16899 * @param pExitEventInfo Pointer to the VM-exit event information.
16900 * @thread EMT(pVCpu)
16901 */
16902VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
16903{
16904 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16905
16906 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16907 VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
16908 Assert(!pVCpu->iem.s.cActiveMappings);
16909 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16910}
16911
16912# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
16913
16914
16915/**
16916 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16917 *
16918 * @remarks The @a uUser argument is currently unused.
16919 */
16920PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16921 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16922 PGMACCESSORIGIN enmOrigin, uint64_t uUser)
16923{
16924 RT_NOREF3(pvPhys, enmOrigin, uUser);
16925
16926 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16927 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16928 {
16929 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16930 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16931
16932 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
16933 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16934 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16935 if (RT_FAILURE(rcStrict))
16936 return rcStrict;
16937
16938 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16939 return VINF_SUCCESS;
16940 }
16941
16942 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
16943 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16944 if (RT_FAILURE(rc))
16945 return rc;
16946
16947 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16948 return VINF_PGM_HANDLER_DO_DEFAULT;
16949}
16950
16951
16952# ifndef IN_RING3
16953/**
16954 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
16955 * \#PF access handler callback for guest VMX APIC-access page.}
16956 */
16957DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
16958 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
16960{
16961 RT_NOREF4(pVM, pRegFrame, pvFault, uUser);
16962
16963 /*
16964 * Handle the VMX APIC-access page only when the guest is in VMX non-root mode.
16965 * Otherwise we must deregister the page and allow regular RAM access.
16966 * Failing to do so lands us with endless EPT misconfiguration VM-exits.
16967 */
16968 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16969 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16970 {
16971 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16972 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16973
16974 /*
16975 * Check if the access causes an APIC-access VM-exit.
16976 */
16977 uint32_t fAccess;
16978 if (uErr & X86_TRAP_PF_ID)
16979 fAccess = IEM_ACCESS_INSTRUCTION;
16980 else if (uErr & X86_TRAP_PF_RW)
16981 fAccess = IEM_ACCESS_DATA_W;
16982 else
16983 fAccess = IEM_ACCESS_DATA_R;
16984
16985 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16986 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess);
16987 if (fIntercept)
16988 {
16989 /*
16990 * Query the source VM-exit (from the execution engine) that caused this access
16991 * within the APIC-access page. Currently only HM is supported.
16992 */
16993 AssertMsgReturn(VM_IS_HM_ENABLED(pVM),
16994 ("VM-exit auxiliary info. fetching not supported for execution engine %d\n",
16995 pVM->bMainExecutionEngine), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
16996 HMEXITAUX HmExitAux;
16997 RT_ZERO(HmExitAux);
16998 int const rc = HMR0GetExitAuxInfo(pVCpu, &HmExitAux, HMVMX_READ_EXIT_INSTR_LEN
16999 | HMVMX_READ_EXIT_QUALIFICATION
17000 | HMVMX_READ_IDT_VECTORING_INFO
17001 | HMVMX_READ_IDT_VECTORING_ERROR_CODE);
17002 AssertRCReturn(rc, rc);
17003
17004 /*
17005 * Verify that the VM-exit reason is an EPT violation.
17006 * Other accesses should go through the other handler (iemVmxApicAccessPageHandler).
17007 */
17008 AssertLogRelMsgReturn(HmExitAux.Vmx.uReason == VMX_EXIT_EPT_VIOLATION,
17009 ("Unexpected call to the VMX APIC-access page #PF handler for %#RGp (off=%u) uReason=%#RX32\n",
17010 GCPhysAccessBase, offAccess, HmExitAux.Vmx.uReason), VERR_IEM_IPE_9);
17011
17012 /*
17013 * Construct the virtual APIC-access VM-exit.
17014 */
17015 VMXAPICACCESS enmAccess;
17016 if (HmExitAux.Vmx.u64Qual & VMX_EXIT_QUAL_EPT_LINEAR_ADDR_VALID)
17017 {
17018 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
17019 enmAccess = VMXAPICACCESS_LINEAR_EVENT_DELIVERY;
17020 else if (fAccess == IEM_ACCESS_INSTRUCTION)
17021 enmAccess = VMXAPICACCESS_LINEAR_INSTR_FETCH;
17022 else if (fAccess & IEM_ACCESS_TYPE_WRITE)
17023 enmAccess = VMXAPICACCESS_LINEAR_WRITE;
17024 else
17025 enmAccess = VMXAPICACCESS_LINEAR_READ;
17026 }
17027 else
17028 {
17029 if (VMX_IDT_VECTORING_INFO_IS_VALID(HmExitAux.Vmx.uIdtVectoringInfo))
17030 enmAccess = VMXAPICACCESS_PHYSICAL_EVENT_DELIVERY;
17031 else
17032 {
17033 /** @todo How to distinguish between monitoring/trace vs other instructions
17034 * here? */
17035 enmAccess = VMXAPICACCESS_PHYSICAL_INSTR;
17036 }
17037 }
17038
17039 VMXVEXITINFO ExitInfo;
17040 RT_ZERO(ExitInfo);
17041 ExitInfo.uReason = VMX_EXIT_APIC_ACCESS;
17042 ExitInfo.u64Qual = RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_OFFSET, offAccess)
17043 | RT_BF_MAKE(VMX_BF_EXIT_QUAL_APIC_ACCESS_TYPE, enmAccess);
17044 ExitInfo.cbInstr = HmExitAux.Vmx.cbInstr;
17045
17046 VMXVEXITEVENTINFO ExitEventInfo;
17047 RT_ZERO(ExitEventInfo);
17048 ExitEventInfo.uIdtVectoringInfo = HmExitAux.Vmx.uIdtVectoringInfo;
17049 ExitEventInfo.uIdtVectoringErrCode = HmExitAux.Vmx.uIdtVectoringErrCode;
17050
17051 /*
17052 * Raise the APIC-access VM-exit.
17053 */
17054 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
17055 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
17056 }
17057
17058 /*
17059 * The access isn't intercepted, which means it needs to be virtualized.
17060 *
17061 * This requires emulating the instruction because we need the bytes being
17062 * read/written by the instruction not just the offset being accessed within
17063 * the APIC-access (which we derive from the faulting address).
17064 */
17065 return VINF_EM_RAW_EMULATE_INSTR;
17066 }
17067
17068 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
17069 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
17070 if (RT_FAILURE(rc))
17071 return rc;
17072
17073 return VINF_SUCCESS;
17074}
17075# endif /* !IN_RING3 */
17076#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
17077
17078
17079#ifdef IN_RING3
17080
17081/**
17082 * Handles the unlikely and probably fatal merge cases.
17083 *
17084 * @returns Merged status code.
17085 * @param rcStrict Current EM status code.
17086 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
17087 * with @a rcStrict.
17088 * @param iMemMap The memory mapping index. For error reporting only.
17089 * @param pVCpu The cross context virtual CPU structure of the calling
17090 * thread, for error reporting only.
17091 */
17092DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
17093 unsigned iMemMap, PVMCPUCC pVCpu)
17094{
17095 if (RT_FAILURE_NP(rcStrict))
17096 return rcStrict;
17097
17098 if (RT_FAILURE_NP(rcStrictCommit))
17099 return rcStrictCommit;
17100
17101 if (rcStrict == rcStrictCommit)
17102 return rcStrictCommit;
17103
17104 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
17105 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
17106 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
17107 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
17108 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
17109 return VERR_IOM_FF_STATUS_IPE;
17110}
17111
17112
17113/**
17114 * Helper for IOMR3ProcessForceFlag.
17115 *
17116 * @returns Merged status code.
17117 * @param rcStrict Current EM status code.
17118 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
17119 * with @a rcStrict.
17120 * @param iMemMap The memory mapping index. For error reporting only.
17121 * @param pVCpu The cross context virtual CPU structure of the calling
17122 * thread, for error reporting only.
17123 */
17124DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
17125{
17126 /* Simple. */
17127 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
17128 return rcStrictCommit;
17129
17130 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
17131 return rcStrict;
17132
17133 /* EM scheduling status codes. */
17134 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
17135 && rcStrict <= VINF_EM_LAST))
17136 {
17137 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
17138 && rcStrictCommit <= VINF_EM_LAST))
17139 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
17140 }
17141
17142 /* Unlikely */
17143 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
17144}
17145
17146
17147/**
17148 * Called by force-flag handling code when VMCPU_FF_IEM is set.
17149 *
17150 * @returns Merge between @a rcStrict and what the commit operation returned.
17151 * @param pVM The cross context VM structure.
17152 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
17153 * @param rcStrict The status code returned by ring-0 or raw-mode.
17154 */
17155VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
17156{
17157 /*
17158 * Reset the pending commit.
17159 */
17160 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
17161 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
17162 ("%#x %#x %#x\n",
17163 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
17164 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
17165
17166 /*
17167 * Commit the pending bounce buffers (usually just one).
17168 */
17169 unsigned cBufs = 0;
17170 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
17171 while (iMemMap-- > 0)
17172 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
17173 {
17174 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
17175 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
17176 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
17177
17178 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
17179 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
17180 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
17181
17182 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
17183 {
17184 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
17185 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
17186 pbBuf,
17187 cbFirst,
17188 PGMACCESSORIGIN_IEM);
17189 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
17190 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
17191 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
17192 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
17193 }
17194
17195 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
17196 {
17197 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
17198 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
17199 pbBuf + cbFirst,
17200 cbSecond,
17201 PGMACCESSORIGIN_IEM);
17202 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
17203 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
17204 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
17205 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
17206 }
17207 cBufs++;
17208 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
17209 }
17210
17211 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
17212 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
17213 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
17214 pVCpu->iem.s.cActiveMappings = 0;
17215 return rcStrict;
17216}
17217
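/*
 * Example (illustrative sketch only): how the ring-3 execution loop could react
 * to VMCPU_FF_IEM by committing the pending bounce-buffer writes.  The helper
 * name is hypothetical; the force-flag check uses the standard VMCPU_FF_IS_SET
 * macro.
 */
#if 0 /* illustrative only */
static VBOXSTRICTRC emExampleHandleIemForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
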
17218#endif /* IN_RING3 */
17219