
source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@93926

Last change on this file since 93926 was 93922, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 EPT VM-exit handling with HM ring-0 code.

1/* $Id: IEMAll.cpp 93922 2022-02-24 15:14:31Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much a work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
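/*
 * Illustrative sketch of the logging-level conventions documented in the
 * @page comment above.  This helper and its event strings are hypothetical
 * (not part of the original file); it only shows which macro corresponds to
 * which level once LOG_GROUP is LOG_GROUP_IEM.
 */
DECLINLINE(void) iemIllustrateLogLevels(uint16_t uSel, uint64_t uRip, RTGCPTR GCPtrDst, uint32_t cbWrite)
{
    LogFlow(("iemIllustrateLogLevels: enter\n"));                                  /* Flow:    enter/exit state info.       */
    Log(("iemIllustrateLogLevels: raising #GP(0)\n"));                             /* Level 1: exceptions and major events. */
    Log3(("iemIllustrateLogLevels: %04x:%08RX64 - detailed state\n", uSel, uRip)); /* Level 3: detailed enter/exit info.    */
    Log4(("iemIllustrateLogLevels: %04x:%08RX64 xor eax, eax\n", uSel, uRip));     /* Level 4: decoded mnemonics w/ EIP.    */
    Log8(("iemIllustrateLogLevels: write %RGv LB %#x\n", GCPtrDst, cbWrite));      /* Level 8: memory writes.               */
}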
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
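/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * an opcode decoder stub declared with FNIEMOP_DEF and dispatched through
 * FNIEMOP_CALL.  Real decoders are looked up in the opcode maps (see
 * g_apfnOneByteMap below).
 */
FNIEMOP_DEF(iemOp_IllustrationStub)
{
    /* A real decoder would fetch any remaining opcode bytes and defer to a C implementation. */
    Log4(("iemOp_IllustrationStub: %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
}

DECLINLINE(VBOXSTRICTRC) iemIllustrateDispatch(PVMCPUCC pVCpu)
{
    PFNIEMOP const pfnOp = iemOp_IllustrationStub; /* normally selected from an opcode map by the opcode byte */
    return FNIEMOP_CALL(pfnOp);
}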
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
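/*
 * Usage sketch (hypothetical helper): the default-case macros above sit in
 * fully enumerated switches so that an impossible value trips an assertion
 * and returns a safe status instead of falling through.
 */
DECLINLINE(int) iemIllustrateModeToBits(IEMMODE enmMode)
{
    switch (enmMode)
    {
        case IEMMODE_16BIT: return 16;
        case IEMMODE_32BIT: return 32;
        case IEMMODE_64BIT: return 64;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* expands to: default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE) */
    }
}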
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
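/*
 * Usage sketch (hypothetical helper): bailing out of an aspect IEM does not
 * implement yet; the logger arguments use the usual double-parenthesis form.
 */
DECLINLINE(VBOXSTRICTRC) iemIllustrateUnimplementedAspect(uint8_t bUnsupported)
{
    if (bUnsupported)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unsupported encoding %#x\n", bUnsupported));
    return VINF_SUCCESS;
}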
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
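/*
 * Illustrative sketch (hypothetical helper): a typical guard built from the
 * mode-check macros above for an instruction that is invalid outside
 * protected mode.
 */
DECLINLINE(VBOXSTRICTRC) iemIllustrateProtModeGuard(PVMCPUCC pVCpu)
{
    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;     /* stand-in for raising #UD here */
    if (IEM_IS_64BIT_CODE(pVCpu))
        Log(("iemIllustrateProtModeGuard: 64-bit code segment\n"));
    return VINF_SUCCESS;
}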
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
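/*
 * Usage sketch: feature gating with the accessors above.  The field name
 * fSvmDecodeAssists is taken from the SVM intercept macros further down;
 * the helper itself is hypothetical.
 */
DECLINLINE(bool) iemIllustrateHasDecodeAssists(PVMCPUCC pVCpu)
{
    return IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists;
}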
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored if not 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
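/*
 * Sketch of the access pattern IEM_USE_UNALIGNED_DATA_ACCESS selects between
 * (hypothetical helper; the real opcode/data fetchers appear further down).
 */
DECLINLINE(uint32_t) iemIllustrateReadU32(uint8_t const *pbSrc)
{
#ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pbSrc;                                     /* x86/AMD64 handle unaligned loads directly. */
#else
    return RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);  /* explicit byte-wise assembly. */
#endif
}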
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for EPT faults.
442 */
443# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
444 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for a triple fault.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
450 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
463# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
464
465#endif
466
467#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
468/**
469 * Check if an SVM control/instruction intercept is set.
470 */
471# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
472 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
473
474/**
475 * Check if an SVM read CRx intercept is set.
476 */
477# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM write CRx intercept is set.
482 */
483# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
484 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
485
486/**
487 * Check if an SVM read DRx intercept is set.
488 */
489# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
496 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
497
498/**
499 * Check if an SVM exception intercept is set.
500 */
501# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
502 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
503
504/**
505 * Invokes the SVM \#VMEXIT handler for the nested-guest.
506 */
507# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
508 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
509
510/**
511 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
512 * corresponding decode assist information.
513 */
514# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
515 do \
516 { \
517 uint64_t uExitInfo1; \
518 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
519 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
520 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
521 else \
522 uExitInfo1 = 0; \
523 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
524 } while (0)
525
526/** Checks and handles the SVM nested-guest instruction intercept and updates
527 * the NRIP if needed.
528 */
529# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
530 do \
531 { \
532 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
533 { \
534 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
535 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
536 } \
537 } while (0)
538
539/** Checks and handles SVM nested-guest CR0 read intercept. */
540# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
541 do \
542 { \
543 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
544 { /* probably likely */ } \
545 else \
546 { \
547 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
548 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
549 } \
550 } while (0)
551
552/**
553 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
554 */
555# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
556 do { \
557 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
558 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
559 } while (0)
560
561#else
562# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
563# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
565# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
567# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
568# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
570# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
572# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
573
574#endif
575
576
577/*********************************************************************************************************************************
578* Global Variables *
579*********************************************************************************************************************************/
580extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
581
582
583/** Function table for the ADD instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
585{
586 iemAImpl_add_u8, iemAImpl_add_u8_locked,
587 iemAImpl_add_u16, iemAImpl_add_u16_locked,
588 iemAImpl_add_u32, iemAImpl_add_u32_locked,
589 iemAImpl_add_u64, iemAImpl_add_u64_locked
590};
591
592/** Function table for the ADC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
594{
595 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
596 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
597 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
598 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
599};
600
601/** Function table for the SUB instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
603{
604 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
605 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
606 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
607 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
608};
609
610/** Function table for the SBB instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
612{
613 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
614 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
615 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
616 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
617};
618
619/** Function table for the OR instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
621{
622 iemAImpl_or_u8, iemAImpl_or_u8_locked,
623 iemAImpl_or_u16, iemAImpl_or_u16_locked,
624 iemAImpl_or_u32, iemAImpl_or_u32_locked,
625 iemAImpl_or_u64, iemAImpl_or_u64_locked
626};
627
628/** Function table for the XOR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
630{
631 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
632 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
633 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
634 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
635};
636
637/** Function table for the AND instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
639{
640 iemAImpl_and_u8, iemAImpl_and_u8_locked,
641 iemAImpl_and_u16, iemAImpl_and_u16_locked,
642 iemAImpl_and_u32, iemAImpl_and_u32_locked,
643 iemAImpl_and_u64, iemAImpl_and_u64_locked
644};
645
646/** Function table for the CMP instruction.
647 * @remarks Making operand order ASSUMPTIONS.
648 */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
650{
651 iemAImpl_cmp_u8, NULL,
652 iemAImpl_cmp_u16, NULL,
653 iemAImpl_cmp_u32, NULL,
654 iemAImpl_cmp_u64, NULL
655};
656
657/** Function table for the TEST instruction.
658 * @remarks Making operand order ASSUMPTIONS.
659 */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
661{
662 iemAImpl_test_u8, NULL,
663 iemAImpl_test_u16, NULL,
664 iemAImpl_test_u32, NULL,
665 iemAImpl_test_u64, NULL
666};
667
668/** Function table for the BT instruction. */
669IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
670{
671 NULL, NULL,
672 iemAImpl_bt_u16, NULL,
673 iemAImpl_bt_u32, NULL,
674 iemAImpl_bt_u64, NULL
675};
676
677/** Function table for the BTC instruction. */
678IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
679{
680 NULL, NULL,
681 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
682 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
683 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
684};
685
686/** Function table for the BTR instruction. */
687IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
688{
689 NULL, NULL,
690 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
691 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
692 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
693};
694
695/** Function table for the BTS instruction. */
696IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
697{
698 NULL, NULL,
699 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
700 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
701 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
702};
703
704/** Function table for the BSF instruction. */
705IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
706{
707 NULL, NULL,
708 iemAImpl_bsf_u16, NULL,
709 iemAImpl_bsf_u32, NULL,
710 iemAImpl_bsf_u64, NULL
711};
712
713/** Function table for the BSR instruction. */
714IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
715{
716 NULL, NULL,
717 iemAImpl_bsr_u16, NULL,
718 iemAImpl_bsr_u32, NULL,
719 iemAImpl_bsr_u64, NULL
720};
721
722/** Function table for the IMUL instruction. */
723IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
724{
725 NULL, NULL,
726 iemAImpl_imul_two_u16, NULL,
727 iemAImpl_imul_two_u32, NULL,
728 iemAImpl_imul_two_u64, NULL
729};
730
731/** Group 1 /r lookup table. */
732IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
733{
734 &g_iemAImpl_add,
735 &g_iemAImpl_or,
736 &g_iemAImpl_adc,
737 &g_iemAImpl_sbb,
738 &g_iemAImpl_and,
739 &g_iemAImpl_sub,
740 &g_iemAImpl_xor,
741 &g_iemAImpl_cmp
742};
743
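/*
 * Illustrative sketch: the group 1 table above is indexed by the ModR/M reg
 * field (bits 5:3), matching the /0../7 sub-opcodes of 80h..83h.  The helper
 * name is hypothetical; the real lookup happens in the opcode decoders.
 */
DECLINLINE(PCIEMOPBINSIZES) iemIllustrateGrp1Lookup(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7]; /* /0=ADD /1=OR /2=ADC /3=SBB /4=AND /5=SUB /6=XOR /7=CMP */
}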
744/** Function table for the INC instruction. */
745IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
746{
747 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
748 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
749 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
750 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
751};
752
753/** Function table for the DEC instruction. */
754IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
755{
756 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
757 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
758 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
759 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
760};
761
762/** Function table for the NEG instruction. */
763IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
764{
765 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
766 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
767 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
768 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
769};
770
771/** Function table for the NOT instruction. */
772IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
773{
774 iemAImpl_not_u8, iemAImpl_not_u8_locked,
775 iemAImpl_not_u16, iemAImpl_not_u16_locked,
776 iemAImpl_not_u32, iemAImpl_not_u32_locked,
777 iemAImpl_not_u64, iemAImpl_not_u64_locked
778};
779
780
781/** Function table for the ROL instruction. */
782IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
783{
784 iemAImpl_rol_u8,
785 iemAImpl_rol_u16,
786 iemAImpl_rol_u32,
787 iemAImpl_rol_u64
788};
789
790/** Function table for the ROR instruction. */
791IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
792{
793 iemAImpl_ror_u8,
794 iemAImpl_ror_u16,
795 iemAImpl_ror_u32,
796 iemAImpl_ror_u64
797};
798
799/** Function table for the RCL instruction. */
800IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
801{
802 iemAImpl_rcl_u8,
803 iemAImpl_rcl_u16,
804 iemAImpl_rcl_u32,
805 iemAImpl_rcl_u64
806};
807
808/** Function table for the RCR instruction. */
809IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
810{
811 iemAImpl_rcr_u8,
812 iemAImpl_rcr_u16,
813 iemAImpl_rcr_u32,
814 iemAImpl_rcr_u64
815};
816
817/** Function table for the SHL instruction. */
818IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
819{
820 iemAImpl_shl_u8,
821 iemAImpl_shl_u16,
822 iemAImpl_shl_u32,
823 iemAImpl_shl_u64
824};
825
826/** Function table for the SHR instruction. */
827IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
828{
829 iemAImpl_shr_u8,
830 iemAImpl_shr_u16,
831 iemAImpl_shr_u32,
832 iemAImpl_shr_u64
833};
834
835/** Function table for the SAR instruction. */
836IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
837{
838 iemAImpl_sar_u8,
839 iemAImpl_sar_u16,
840 iemAImpl_sar_u32,
841 iemAImpl_sar_u64
842};
843
844
845/** Function table for the MUL instruction. */
846IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
847{
848 iemAImpl_mul_u8,
849 iemAImpl_mul_u16,
850 iemAImpl_mul_u32,
851 iemAImpl_mul_u64
852};
853
854/** Function table for the IMUL instruction working implicitly on rAX. */
855IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
856{
857 iemAImpl_imul_u8,
858 iemAImpl_imul_u16,
859 iemAImpl_imul_u32,
860 iemAImpl_imul_u64
861};
862
863/** Function table for the DIV instruction. */
864IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
865{
866 iemAImpl_div_u8,
867 iemAImpl_div_u16,
868 iemAImpl_div_u32,
869 iemAImpl_div_u64
870};
871
872/** Function table for the IDIV instruction. */
873IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
874{
875 iemAImpl_idiv_u8,
876 iemAImpl_idiv_u16,
877 iemAImpl_idiv_u32,
878 iemAImpl_idiv_u64
879};
880
881/** Function table for the SHLD instruction */
882IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
883{
884 iemAImpl_shld_u16,
885 iemAImpl_shld_u32,
886 iemAImpl_shld_u64,
887};
888
889/** Function table for the SHRD instruction */
890IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
891{
892 iemAImpl_shrd_u16,
893 iemAImpl_shrd_u32,
894 iemAImpl_shrd_u64,
895};
896
897
898/** Function table for the PUNPCKLBW instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
902/** Function table for the PUNPCKLDQ instruction */
903IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
904/** Function table for the PUNPCKLQDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
906
907/** Function table for the PUNPCKHBW instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
909/** Function table for the PUNPCKHWD instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
911/** Function table for the PUNPCKHDQ instruction */
912IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
913/** Function table for the PUNPCKHQDQ instruction */
914IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
915
916/** Function table for the PXOR instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
918/** Function table for the PCMPEQB instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
920/** Function table for the PCMPEQW instruction */
921IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
922/** Function table for the PCMPEQD instruction */
923IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
924
925
926#if defined(IEM_LOG_MEMORY_WRITES)
927/** What IEM just wrote. */
928uint8_t g_abIemWrote[256];
929/** How much IEM just wrote. */
930size_t g_cbIemWrote;
931#endif
932
933
934/*********************************************************************************************************************************
935* Internal Functions *
936*********************************************************************************************************************************/
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
940IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
941/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
948IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
951IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
952IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
953IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
954#ifdef IEM_WITH_SETJMP
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
959DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
960#endif
961
962IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
971IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
972IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
975IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
976IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
977IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
978DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
979DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
980
981#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
982IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
986IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
987IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
988IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr);
990#endif
991
992#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
993IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
994IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
995#endif
996
997
998/**
999 * Sets the pass up status.
1000 *
1001 * @returns VINF_SUCCESS.
1002 * @param pVCpu The cross context virtual CPU structure of the
1003 * calling thread.
1004 * @param rcPassUp The pass up status. Must be informational.
1005 * VINF_SUCCESS is not allowed.
1006 */
1007IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1008{
1009 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1010
1011 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1012 if (rcOldPassUp == VINF_SUCCESS)
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 /* If both are EM scheduling codes, use EM priority rules. */
1015 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1016 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1017 {
1018 if (rcPassUp < rcOldPassUp)
1019 {
1020 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1021 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1022 }
1023 else
1024 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1025 }
1026 /* Override EM scheduling with specific status code. */
1027 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1028 {
1029 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1030 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1031 }
1032 /* Don't override specific status code, first come first served. */
1033 else
1034 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1035 return VINF_SUCCESS;
1036}
1037
1038
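/*
 * Usage sketch (hypothetical caller): informational read/write statuses are
 * folded into rcPassUp so the caller can carry on with VINF_SUCCESS and the
 * scheduling hint is handed back to EM later; the opcode prefetch code below
 * uses the same pattern.
 */
DECLINLINE(VBOXSTRICTRC) iemIllustrateMergeInfoStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict != VINF_SUCCESS && RT_SUCCESS(rcStrict))
        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); /* always returns VINF_SUCCESS */
    return rcStrict;
}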
1039/**
1040 * Calculates the CPU mode.
1041 *
1042 * This is mainly for updating IEMCPU::enmCpuMode.
1043 *
1044 * @returns CPU mode.
1045 * @param pVCpu The cross context virtual CPU structure of the
1046 * calling thread.
1047 */
1048DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1049{
1050 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1051 return IEMMODE_64BIT;
1052 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1053 return IEMMODE_32BIT;
1054 return IEMMODE_16BIT;
1055}
1056
1057
1058/**
1059 * Initializes the execution state.
1060 *
1061 * @param pVCpu The cross context virtual CPU structure of the
1062 * calling thread.
1063 * @param fBypassHandlers Whether to bypass access handlers.
1064 *
1065 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1066 * side-effects in strict builds.
1067 */
1068DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1069{
1070 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1071 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1080
1081 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1082 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1083#ifdef VBOX_STRICT
1084 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1085 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1086 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1087 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1088 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1089 pVCpu->iem.s.uRexReg = 127;
1090 pVCpu->iem.s.uRexB = 127;
1091 pVCpu->iem.s.offModRm = 127;
1092 pVCpu->iem.s.uRexIndex = 127;
1093 pVCpu->iem.s.iEffSeg = 127;
1094 pVCpu->iem.s.idxPrefix = 127;
1095 pVCpu->iem.s.uVex3rdReg = 127;
1096 pVCpu->iem.s.uVexLength = 127;
1097 pVCpu->iem.s.fEvexStuff = 127;
1098 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1099# ifdef IEM_WITH_CODE_TLB
1100 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1101 pVCpu->iem.s.pbInstrBuf = NULL;
1102 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1103 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1104 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1105 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1106# else
1107 pVCpu->iem.s.offOpcode = 127;
1108 pVCpu->iem.s.cbOpcode = 127;
1109# endif
1110#endif
1111
1112 pVCpu->iem.s.cActiveMappings = 0;
1113 pVCpu->iem.s.iNextMapping = 0;
1114 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1115 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1116#if 0
1117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1118 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1119 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1120 {
1121 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1122 Assert(pVmcs);
1123 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1124 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1125 {
1126 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1127 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1128 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1129 AssertRC(rc);
1130 }
1131 }
1132#endif
1133#endif
1134}
1135
1136#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1137/**
1138 * Performs a minimal reinitialization of the execution state.
1139 *
1140 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1141 * 'world-switch' type operations on the CPU. Currently only nested
1142 * hardware-virtualization uses it.
1143 *
1144 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1145 */
1146IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1147{
1148 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1149 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1150
1151 pVCpu->iem.s.uCpl = uCpl;
1152 pVCpu->iem.s.enmCpuMode = enmMode;
1153 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1154 pVCpu->iem.s.enmEffAddrMode = enmMode;
1155 if (enmMode != IEMMODE_64BIT)
1156 {
1157 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffOpSize = enmMode;
1159 }
1160 else
1161 {
1162 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1163 pVCpu->iem.s.enmEffOpSize = enmMode;
1164 }
1165 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1166#ifndef IEM_WITH_CODE_TLB
1167 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1168 pVCpu->iem.s.offOpcode = 0;
1169 pVCpu->iem.s.cbOpcode = 0;
1170#endif
1171 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1172}
1173#endif
1174
1175/**
1176 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1177 *
1178 * @param pVCpu The cross context virtual CPU structure of the
1179 * calling thread.
1180 */
1181DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1182{
1183 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1184#ifdef VBOX_STRICT
1185# ifdef IEM_WITH_CODE_TLB
1186 NOREF(pVCpu);
1187# else
1188 pVCpu->iem.s.cbOpcode = 0;
1189# endif
1190#else
1191 NOREF(pVCpu);
1192#endif
1193}
1194
1195
1196/**
1197 * Initializes the decoder state.
1198 *
1199 * iemReInitDecoder is mostly a copy of this function.
1200 *
1201 * @param pVCpu The cross context virtual CPU structure of the
1202 * calling thread.
1203 * @param fBypassHandlers Whether to bypass access handlers.
1204 * @param fDisregardLock Whether to disregard the LOCK prefix.
1205 */
1206DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1207{
1208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1209 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1218
1219 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1220 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1221 pVCpu->iem.s.enmCpuMode = enmMode;
1222 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1223 pVCpu->iem.s.enmEffAddrMode = enmMode;
1224 if (enmMode != IEMMODE_64BIT)
1225 {
1226 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1227 pVCpu->iem.s.enmEffOpSize = enmMode;
1228 }
1229 else
1230 {
1231 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1232 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1233 }
1234 pVCpu->iem.s.fPrefixes = 0;
1235 pVCpu->iem.s.uRexReg = 0;
1236 pVCpu->iem.s.uRexB = 0;
1237 pVCpu->iem.s.uRexIndex = 0;
1238 pVCpu->iem.s.idxPrefix = 0;
1239 pVCpu->iem.s.uVex3rdReg = 0;
1240 pVCpu->iem.s.uVexLength = 0;
1241 pVCpu->iem.s.fEvexStuff = 0;
1242 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1243#ifdef IEM_WITH_CODE_TLB
1244 pVCpu->iem.s.pbInstrBuf = NULL;
1245 pVCpu->iem.s.offInstrNextByte = 0;
1246 pVCpu->iem.s.offCurInstrStart = 0;
1247# ifdef VBOX_STRICT
1248 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1249 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1250 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1251# endif
1252#else
1253 pVCpu->iem.s.offOpcode = 0;
1254 pVCpu->iem.s.cbOpcode = 0;
1255#endif
1256 pVCpu->iem.s.offModRm = 0;
1257 pVCpu->iem.s.cActiveMappings = 0;
1258 pVCpu->iem.s.iNextMapping = 0;
1259 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1260 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1261 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1262
1263#ifdef DBGFTRACE_ENABLED
1264 switch (enmMode)
1265 {
1266 case IEMMODE_64BIT:
1267 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1268 break;
1269 case IEMMODE_32BIT:
1270 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1271 break;
1272 case IEMMODE_16BIT:
1273 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1274 break;
1275 }
1276#endif
1277}
1278
1279
1280/**
1281 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1282 *
1283 * This is mostly a copy of iemInitDecoder.
1284 *
1285 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1286 */
1287DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1288{
1289 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1290 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1298
1299 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1300 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1301 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1302 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1303 pVCpu->iem.s.enmEffAddrMode = enmMode;
1304 if (enmMode != IEMMODE_64BIT)
1305 {
1306 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1307 pVCpu->iem.s.enmEffOpSize = enmMode;
1308 }
1309 else
1310 {
1311 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1312 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1313 }
1314 pVCpu->iem.s.fPrefixes = 0;
1315 pVCpu->iem.s.uRexReg = 0;
1316 pVCpu->iem.s.uRexB = 0;
1317 pVCpu->iem.s.uRexIndex = 0;
1318 pVCpu->iem.s.idxPrefix = 0;
1319 pVCpu->iem.s.uVex3rdReg = 0;
1320 pVCpu->iem.s.uVexLength = 0;
1321 pVCpu->iem.s.fEvexStuff = 0;
1322 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1323#ifdef IEM_WITH_CODE_TLB
1324 if (pVCpu->iem.s.pbInstrBuf)
1325 {
1326 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1327 - pVCpu->iem.s.uInstrBufPc;
1328 if (off < pVCpu->iem.s.cbInstrBufTotal)
1329 {
1330 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1331 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1332 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1333 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1334 else
1335 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1336 }
1337 else
1338 {
1339 pVCpu->iem.s.pbInstrBuf = NULL;
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345 }
1346 else
1347 {
1348 pVCpu->iem.s.offInstrNextByte = 0;
1349 pVCpu->iem.s.offCurInstrStart = 0;
1350 pVCpu->iem.s.cbInstrBuf = 0;
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 }
1353#else
1354 pVCpu->iem.s.cbOpcode = 0;
1355 pVCpu->iem.s.offOpcode = 0;
1356#endif
1357 pVCpu->iem.s.offModRm = 0;
1358 Assert(pVCpu->iem.s.cActiveMappings == 0);
1359 pVCpu->iem.s.iNextMapping = 0;
1360 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1361 Assert(pVCpu->iem.s.fBypassHandlers == false);
1362
1363#ifdef DBGFTRACE_ENABLED
1364 switch (enmMode)
1365 {
1366 case IEMMODE_64BIT:
1367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1368 break;
1369 case IEMMODE_32BIT:
1370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1371 break;
1372 case IEMMODE_16BIT:
1373 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1374 break;
1375 }
1376#endif
1377}
1378
1379
1380
1381/**
1382 * Prefetches opcodes the first time when execution is started.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the
1386 * calling thread.
1387 * @param fBypassHandlers Whether to bypass access handlers.
1388 * @param fDisregardLock Whether to disregard LOCK prefixes.
1389 *
1390 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1391 * store them as such.
1392 */
1393IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1394{
1395 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1396
1397#ifdef IEM_WITH_CODE_TLB
1398 /** @todo Do ITLB lookup here. */
1399
1400#else /* !IEM_WITH_CODE_TLB */
1401
1402 /*
1403 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1404 *
1405 * First translate CS:rIP to a physical address.
1406 */
1407 uint32_t cbToTryRead;
1408 RTGCPTR GCPtrPC;
1409 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1410 {
1411 cbToTryRead = GUEST_PAGE_SIZE;
1412 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1413 if (IEM_IS_CANONICAL(GCPtrPC))
1414 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1415 else
1416 return iemRaiseGeneralProtectionFault0(pVCpu);
1417 }
1418 else
1419 {
1420 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1421 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1422 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1423 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1424 else
1425 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1426 if (cbToTryRead) { /* likely */ }
1427 else /* overflowed */
1428 {
1429 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1430 cbToTryRead = UINT32_MAX;
1431 }
1432 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1433 Assert(GCPtrPC <= UINT32_MAX);
1434 }
1435
1436 PGMPTWALK Walk;
1437 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1438 if (RT_SUCCESS(rc))
1439 Assert(Walk.fSucceeded); /* probable. */
1440 else
1441 {
1442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1445 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1446#endif
1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1448 }
1449 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1450 else
1451 {
1452 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1453#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1454 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1455 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1456#endif
1457 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1458 }
1459 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1460 else
1461 {
1462 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1463#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1464 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1465 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1466#endif
1467 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1468 }
1469 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1470 /** @todo Check reserved bits and such stuff. PGM is better at doing
1471 * that, so do it when implementing the guest virtual address
1472 * TLB... */
1473
1474 /*
1475 * Read the bytes at this address.
1476 */
1477 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
1478 if (cbToTryRead > cbLeftOnPage)
1479 cbToTryRead = cbLeftOnPage;
1480 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1481 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1482
1483 if (!pVCpu->iem.s.fBypassHandlers)
1484 {
1485 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1486 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1487 { /* likely */ }
1488 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1491 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1492 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1493 }
1494 else
1495 {
1496 Log((RT_SUCCESS(rcStrict)
1497 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1498 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1499 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1500 return rcStrict;
1501 }
1502 }
1503 else
1504 {
1505 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1506 if (RT_SUCCESS(rc))
1507 { /* likely */ }
1508 else
1509 {
1510 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1511 GCPtrPC, GCPhys, cbToTryRead, rc));
1512 return rc;
1513 }
1514 }
1515 pVCpu->iem.s.cbOpcode = cbToTryRead;
1516#endif /* !IEM_WITH_CODE_TLB */
1517 return VINF_SUCCESS;
1518}
1519
1520
1521/**
1522 * Invalidates the IEM TLBs.
1523 *
1524 * This is called internally as well as by PGM when moving GC mappings.
1525 *
1527 * @param pVCpu The cross context virtual CPU structure of the calling
1528 * thread.
1529 * @param fVmm Set when PGM calls us with a remapping.
1530 */
1531VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1532{
1533#ifdef IEM_WITH_CODE_TLB
1534 pVCpu->iem.s.cbInstrBufTotal = 0;
1535 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1536 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1537 { /* very likely */ }
1538 else
1539 {
1540 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1541 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1542 while (i-- > 0)
1543 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1544 }
1545#endif
1546
1547#ifdef IEM_WITH_DATA_TLB
1548 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1549 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1550 { /* very likely */ }
1551 else
1552 {
1553 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1554 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1555 while (i-- > 0)
1556 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1557 }
1558#endif
1559 NOREF(pVCpu); NOREF(fVmm);
1560}
1561
1562
1563/**
1564 * Invalidates a page in the TLBs.
1565 *
1566 * @param pVCpu The cross context virtual CPU structure of the calling
1567 * thread.
1568 * @param GCPtr The address of the page to invalidate
1569 */
1570VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1571{
1572#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1573 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1574 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1575 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1576 uintptr_t idx = (uint8_t)GCPtr;
1577
1578# ifdef IEM_WITH_CODE_TLB
1579 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1580 {
1581 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1582 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1583 pVCpu->iem.s.cbInstrBufTotal = 0;
1584 }
1585# endif
1586
1587# ifdef IEM_WITH_DATA_TLB
1588 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1589 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1590# endif
1591#else
1592 NOREF(pVCpu); NOREF(GCPtr);
1593#endif
1594}
1595
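/*
 * Illustrative sketch (comment only): the TLBs above are 256-entry direct
 * mapped caches.  A linear address is reduced to its page number, the low
 * 8 bits of that number select the entry, and the entry only counts as a hit
 * when its tag equals the page number OR'ed with the current uTlbRevision.
 * Bumping the revision in IEMTlbInvalidateAll therefore invalidates every
 * entry at once; only a revision wrap-around forces the explicit uTag sweep.
 *
 *   uint64_t const uPageNo = GCPtr >> X86_PAGE_SHIFT;
 *   PIEMTLBENTRY   pTlbe   = &pVCpu->iem.s.DataTlb.aEntries[(uint8_t)uPageNo];
 *   bool const     fHit    = pTlbe->uTag == (uPageNo | pVCpu->iem.s.DataTlb.uTlbRevision);
 */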
1596
1597/**
1598 * Invalidates the host physical aspects of the IEM TLBs.
1599 *
1600 * This is called internally as well as by PGM when moving GC mappings.
1601 *
1602 * @param pVCpu The cross context virtual CPU structure of the calling
1603 * thread.
1604 */
1605VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1606{
1607#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1608 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1609
1610# ifdef IEM_WITH_CODE_TLB
1611 pVCpu->iem.s.cbInstrBufTotal = 0;
1612# endif
1613 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1614 if (uTlbPhysRev != 0)
1615 {
1616 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1617 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1618 }
1619 else
1620 {
1621 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1622 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1623
1624 unsigned i;
1625# ifdef IEM_WITH_CODE_TLB
1626 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1627 while (i-- > 0)
1628 {
1629 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1630 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1631 }
1632# endif
1633# ifdef IEM_WITH_DATA_TLB
1634 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1635 while (i-- > 0)
1636 {
1637 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1638 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1639 }
1640# endif
1641 }
1642#else
1643 NOREF(pVCpu);
1644#endif
1645}
1646
1647
1648/**
1649 * Invalidates the host physical aspects of the IEM TLBs of all CPUs.
1650 *
1651 * This is called internally as well as by PGM when moving GC mappings.
1652 *
1653 * @param pVM The cross context VM structure.
1654 *
1655 * @remarks Caller holds the PGM lock.
1656 */
1657VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1658{
1659 RT_NOREF_PV(pVM);
1660}
1661
1662#ifdef IEM_WITH_CODE_TLB
1663
1664/**
1665 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1666 * longjmp'ing on failure.
1667 *
1668 * We end up here for a number of reasons:
1669 * - pbInstrBuf isn't yet initialized.
1670 * - Advancing beyond the buffer boundary (e.g. cross page).
1671 * - Advancing beyond the CS segment limit.
1672 * - Fetching from non-mappable page (e.g. MMIO).
1673 *
1674 * @param pVCpu The cross context virtual CPU structure of the
1675 * calling thread.
1676 * @param pvDst Where to return the bytes.
1677 * @param cbDst Number of bytes to read.
1678 *
1679 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1680 */
1681IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1682{
1683#ifdef IN_RING3
1684 for (;;)
1685 {
1686 Assert(cbDst <= 8);
1687 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1688
1689 /*
1690 * We might have a partial buffer match, deal with that first to make the
1691 * rest simpler. This is the first part of the cross page/buffer case.
1692 */
1693 if (pVCpu->iem.s.pbInstrBuf != NULL)
1694 {
1695 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1696 {
1697 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1698 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1699 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1700
1701 cbDst -= cbCopy;
1702 pvDst = (uint8_t *)pvDst + cbCopy;
1703 offBuf += cbCopy;
1704 pVCpu->iem.s.offInstrNextByte += cbCopy;
1705 }
1706 }
1707
1708 /*
1709 * Check segment limit, figuring how much we're allowed to access at this point.
1710 *
1711 * We will fault immediately if RIP is past the segment limit / in non-canonical
1712 * territory. If we do continue, there are one or more bytes to read before we
1713 * end up in trouble and we need to do that first before faulting.
1714 */
1715 RTGCPTR GCPtrFirst;
1716 uint32_t cbMaxRead;
1717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1718 {
1719 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1720 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1721 { /* likely */ }
1722 else
1723 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1724 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 }
1726 else
1727 {
1728 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1729 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1730 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1731 { /* likely */ }
1732 else
1733 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1734 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1735 if (cbMaxRead != 0)
1736 { /* likely */ }
1737 else
1738 {
1739 /* Overflowed because address is 0 and limit is max. */
1740 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1741 cbMaxRead = X86_PAGE_SIZE;
1742 }
1743 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1744 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1745 if (cbMaxRead2 < cbMaxRead)
1746 cbMaxRead = cbMaxRead2;
1747 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1748 }
1749
1750 /*
1751 * Get the TLB entry for this piece of code.
1752 */
1753 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1754 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1755 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1756 if (pTlbe->uTag == uTag)
1757 {
1758 /* likely when executing lots of code, otherwise unlikely */
1759# ifdef VBOX_WITH_STATISTICS
1760 pVCpu->iem.s.CodeTlb.cTlbHits++;
1761# endif
1762 }
1763 else
1764 {
1765 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1766 PGMPTWALK Walk;
1767 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
1768 if (RT_FAILURE(rc))
1769 {
1770#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1771 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
1772 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
1773#endif
1774 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1775 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1776 }
1777
1778 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1779 Assert(Walk.fSucceeded);
1780 pTlbe->uTag = uTag;
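 /* Store the page walk result: the inverted US/RW/D bits record which accesses the
    page tables do NOT permit (the IEMTLBE_F_PT_NO_USER check below relies on this),
    and the NX bit is shifted down to bit 0 (IEMTLBE_F_PT_NO_EXEC, see the
    AssertCompile above). */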
1781 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
1782 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
1783 pTlbe->GCPhys = Walk.GCPhys;
1784 pTlbe->pbMappingR3 = NULL;
1785 }
1786
1787 /*
1788 * Check TLB page table level access flags.
1789 */
1790 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1791 {
1792 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1793 {
1794 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1795 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1796 }
1797 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1798 {
1799 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1800 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1801 }
1802 }
1803
1804 /*
1805 * Look up the physical page info if necessary.
1806 */
1807 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1808 { /* not necessary */ }
1809 else
1810 {
1811 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1812 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1813 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1814 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1815 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1816 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1817 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1818 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1819 }
1820
1821# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1822 /*
1823 * Try do a direct read using the pbMappingR3 pointer.
1824 */
1825 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1826 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1827 {
1828 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1829 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
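 /* The 15 below is the architectural maximum length of an x86 instruction; the
    buffer window is capped so the decoder never reads more than one full
    instruction's worth of bytes past the current instruction start. */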
1830 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1831 {
1832 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1833 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1834 }
1835 else
1836 {
1837 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1838 Assert(cbInstr < cbMaxRead);
1839 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1840 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1841 }
1842 if (cbDst <= cbMaxRead)
1843 {
1844 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1845 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1846 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1847 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1848 return;
1849 }
1850 pVCpu->iem.s.pbInstrBuf = NULL;
1851
1852 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1853 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1854 }
1855 else
1856# endif
1857#if 0
1858 /*
1859 * If there is no special read handling, we can read a bit more and
1860 * put it in the prefetch buffer.
1861 */
1862 if ( cbDst < cbMaxRead
1863 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1864 {
1865 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1866 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1867 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1868 { /* likely */ }
1869 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1870 {
1871 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1872 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1873 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1874 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1875 }
1876 else
1877 {
1878 Log((RT_SUCCESS(rcStrict)
1879 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1880 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1881 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1882 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1883 }
1884 }
1885 /*
1886 * Special read handling, so only read exactly what's needed.
1887 * This is a highly unlikely scenario.
1888 */
1889 else
1890#endif
1891 {
1892 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1893 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1894 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1895 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1896 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1897 { /* likely */ }
1898 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1899 {
1900 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1901 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1902 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1903 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1904 }
1905 else
1906 {
1907 Log((RT_SUCCESS(rcStrict)
1908 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1909 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1910 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1911 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1912 }
1913 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1914 if (cbToRead == cbDst)
1915 return;
1916 }
1917
1918 /*
1919 * More to read, loop.
1920 */
1921 cbDst -= cbMaxRead;
1922 pvDst = (uint8_t *)pvDst + cbMaxRead;
1923 }
1924#else
1925 RT_NOREF(pvDst, cbDst);
1926 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1927#endif
1928}
1929
1930#else
1931
1932/**
1933 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1934 * exception if it fails.
1935 *
1936 * @returns Strict VBox status code.
1937 * @param pVCpu The cross context virtual CPU structure of the
1938 * calling thread.
1939 * @param cbMin The minimum number of bytes relative to offOpcode
1940 * that must be read.
1941 */
1942IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1943{
1944 /*
1945 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1946 *
1947 * First translate CS:rIP to a physical address.
1948 */
1949 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1950 uint32_t cbToTryRead;
1951 RTGCPTR GCPtrNext;
1952 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1953 {
1954 cbToTryRead = GUEST_PAGE_SIZE;
1955 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1956 if (!IEM_IS_CANONICAL(GCPtrNext))
1957 return iemRaiseGeneralProtectionFault0(pVCpu);
1958 }
1959 else
1960 {
1961 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1962 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1963 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1964 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1965 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1966 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1967 if (!cbToTryRead) /* overflowed */
1968 {
1969 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1970 cbToTryRead = UINT32_MAX;
1971 /** @todo check out wrapping around the code segment. */
1972 }
1973 if (cbToTryRead < cbMin - cbLeft)
1974 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1975 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1976 }
1977
1978 /* Only read up to the end of the page, and make sure we don't read more
1979 than the opcode buffer can hold. */
1980 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1981 if (cbToTryRead > cbLeftOnPage)
1982 cbToTryRead = cbLeftOnPage;
1983 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1984 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1985/** @todo r=bird: Convert assertion into undefined opcode exception? */
1986 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1987
1988 PGMPTWALK Walk;
1989 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1990 if (RT_FAILURE(rc))
1991 {
1992 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1993#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1994 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1995 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1996#endif
1997 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1998 }
1999 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2000 {
2001 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2002#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2003 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2004 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2005#endif
2006 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2007 }
2008 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2009 {
2010 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2011#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2012 if (Walk.fFailed & PGM_WALKFAIL_EPT)
2013 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
2014#endif
2015 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2016 }
2017 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
2018 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2019 /** @todo Check reserved bits and such stuff. PGM is better at doing
2020 * that, so do it when implementing the guest virtual address
2021 * TLB... */
2022
2023 /*
2024 * Read the bytes at this address.
2025 *
2026 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2027 * and since PATM should only patch the start of an instruction there
2028 * should be no need to check again here.
2029 */
2030 if (!pVCpu->iem.s.fBypassHandlers)
2031 {
2032 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2033 cbToTryRead, PGMACCESSORIGIN_IEM);
2034 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2035 { /* likely */ }
2036 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2037 {
2038 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2039 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2040 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2041 }
2042 else
2043 {
2044 Log((RT_SUCCESS(rcStrict)
2045 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2046 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2047 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2048 return rcStrict;
2049 }
2050 }
2051 else
2052 {
2053 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2054 if (RT_SUCCESS(rc))
2055 { /* likely */ }
2056 else
2057 {
2058 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2059 return rc;
2060 }
2061 }
2062 pVCpu->iem.s.cbOpcode += cbToTryRead;
2063 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2064
2065 return VINF_SUCCESS;
2066}
2067
2068#endif /* !IEM_WITH_CODE_TLB */
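/*
 * Note on the opcode fetchers that follow: they come in two families.  Without
 * IEM_WITH_SETJMP each fetcher returns a strict status code and the
 * IEM_OPCODE_GET_NEXT_* macros propagate failures with a plain 'return'; with
 * IEM_WITH_SETJMP the fetchers return the value directly and report failures by
 * longjmp'ing to pVCpu->iem.s.pJmpBuf.  For every width there is an inlined fast
 * path serving bytes already in the opcode/instruction buffer and a 'Slow'
 * variant that refills it via iemOpcodeFetchMoreBytes / iemOpcodeFetchBytesJmp.
 */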
2069#ifndef IEM_WITH_SETJMP
2070
2071/**
2072 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2073 *
2074 * @returns Strict VBox status code.
2075 * @param pVCpu The cross context virtual CPU structure of the
2076 * calling thread.
2077 * @param pb Where to return the opcode byte.
2078 */
2079DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2080{
2081 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2082 if (rcStrict == VINF_SUCCESS)
2083 {
2084 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2085 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2086 pVCpu->iem.s.offOpcode = offOpcode + 1;
2087 }
2088 else
2089 *pb = 0;
2090 return rcStrict;
2091}
2092
2093
2094/**
2095 * Fetches the next opcode byte.
2096 *
2097 * @returns Strict VBox status code.
2098 * @param pVCpu The cross context virtual CPU structure of the
2099 * calling thread.
2100 * @param pu8 Where to return the opcode byte.
2101 */
2102DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2103{
2104 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2105 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2106 {
2107 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2108 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2109 return VINF_SUCCESS;
2110 }
2111 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2112}
2113
2114#else /* IEM_WITH_SETJMP */
2115
2116/**
2117 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2118 *
2119 * @returns The opcode byte.
2120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2121 */
2122DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2123{
2124# ifdef IEM_WITH_CODE_TLB
2125 uint8_t u8;
2126 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2127 return u8;
2128# else
2129 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2130 if (rcStrict == VINF_SUCCESS)
2131 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2132 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2133# endif
2134}
2135
2136
2137/**
2138 * Fetches the next opcode byte, longjmp on error.
2139 *
2140 * @returns The opcode byte.
2141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2142 */
2143DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2144{
2145# ifdef IEM_WITH_CODE_TLB
2146 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2147 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2148 if (RT_LIKELY( pbBuf != NULL
2149 && offBuf < pVCpu->iem.s.cbInstrBuf))
2150 {
2151 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2152 return pbBuf[offBuf];
2153 }
2154# else
2155 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2156 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2157 {
2158 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2159 return pVCpu->iem.s.abOpcode[offOpcode];
2160 }
2161# endif
2162 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2163}
2164
2165#endif /* IEM_WITH_SETJMP */
2166
2167/**
2168 * Fetches the next opcode byte, returns automatically on failure.
2169 *
2170 * @param a_pu8 Where to return the opcode byte.
2171 * @remark Implicitly references pVCpu.
2172 */
2173#ifndef IEM_WITH_SETJMP
2174# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2175 do \
2176 { \
2177 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2178 if (rcStrict2 == VINF_SUCCESS) \
2179 { /* likely */ } \
2180 else \
2181 return rcStrict2; \
2182 } while (0)
2183#else
2184# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2185#endif /* IEM_WITH_SETJMP */
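/*
 * Usage sketch (iemOpExample is hypothetical, not one of the real opcode
 * workers): the macro implicitly uses the local 'pVCpu' and, in the non-setjmp
 * build, returns the failure status to the caller on its own.
 *
 *   IEM_STATIC VBOXSTRICTRC iemOpExample(PVMCPUCC pVCpu)
 *   {
 *       uint8_t bImm;
 *       IEM_OPCODE_GET_NEXT_U8(&bImm);  // returns rcStrict2 / longjmps on failure
 *       return VINF_SUCCESS;
 *   }
 */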
2186
2187
2188#ifndef IEM_WITH_SETJMP
2189/**
2190 * Fetches the next signed byte from the opcode stream.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 * @param pi8 Where to return the signed byte.
2195 */
2196DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2197{
2198 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2199}
2200#endif /* !IEM_WITH_SETJMP */
2201
2202
2203/**
2204 * Fetches the next signed byte from the opcode stream, returning automatically
2205 * on failure.
2206 *
2207 * @param a_pi8 Where to return the signed byte.
2208 * @remark Implicitly references pVCpu.
2209 */
2210#ifndef IEM_WITH_SETJMP
2211# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2212 do \
2213 { \
2214 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2215 if (rcStrict2 != VINF_SUCCESS) \
2216 return rcStrict2; \
2217 } while (0)
2218#else /* IEM_WITH_SETJMP */
2219# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2220
2221#endif /* IEM_WITH_SETJMP */
2222
2223#ifndef IEM_WITH_SETJMP
2224
2225/**
2226 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2227 *
2228 * @returns Strict VBox status code.
2229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2230 * @param pu16 Where to return the opcode word.
2231 */
2232DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2233{
2234 uint8_t u8;
2235 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2236 if (rcStrict == VINF_SUCCESS)
2237 *pu16 = (int8_t)u8;
2238 return rcStrict;
2239}
2240
2241
2242/**
2243 * Fetches the next signed byte from the opcode stream, extending it to
2244 * unsigned 16-bit.
2245 *
2246 * @returns Strict VBox status code.
2247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2248 * @param pu16 Where to return the unsigned word.
2249 */
2250DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2251{
2252 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2253 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2254 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2255
2256 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2257 pVCpu->iem.s.offOpcode = offOpcode + 1;
2258 return VINF_SUCCESS;
2259}
2260
2261#endif /* !IEM_WITH_SETJMP */
2262
2263/**
2264 * Fetches the next signed byte from the opcode stream, sign-extending it to
2265 * a word and returning automatically on failure.
2266 *
2267 * @param a_pu16 Where to return the word.
2268 * @remark Implicitly references pVCpu.
2269 */
2270#ifndef IEM_WITH_SETJMP
2271# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2272 do \
2273 { \
2274 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2275 if (rcStrict2 != VINF_SUCCESS) \
2276 return rcStrict2; \
2277 } while (0)
2278#else
2279# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2280#endif
2281
2282#ifndef IEM_WITH_SETJMP
2283
2284/**
2285 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2286 *
2287 * @returns Strict VBox status code.
2288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2289 * @param pu32 Where to return the opcode dword.
2290 */
2291DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2292{
2293 uint8_t u8;
2294 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2295 if (rcStrict == VINF_SUCCESS)
2296 *pu32 = (int8_t)u8;
2297 return rcStrict;
2298}
2299
2300
2301/**
2302 * Fetches the next signed byte from the opcode stream, extending it to
2303 * unsigned 32-bit.
2304 *
2305 * @returns Strict VBox status code.
2306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2307 * @param pu32 Where to return the unsigned dword.
2308 */
2309DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2310{
2311 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2312 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2313 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2314
2315 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2316 pVCpu->iem.s.offOpcode = offOpcode + 1;
2317 return VINF_SUCCESS;
2318}
2319
2320#endif /* !IEM_WITH_SETJMP */
2321
2322/**
2323 * Fetches the next signed byte from the opcode stream, sign-extending it to
2324 * a double word and returning automatically on failure.
2325 *
2326 * @param a_pu32 Where to return the double word.
2327 * @remark Implicitly references pVCpu.
2328 */
2329#ifndef IEM_WITH_SETJMP
2330# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2331 do \
2332 { \
2333 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2334 if (rcStrict2 != VINF_SUCCESS) \
2335 return rcStrict2; \
2336 } while (0)
2337#else
2338# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2339#endif
2340
2341#ifndef IEM_WITH_SETJMP
2342
2343/**
2344 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2345 *
2346 * @returns Strict VBox status code.
2347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2348 * @param pu64 Where to return the opcode qword.
2349 */
2350DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2351{
2352 uint8_t u8;
2353 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2354 if (rcStrict == VINF_SUCCESS)
2355 *pu64 = (int8_t)u8;
2356 return rcStrict;
2357}
2358
2359
2360/**
2361 * Fetches the next signed byte from the opcode stream, extending it to
2362 * unsigned 64-bit.
2363 *
2364 * @returns Strict VBox status code.
2365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2366 * @param pu64 Where to return the unsigned qword.
2367 */
2368DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2369{
2370 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2371 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2372 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2373
2374 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2375 pVCpu->iem.s.offOpcode = offOpcode + 1;
2376 return VINF_SUCCESS;
2377}
2378
2379#endif /* !IEM_WITH_SETJMP */
2380
2381
2382/**
2383 * Fetches the next signed byte from the opcode stream, sign-extending it to
2384 * a quad word and returning automatically on failure.
2385 *
2386 * @param a_pu64 Where to return the quad word.
2387 * @remark Implicitly references pVCpu.
2388 */
2389#ifndef IEM_WITH_SETJMP
2390# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2391 do \
2392 { \
2393 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2394 if (rcStrict2 != VINF_SUCCESS) \
2395 return rcStrict2; \
2396 } while (0)
2397#else
2398# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2399#endif
2400
2401
2402#ifndef IEM_WITH_SETJMP
2403/**
2404 * Fetches the next opcode byte, which is a ModR/M byte, noting down its offset.
2405 *
2406 * @returns Strict VBox status code.
2407 * @param pVCpu The cross context virtual CPU structure of the
2408 * calling thread.
2409 * @param pu8 Where to return the opcode byte.
2410 */
2411DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2412{
2413 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2414 pVCpu->iem.s.offModRm = offOpcode;
2415 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2416 {
2417 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2418 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2419 return VINF_SUCCESS;
2420 }
2421 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2422}
2423#else /* IEM_WITH_SETJMP */
2424/**
2425 * Fetches the next opcode byte, which is a ModR/M byte, longjmp on error.
2426 *
2427 * @returns The opcode byte.
2428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2429 */
2430DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2431{
2432# ifdef IEM_WITH_CODE_TLB
2433 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2434 pVCpu->iem.s.offModRm = offBuf;
2435 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2436 if (RT_LIKELY( pbBuf != NULL
2437 && offBuf < pVCpu->iem.s.cbInstrBuf))
2438 {
2439 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2440 return pbBuf[offBuf];
2441 }
2442# else
2443 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2444 pVCpu->iem.s.offModRm = offOpcode;
2445 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2446 {
2447 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2448 return pVCpu->iem.s.abOpcode[offOpcode];
2449 }
2450# endif
2451 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2452}
2453#endif /* IEM_WITH_SETJMP */
2454
2455/**
2456 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2457 * on failure.
2458 *
2459 * Will note down the position of the ModR/M byte for VT-x exits.
2460 *
2461 * @param a_pbRm Where to return the RM opcode byte.
2462 * @remark Implicitly references pVCpu.
2463 */
2464#ifndef IEM_WITH_SETJMP
2465# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2466 do \
2467 { \
2468 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2469 if (rcStrict2 == VINF_SUCCESS) \
2470 { /* likely */ } \
2471 else \
2472 return rcStrict2; \
2473 } while (0)
2474#else
2475# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2476#endif /* IEM_WITH_SETJMP */
2477
2478
2479#ifndef IEM_WITH_SETJMP
2480
2481/**
2482 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2483 *
2484 * @returns Strict VBox status code.
2485 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2486 * @param pu16 Where to return the opcode word.
2487 */
2488DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2489{
2490 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2491 if (rcStrict == VINF_SUCCESS)
2492 {
2493 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2494# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2495 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2496# else
2497 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2498# endif
2499 pVCpu->iem.s.offOpcode = offOpcode + 2;
2500 }
2501 else
2502 *pu16 = 0;
2503 return rcStrict;
2504}
2505
2506
2507/**
2508 * Fetches the next opcode word.
2509 *
2510 * @returns Strict VBox status code.
2511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2512 * @param pu16 Where to return the opcode word.
2513 */
2514DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2515{
2516 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2517 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2518 {
2519 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2520# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2521 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2522# else
2523 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2524# endif
2525 return VINF_SUCCESS;
2526 }
2527 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2528}
2529
2530#else /* IEM_WITH_SETJMP */
2531
2532/**
2533 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2534 *
2535 * @returns The opcode word.
2536 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2537 */
2538DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2539{
2540# ifdef IEM_WITH_CODE_TLB
2541 uint16_t u16;
2542 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2543 return u16;
2544# else
2545 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2546 if (rcStrict == VINF_SUCCESS)
2547 {
2548 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2549 pVCpu->iem.s.offOpcode += 2;
2550# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2551 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2552# else
2553 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2554# endif
2555 }
2556 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2557# endif
2558}
2559
2560
2561/**
2562 * Fetches the next opcode word, longjmp on error.
2563 *
2564 * @returns The opcode word.
2565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2566 */
2567DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2568{
2569# ifdef IEM_WITH_CODE_TLB
2570 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2571 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2572 if (RT_LIKELY( pbBuf != NULL
2573 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2574 {
2575 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2576# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2577 return *(uint16_t const *)&pbBuf[offBuf];
2578# else
2579 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2580# endif
2581 }
2582# else
2583 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2584 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2585 {
2586 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2587# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2588 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2589# else
2590 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2591# endif
2592 }
2593# endif
2594 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2595}
2596
2597#endif /* IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word, returns automatically on failure.
2602 *
2603 * @param a_pu16 Where to return the opcode word.
2604 * @remark Implicitly references pVCpu.
2605 */
2606#ifndef IEM_WITH_SETJMP
2607# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2608 do \
2609 { \
2610 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2611 if (rcStrict2 != VINF_SUCCESS) \
2612 return rcStrict2; \
2613 } while (0)
2614#else
2615# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2616#endif
2617
2618#ifndef IEM_WITH_SETJMP
2619
2620/**
2621 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2622 *
2623 * @returns Strict VBox status code.
2624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2625 * @param pu32 Where to return the opcode double word.
2626 */
2627DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2628{
2629 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2630 if (rcStrict == VINF_SUCCESS)
2631 {
2632 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2633 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2634 pVCpu->iem.s.offOpcode = offOpcode + 2;
2635 }
2636 else
2637 *pu32 = 0;
2638 return rcStrict;
2639}
2640
2641
2642/**
2643 * Fetches the next opcode word, zero extending it to a double word.
2644 *
2645 * @returns Strict VBox status code.
2646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2647 * @param pu32 Where to return the opcode double word.
2648 */
2649DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2650{
2651 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2652 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2653 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2654
2655 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2656 pVCpu->iem.s.offOpcode = offOpcode + 2;
2657 return VINF_SUCCESS;
2658}
2659
2660#endif /* !IEM_WITH_SETJMP */
2661
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a double word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu32 Where to return the opcode double word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682#ifndef IEM_WITH_SETJMP
2683
2684/**
2685 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pu64 Where to return the opcode quad word.
2690 */
2691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2692{
2693 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2694 if (rcStrict == VINF_SUCCESS)
2695 {
2696 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2697 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2698 pVCpu->iem.s.offOpcode = offOpcode + 2;
2699 }
2700 else
2701 *pu64 = 0;
2702 return rcStrict;
2703}
2704
2705
2706/**
2707 * Fetches the next opcode word, zero extending it to a quad word.
2708 *
2709 * @returns Strict VBox status code.
2710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2711 * @param pu64 Where to return the opcode quad word.
2712 */
2713DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2714{
2715 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2716 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2717 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2718
2719 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2720 pVCpu->iem.s.offOpcode = offOpcode + 2;
2721 return VINF_SUCCESS;
2722}
2723
2724#endif /* !IEM_WITH_SETJMP */
2725
2726/**
2727 * Fetches the next opcode word and zero extends it to a quad word, returns
2728 * automatically on failure.
2729 *
2730 * @param a_pu64 Where to return the opcode quad word.
2731 * @remark Implicitly references pVCpu.
2732 */
2733#ifndef IEM_WITH_SETJMP
2734# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2735 do \
2736 { \
2737 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2738 if (rcStrict2 != VINF_SUCCESS) \
2739 return rcStrict2; \
2740 } while (0)
2741#else
2742# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2743#endif
2744
2745
2746#ifndef IEM_WITH_SETJMP
2747/**
2748 * Fetches the next signed word from the opcode stream.
2749 *
2750 * @returns Strict VBox status code.
2751 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2752 * @param pi16 Where to return the signed word.
2753 */
2754DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2755{
2756 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2757}
2758#endif /* !IEM_WITH_SETJMP */
2759
2760
2761/**
2762 * Fetches the next signed word from the opcode stream, returning automatically
2763 * on failure.
2764 *
2765 * @param a_pi16 Where to return the signed word.
2766 * @remark Implicitly references pVCpu.
2767 */
2768#ifndef IEM_WITH_SETJMP
2769# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2770 do \
2771 { \
2772 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2773 if (rcStrict2 != VINF_SUCCESS) \
2774 return rcStrict2; \
2775 } while (0)
2776#else
2777# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2778#endif
2779
2780#ifndef IEM_WITH_SETJMP
2781
2782/**
2783 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2784 *
2785 * @returns Strict VBox status code.
2786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2787 * @param pu32 Where to return the opcode dword.
2788 */
2789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2790{
2791 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2792 if (rcStrict == VINF_SUCCESS)
2793 {
2794 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2795# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2796 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2797# else
2798 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2799 pVCpu->iem.s.abOpcode[offOpcode + 1],
2800 pVCpu->iem.s.abOpcode[offOpcode + 2],
2801 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2802# endif
2803 pVCpu->iem.s.offOpcode = offOpcode + 4;
2804 }
2805 else
2806 *pu32 = 0;
2807 return rcStrict;
2808}
2809
2810
2811/**
2812 * Fetches the next opcode dword.
2813 *
2814 * @returns Strict VBox status code.
2815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2816 * @param pu32 Where to return the opcode double word.
2817 */
2818DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2819{
2820 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2821 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2822 {
2823 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2824# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2825 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2826# else
2827 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2828 pVCpu->iem.s.abOpcode[offOpcode + 1],
2829 pVCpu->iem.s.abOpcode[offOpcode + 2],
2830 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2831# endif
2832 return VINF_SUCCESS;
2833 }
2834 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2835}
2836
2837#else /* IEM_WITH_SETJMP */
2838
2839/**
2840 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2841 *
2842 * @returns The opcode dword.
2843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2844 */
2845DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2846{
2847# ifdef IEM_WITH_CODE_TLB
2848 uint32_t u32;
2849 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2850 return u32;
2851# else
2852 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2853 if (rcStrict == VINF_SUCCESS)
2854 {
2855 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2856 pVCpu->iem.s.offOpcode = offOpcode + 4;
2857# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2858 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2859# else
2860 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2861 pVCpu->iem.s.abOpcode[offOpcode + 1],
2862 pVCpu->iem.s.abOpcode[offOpcode + 2],
2863 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2864# endif
2865 }
2866 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2867# endif
2868}
2869
2870
2871/**
2872 * Fetches the next opcode dword, longjmp on error.
2873 *
2874 * @returns The opcode dword.
2875 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2876 */
2877DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2878{
2879# ifdef IEM_WITH_CODE_TLB
2880 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2881 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2882 if (RT_LIKELY( pbBuf != NULL
2883 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2884 {
2885 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2886# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2887 return *(uint32_t const *)&pbBuf[offBuf];
2888# else
2889 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2890 pbBuf[offBuf + 1],
2891 pbBuf[offBuf + 2],
2892 pbBuf[offBuf + 3]);
2893# endif
2894 }
2895# else
2896 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2897 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2898 {
2899 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2900# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2901 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2902# else
2903 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2904 pVCpu->iem.s.abOpcode[offOpcode + 1],
2905 pVCpu->iem.s.abOpcode[offOpcode + 2],
2906 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2907# endif
2908 }
2909# endif
2910 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2911}
2912
2913#endif /* IEM_WITH_SETJMP */
2914
2915
2916/**
2917 * Fetches the next opcode dword, returns automatically on failure.
2918 *
2919 * @param a_pu32 Where to return the opcode dword.
2920 * @remark Implicitly references pVCpu.
2921 */
2922#ifndef IEM_WITH_SETJMP
2923# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2924 do \
2925 { \
2926 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2927 if (rcStrict2 != VINF_SUCCESS) \
2928 return rcStrict2; \
2929 } while (0)
2930#else
2931# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2932#endif
2933
2934#ifndef IEM_WITH_SETJMP
2935
2936/**
2937 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2938 *
2939 * @returns Strict VBox status code.
2940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2941 * @param pu64 Where to return the opcode quad word.
2942 */
2943DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2944{
2945 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2946 if (rcStrict == VINF_SUCCESS)
2947 {
2948 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2949 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2950 pVCpu->iem.s.abOpcode[offOpcode + 1],
2951 pVCpu->iem.s.abOpcode[offOpcode + 2],
2952 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2953 pVCpu->iem.s.offOpcode = offOpcode + 4;
2954 }
2955 else
2956 *pu64 = 0;
2957 return rcStrict;
2958}
2959
2960
2961/**
2962 * Fetches the next opcode dword, zero extending it to a quad word.
2963 *
2964 * @returns Strict VBox status code.
2965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2966 * @param pu64 Where to return the opcode quad word.
2967 */
2968DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2969{
2970 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2971 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2972 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2973
2974 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2975 pVCpu->iem.s.abOpcode[offOpcode + 1],
2976 pVCpu->iem.s.abOpcode[offOpcode + 2],
2977 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2978 pVCpu->iem.s.offOpcode = offOpcode + 4;
2979 return VINF_SUCCESS;
2980}
2981
2982#endif /* !IEM_WITH_SETJMP */
2983
2984
2985/**
2986 * Fetches the next opcode dword and zero extends it to a quad word, returns
2987 * automatically on failure.
2988 *
2989 * @param a_pu64 Where to return the opcode quad word.
2990 * @remark Implicitly references pVCpu.
2991 */
2992#ifndef IEM_WITH_SETJMP
2993# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2994 do \
2995 { \
2996 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2997 if (rcStrict2 != VINF_SUCCESS) \
2998 return rcStrict2; \
2999 } while (0)
3000#else
3001# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3002#endif
3003
3004
3005#ifndef IEM_WITH_SETJMP
3006/**
3007 * Fetches the next signed double word from the opcode stream.
3008 *
3009 * @returns Strict VBox status code.
3010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3011 * @param pi32 Where to return the signed double word.
3012 */
3013DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
3014{
3015 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3016}
3017#endif
3018
3019/**
3020 * Fetches the next signed double word from the opcode stream, returning
3021 * automatically on failure.
3022 *
3023 * @param a_pi32 Where to return the signed double word.
3024 * @remark Implicitly references pVCpu.
3025 */
3026#ifndef IEM_WITH_SETJMP
3027# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3028 do \
3029 { \
3030 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3031 if (rcStrict2 != VINF_SUCCESS) \
3032 return rcStrict2; \
3033 } while (0)
3034#else
3035# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3036#endif
3037
3038#ifndef IEM_WITH_SETJMP
3039
3040/**
3041 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3042 *
3043 * @returns Strict VBox status code.
3044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3045 * @param pu64 Where to return the opcode qword.
3046 */
3047DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3048{
3049 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3050 if (rcStrict == VINF_SUCCESS)
3051 {
3052 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3053 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3054 pVCpu->iem.s.abOpcode[offOpcode + 1],
3055 pVCpu->iem.s.abOpcode[offOpcode + 2],
3056 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3057 pVCpu->iem.s.offOpcode = offOpcode + 4;
3058 }
3059 else
3060 *pu64 = 0;
3061 return rcStrict;
3062}
3063
3064
3065/**
3066 * Fetches the next opcode dword, sign extending it into a quad word.
3067 *
3068 * @returns Strict VBox status code.
3069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3070 * @param pu64 Where to return the opcode quad word.
3071 */
3072DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3073{
3074 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3075 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3076 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3077
3078 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3079 pVCpu->iem.s.abOpcode[offOpcode + 1],
3080 pVCpu->iem.s.abOpcode[offOpcode + 2],
3081 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3082 *pu64 = i32;
3083 pVCpu->iem.s.offOpcode = offOpcode + 4;
3084 return VINF_SUCCESS;
3085}
3086
3087#endif /* !IEM_WITH_SETJMP */
3088
3089
3090/**
3091 * Fetches the next opcode double word and sign extends it to a quad word,
3092 * returns automatically on failure.
3093 *
3094 * @param a_pu64 Where to return the opcode quad word.
3095 * @remark Implicitly references pVCpu.
3096 */
3097#ifndef IEM_WITH_SETJMP
3098# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3099 do \
3100 { \
3101 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3102 if (rcStrict2 != VINF_SUCCESS) \
3103 return rcStrict2; \
3104 } while (0)
3105#else
3106# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3107#endif
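
/*
 * Illustrative sketch, not part of this file's build: the sign extension done
 * by the S32->U64 fetchers above (the usual imm32 handling in 64-bit code),
 * again as a standalone helper.  The example* names are made up for this
 * snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Assembles a little-endian dword and sign extends it to 64 bits. */
static uint64_t exampleGetS32SxU64(const uint8_t *pbOpcode, size_t offOpcode)
{
    uint32_t const u32 =  (uint32_t)pbOpcode[offOpcode]
                       | ((uint32_t)pbOpcode[offOpcode + 1] <<  8)
                       | ((uint32_t)pbOpcode[offOpcode + 2] << 16)
                       | ((uint32_t)pbOpcode[offOpcode + 3] << 24);
    return (uint64_t)(int64_t)(int32_t)u32; /* bit 31 is replicated into bits 63:32 */
}

int main(void)
{
    static const uint8_t s_abNeg[4] = { 0xfc, 0xff, 0xff, 0xff }; /* -4 */
    static const uint8_t s_abPos[4] = { 0x04, 0x00, 0x00, 0x00 }; /* +4 */
    assert(exampleGetS32SxU64(s_abNeg, 0) == UINT64_C(0xfffffffffffffffc));
    assert(exampleGetS32SxU64(s_abPos, 0) == UINT64_C(0x0000000000000004));
    return 0;
}
#endif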
3108
3109#ifndef IEM_WITH_SETJMP
3110
3111/**
3112 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3113 *
3114 * @returns Strict VBox status code.
3115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3116 * @param pu64 Where to return the opcode qword.
3117 */
3118DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3119{
3120 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3121 if (rcStrict == VINF_SUCCESS)
3122 {
3123 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3124# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3125 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3126# else
3127 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3128 pVCpu->iem.s.abOpcode[offOpcode + 1],
3129 pVCpu->iem.s.abOpcode[offOpcode + 2],
3130 pVCpu->iem.s.abOpcode[offOpcode + 3],
3131 pVCpu->iem.s.abOpcode[offOpcode + 4],
3132 pVCpu->iem.s.abOpcode[offOpcode + 5],
3133 pVCpu->iem.s.abOpcode[offOpcode + 6],
3134 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3135# endif
3136 pVCpu->iem.s.offOpcode = offOpcode + 8;
3137 }
3138 else
3139 *pu64 = 0;
3140 return rcStrict;
3141}
3142
3143
3144/**
3145 * Fetches the next opcode qword.
3146 *
3147 * @returns Strict VBox status code.
3148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3149 * @param pu64 Where to return the opcode qword.
3150 */
3151DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3152{
3153 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3154 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3155 {
3156# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3157 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3158# else
3159 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3160 pVCpu->iem.s.abOpcode[offOpcode + 1],
3161 pVCpu->iem.s.abOpcode[offOpcode + 2],
3162 pVCpu->iem.s.abOpcode[offOpcode + 3],
3163 pVCpu->iem.s.abOpcode[offOpcode + 4],
3164 pVCpu->iem.s.abOpcode[offOpcode + 5],
3165 pVCpu->iem.s.abOpcode[offOpcode + 6],
3166 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3167# endif
3168 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3169 return VINF_SUCCESS;
3170 }
3171 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3172}
3173
3174#else /* IEM_WITH_SETJMP */
3175
3176/**
3177 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3178 *
3179 * @returns The opcode qword.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 */
3182DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3183{
3184# ifdef IEM_WITH_CODE_TLB
3185 uint64_t u64;
3186 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3187 return u64;
3188# else
3189 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3190 if (rcStrict == VINF_SUCCESS)
3191 {
3192 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3193 pVCpu->iem.s.offOpcode = offOpcode + 8;
3194# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3195 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3196# else
3197 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3198 pVCpu->iem.s.abOpcode[offOpcode + 1],
3199 pVCpu->iem.s.abOpcode[offOpcode + 2],
3200 pVCpu->iem.s.abOpcode[offOpcode + 3],
3201 pVCpu->iem.s.abOpcode[offOpcode + 4],
3202 pVCpu->iem.s.abOpcode[offOpcode + 5],
3203 pVCpu->iem.s.abOpcode[offOpcode + 6],
3204 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3205# endif
3206 }
3207 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3208# endif
3209}
3210
3211
3212/**
3213 * Fetches the next opcode qword, longjmp on error.
3214 *
3215 * @returns The opcode qword.
3216 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3217 */
3218DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3219{
3220# ifdef IEM_WITH_CODE_TLB
3221 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3222 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3223 if (RT_LIKELY( pbBuf != NULL
3224 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3225 {
3226 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3227# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3228 return *(uint64_t const *)&pbBuf[offBuf];
3229# else
3230 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3231 pbBuf[offBuf + 1],
3232 pbBuf[offBuf + 2],
3233 pbBuf[offBuf + 3],
3234 pbBuf[offBuf + 4],
3235 pbBuf[offBuf + 5],
3236 pbBuf[offBuf + 6],
3237 pbBuf[offBuf + 7]);
3238# endif
3239 }
3240# else
3241 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3242 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3243 {
3244 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3245# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3246 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3247# else
3248 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3249 pVCpu->iem.s.abOpcode[offOpcode + 1],
3250 pVCpu->iem.s.abOpcode[offOpcode + 2],
3251 pVCpu->iem.s.abOpcode[offOpcode + 3],
3252 pVCpu->iem.s.abOpcode[offOpcode + 4],
3253 pVCpu->iem.s.abOpcode[offOpcode + 5],
3254 pVCpu->iem.s.abOpcode[offOpcode + 6],
3255 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3256# endif
3257 }
3258# endif
3259 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3260}
3261
3262#endif /* IEM_WITH_SETJMP */
3263
3264/**
3265 * Fetches the next opcode quad word, returns automatically on failure.
3266 *
3267 * @param a_pu64 Where to return the opcode quad word.
3268 * @remark Implicitly references pVCpu.
3269 */
3270#ifndef IEM_WITH_SETJMP
3271# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3272 do \
3273 { \
3274 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3275 if (rcStrict2 != VINF_SUCCESS) \
3276 return rcStrict2; \
3277 } while (0)
3278#else
3279# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3280#endif
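
/*
 * Illustrative sketch, not part of this file's build: the error propagation
 * pattern behind the two macro variants above.  Without IEM_WITH_SETJMP every
 * fetch returns a status code that the macro checks and returns; with it the
 * failure path longjmp's, so the decoder body needs no per-fetch checks.  The
 * sketch below shows the setjmp/longjmp flavour with invented names and plain
 * libc only.
 */
#if 0 /* standalone example */
#include <setjmp.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static jmp_buf g_ExampleJmpBuf; /* stand-in for the per-VCPU jump buffer */

/* Fetches a byte or bails out via longjmp when the buffer is exhausted. */
static uint8_t exampleFetchU8OrJmp(const uint8_t *pb, size_t off, size_t cb)
{
    if (off < cb)
        return pb[off];
    longjmp(g_ExampleJmpBuf, -1 /* an error status, VERR_xxx style */);
}

int main(void)
{
    static const uint8_t s_abOpcode[2] = { 0x0f, 0x05 };
    int rc = setjmp(g_ExampleJmpBuf);
    if (rc == 0)
    {
        /* Decoder body: straight-line code, no status checks between fetches. */
        uint8_t b0 = exampleFetchU8OrJmp(s_abOpcode, 0, sizeof(s_abOpcode));
        uint8_t b1 = exampleFetchU8OrJmp(s_abOpcode, 1, sizeof(s_abOpcode));
        uint8_t b2 = exampleFetchU8OrJmp(s_abOpcode, 2, sizeof(s_abOpcode)); /* out of bytes -> longjmp */
        printf("decoded %#x %#x %#x\n", (unsigned)b0, (unsigned)b1, (unsigned)b2);
    }
    else
        printf("fetch failed, rc=%d\n", rc);
    return 0;
}
#endif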
3281
3282
3283/** @name Misc Worker Functions.
3284 * @{
3285 */
3286
3287/**
3288 * Gets the exception class for the specified exception vector.
3289 *
3290 * @returns The class of the specified exception.
3291 * @param uVector The exception vector.
3292 */
3293IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3294{
3295 Assert(uVector <= X86_XCPT_LAST);
3296 switch (uVector)
3297 {
3298 case X86_XCPT_DE:
3299 case X86_XCPT_TS:
3300 case X86_XCPT_NP:
3301 case X86_XCPT_SS:
3302 case X86_XCPT_GP:
3303 case X86_XCPT_SX: /* AMD only */
3304 return IEMXCPTCLASS_CONTRIBUTORY;
3305
3306 case X86_XCPT_PF:
3307 case X86_XCPT_VE: /* Intel only */
3308 return IEMXCPTCLASS_PAGE_FAULT;
3309
3310 case X86_XCPT_DF:
3311 return IEMXCPTCLASS_DOUBLE_FAULT;
3312 }
3313 return IEMXCPTCLASS_BENIGN;
3314}
3315
3316
3317/**
3318 * Evaluates how to handle an exception caused during delivery of another event
3319 * (exception / interrupt).
3320 *
3321 * @returns How to handle the recursive exception.
3322 * @param pVCpu The cross context virtual CPU structure of the
3323 * calling thread.
3324 * @param fPrevFlags The flags of the previous event.
3325 * @param uPrevVector The vector of the previous event.
3326 * @param fCurFlags The flags of the current exception.
3327 * @param uCurVector The vector of the current exception.
3328 * @param pfXcptRaiseInfo Where to store additional information about the
3329 * exception condition. Optional.
3330 */
3331VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3332 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3333{
3334 /*
3335 * Only CPU exceptions can be raised while delivering other events; software interrupt
3336 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3337 */
3338 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3339 Assert(pVCpu); RT_NOREF(pVCpu);
3340 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3341
3342 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3343 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3344 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3345 {
3346 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3347 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3348 {
3349 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3350 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3351 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3352 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3353 {
3354 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3355 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3356 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3357 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3358 uCurVector, pVCpu->cpum.GstCtx.cr2));
3359 }
3360 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3361 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3362 {
3363 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3364 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3365 }
3366 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3367 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3368 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3369 {
3370 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3371 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3372 }
3373 }
3374 else
3375 {
3376 if (uPrevVector == X86_XCPT_NMI)
3377 {
3378 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3379 if (uCurVector == X86_XCPT_PF)
3380 {
3381 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3382 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3383 }
3384 }
3385 else if ( uPrevVector == X86_XCPT_AC
3386 && uCurVector == X86_XCPT_AC)
3387 {
3388 enmRaise = IEMXCPTRAISE_CPU_HANG;
3389 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3390 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3391 }
3392 }
3393 }
3394 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3395 {
3396 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3397 if (uCurVector == X86_XCPT_PF)
3398 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3399 }
3400 else
3401 {
3402 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3403 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3404 }
3405
3406 if (pfXcptRaiseInfo)
3407 *pfXcptRaiseInfo = fRaiseInfo;
3408 return enmRaise;
3409}
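
/*
 * Illustrative sketch, not part of this file's build: the benign /
 * contributory / page-fault class matrix applied above for the
 * CPU-exception-during-CPU-exception case, reduced to a standalone decision
 * function.  The example* names and enums are made up for this snippet only.
 */
#if 0 /* standalone example */
#include <stdio.h>

typedef enum { EXAMPLEXCPT_BENIGN, EXAMPLEXCPT_CONTRIBUTORY, EXAMPLEXCPT_PAGE_FAULT, EXAMPLEXCPT_DOUBLE_FAULT } EXAMPLEXCPTCLASS;
typedef enum { EXAMPLERAISE_CURRENT, EXAMPLERAISE_DOUBLE_FAULT, EXAMPLERAISE_TRIPLE_FAULT } EXAMPLERAISE;

static EXAMPLERAISE exampleEvalRecursiveXcpt(EXAMPLEXCPTCLASS enmPrev, EXAMPLEXCPTCLASS enmCur)
{
    if (   enmPrev == EXAMPLEXCPT_PAGE_FAULT
        && (enmCur == EXAMPLEXCPT_PAGE_FAULT || enmCur == EXAMPLEXCPT_CONTRIBUTORY))
        return EXAMPLERAISE_DOUBLE_FAULT;   /* vectoring page fault */
    if (   enmPrev == EXAMPLEXCPT_CONTRIBUTORY
        && enmCur  == EXAMPLEXCPT_CONTRIBUTORY)
        return EXAMPLERAISE_DOUBLE_FAULT;
    if (   enmPrev == EXAMPLEXCPT_DOUBLE_FAULT
        && (enmCur == EXAMPLEXCPT_CONTRIBUTORY || enmCur == EXAMPLEXCPT_PAGE_FAULT))
        return EXAMPLERAISE_TRIPLE_FAULT;   /* the #DF handler faulted again */
    return EXAMPLERAISE_CURRENT;            /* benign combinations just deliver the second exception */
}

int main(void)
{
    printf("#PF while delivering #PF -> %d (expect %d)\n",
           exampleEvalRecursiveXcpt(EXAMPLEXCPT_PAGE_FAULT, EXAMPLEXCPT_PAGE_FAULT), EXAMPLERAISE_DOUBLE_FAULT);
    printf("#GP while delivering #DF -> %d (expect %d)\n",
           exampleEvalRecursiveXcpt(EXAMPLEXCPT_DOUBLE_FAULT, EXAMPLEXCPT_CONTRIBUTORY), EXAMPLERAISE_TRIPLE_FAULT);
    return 0;
}
#endif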
3410
3411
3412/**
3413 * Enters the CPU shutdown state initiated by a triple fault or other
3414 * unrecoverable conditions.
3415 *
3416 * @returns Strict VBox status code.
3417 * @param pVCpu The cross context virtual CPU structure of the
3418 * calling thread.
3419 */
3420IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3421{
3422 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3423 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3424
3425 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3426 {
3427 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3428 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3429 }
3430
3431 RT_NOREF(pVCpu);
3432 return VINF_EM_TRIPLE_FAULT;
3433}
3434
3435
3436/**
3437 * Validates a new SS segment.
3438 *
3439 * @returns VBox strict status code.
3440 * @param pVCpu The cross context virtual CPU structure of the
3441 * calling thread.
3442 * @param NewSS The new SS selector.
3443 * @param uCpl The CPL to load the stack for.
3444 * @param pDesc Where to return the descriptor.
3445 */
3446IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3447{
3448 /* Null selectors are not allowed (we're not called for dispatching
3449 interrupts with SS=0 in long mode). */
3450 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3451 {
3452 Log(("iemMiscValidateNewSS: %#x - null selector -> #TS(0)\n", NewSS));
3453 return iemRaiseTaskSwitchFault0(pVCpu);
3454 }
3455
3456 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3457 if ((NewSS & X86_SEL_RPL) != uCpl)
3458 {
3459 Log(("iemMiscValidateNewSS: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3460 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3461 }
3462
3463 /*
3464 * Read the descriptor.
3465 */
3466 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3467 if (rcStrict != VINF_SUCCESS)
3468 return rcStrict;
3469
3470 /*
3471 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3472 */
3473 if (!pDesc->Legacy.Gen.u1DescType)
3474 {
3475 Log(("iemMiscValidateNewSS: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3476 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3477 }
3478
3479 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3480 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3481 {
3482 Log(("iemMiscValidateNewSS: %#x - code or read-only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3483 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3484 }
3485 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3486 {
3487 Log(("iemMiscValidateNewSS: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3488 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3489 }
3490
3491 /* Is it there? */
3492 /** @todo testcase: Is this checked before the canonical / limit check below? */
3493 if (!pDesc->Legacy.Gen.u1Present)
3494 {
3495 Log(("iemMiscValidateNewSS: %#x - segment not present -> #NP\n", NewSS));
3496 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3497 }
3498
3499 return VINF_SUCCESS;
3500}
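
/*
 * Illustrative sketch, not part of this file's build: the descriptor checks
 * above, expressed over the raw access byte (byte 5 of a legacy descriptor:
 * P=bit 7, DPL=bits 6:5, S=bit 4, type=bits 3:0).  The example* names are
 * made up for this snippet only, and the real code of course raises the
 * proper #TS/#NP rather than returning a bool.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool exampleIsAcceptableSs(uint8_t bAccess, uint8_t uCpl)
{
    uint8_t const fPresent  = (bAccess >> 7) & 1;
    uint8_t const uDpl      = (bAccess >> 5) & 3;
    uint8_t const fCodeData = (bAccess >> 4) & 1;   /* S bit: 1 = code/data, 0 = system */
    uint8_t const uType     = bAccess & 0xf;

    if (!fCodeData)      return false;              /* system selector        */
    if (uType & 0x8)     return false;              /* code segment           */
    if (!(uType & 0x2))  return false;              /* not writable           */
    if (uDpl != uCpl)    return false;              /* DPL must equal CPL     */
    if (!fPresent)       return false;              /* would be #NP, not #TS  */
    return true;
}

int main(void)
{
    printf("ring-0 writable data, present: %d\n", exampleIsAcceptableSs(0x92, 0)); /* expect 1 */
    printf("ring-3 read-only data:         %d\n", exampleIsAcceptableSs(0xf0, 3)); /* expect 0 */
    return 0;
}
#endif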
3501
3502
3503/**
3504 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3505 * not (kind of obsolete now).
3506 *
3507 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3508 */
3509#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3510
3511/**
3512 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3513 *
3514 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3515 * @param a_fEfl The new EFLAGS.
3516 */
3517#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3518
3519/** @} */
3520
3521
3522/** @name Raising Exceptions.
3523 *
3524 * @{
3525 */
3526
3527
3528/**
3529 * Loads the specified stack far pointer from the TSS.
3530 *
3531 * @returns VBox strict status code.
3532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3533 * @param uCpl The CPL to load the stack for.
3534 * @param pSelSS Where to return the new stack segment.
3535 * @param puEsp Where to return the new stack pointer.
3536 */
3537IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3538{
3539 VBOXSTRICTRC rcStrict;
3540 Assert(uCpl < 4);
3541
3542 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3543 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3544 {
3545 /*
3546 * 16-bit TSS (X86TSS16).
3547 */
3548 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3549 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3550 {
3551 uint32_t off = uCpl * 4 + 2;
3552 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3553 {
3554 /** @todo check actual access pattern here. */
3555 uint32_t u32Tmp = 0; /* make gcc happy */
3556 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3557 if (rcStrict == VINF_SUCCESS)
3558 {
3559 *puEsp = RT_LOWORD(u32Tmp);
3560 *pSelSS = RT_HIWORD(u32Tmp);
3561 return VINF_SUCCESS;
3562 }
3563 }
3564 else
3565 {
3566 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3567 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3568 }
3569 break;
3570 }
3571
3572 /*
3573 * 32-bit TSS (X86TSS32).
3574 */
3575 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3576 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3577 {
3578 uint32_t off = uCpl * 8 + 4;
3579 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3580 {
3581 /** @todo check actual access pattern here. */
3582 uint64_t u64Tmp;
3583 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3584 if (rcStrict == VINF_SUCCESS)
3585 {
3586 *puEsp = u64Tmp & UINT32_MAX;
3587 *pSelSS = (RTSEL)(u64Tmp >> 32);
3588 return VINF_SUCCESS;
3589 }
3590 }
3591 else
3592 {
3593 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3594 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596 break;
3597 }
3598
3599 default:
3600 AssertFailed();
3601 rcStrict = VERR_IEM_IPE_4;
3602 break;
3603 }
3604
3605 *puEsp = 0; /* make gcc happy */
3606 *pSelSS = 0; /* make gcc happy */
3607 return rcStrict;
3608}
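
/*
 * Illustrative sketch, not part of this file's build: the ring stack field
 * offsets used above.  A 16-bit TSS keeps SP:SS word pairs from offset 2
 * (4 bytes per ring), a 32-bit TSS keeps ESP (dword) + SS (word) from
 * offset 4 (8 bytes per ring).  The example* names are made up for this
 * snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t exampleTss16StackOff(uint8_t uCpl) { return (uint32_t)uCpl * 4 + 2; }
static uint32_t exampleTss32StackOff(uint8_t uCpl) { return (uint32_t)uCpl * 8 + 4; }

int main(void)
{
    /* Fake 32-bit TSS fragment with ring-0 ESP=0x00001000 and SS=0x0010. */
    uint8_t        abTss[0x68] = { 0 };
    uint32_t const uEsp0In = UINT32_C(0x00001000);
    uint16_t const uSs0In  = 0x0010;
    memcpy(&abTss[exampleTss32StackOff(0)],     &uEsp0In, sizeof(uEsp0In));
    memcpy(&abTss[exampleTss32StackOff(0) + 4], &uSs0In,  sizeof(uSs0In));

    uint32_t uEsp0; uint16_t uSs0;
    memcpy(&uEsp0, &abTss[exampleTss32StackOff(0)],     sizeof(uEsp0));
    memcpy(&uSs0,  &abTss[exampleTss32StackOff(0) + 4], sizeof(uSs0));
    printf("ring-0 stack %#06x:%#010x (a 16-bit TSS would keep SP at offset %u)\n",
           (unsigned)uSs0, (unsigned)uEsp0, exampleTss16StackOff(0));
    return 0;
}
#endif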
3609
3610
3611/**
3612 * Loads the specified stack pointer from the 64-bit TSS.
3613 *
3614 * @returns VBox strict status code.
3615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3616 * @param uCpl The CPL to load the stack for.
3617 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3618 * @param puRsp Where to return the new stack pointer.
3619 */
3620IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3621{
3622 Assert(uCpl < 4);
3623 Assert(uIst < 8);
3624 *puRsp = 0; /* make gcc happy */
3625
3626 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3627 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3628
3629 uint32_t off;
3630 if (uIst)
3631 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3632 else
3633 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3634 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3635 {
3636 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3637 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3638 }
3639
3640 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3641}
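
/*
 * Illustrative sketch, not part of this file's build: the offset selection
 * done above for the 64-bit TSS, where a non-zero IST index wins over the
 * CPL based RSPn fields (architecturally RSP0 lives at byte 4 and IST1 at
 * byte 36).  The example name is made up for this snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <assert.h>

static uint32_t exampleTss64StackOff(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)
        return (uint32_t)(uIst - 1) * 8 + 36;   /* IST1..IST7 */
    return (uint32_t)uCpl * 8 + 4;              /* RSP0..RSP2 */
}

int main(void)
{
    assert(exampleTss64StackOff(0 /*uCpl*/, 0 /*uIst*/) ==  4);  /* RSP0 */
    assert(exampleTss64StackOff(0,          3)          == 52);  /* IST3 */
    return 0;
}
#endif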
3642
3643
3644/**
3645 * Adjusts the CPU state according to the exception being raised.
3646 *
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param u8Vector The exception that has been raised.
3649 */
3650DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3651{
3652 switch (u8Vector)
3653 {
3654 case X86_XCPT_DB:
3655 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3656 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3657 break;
3658 /** @todo Read the AMD and Intel exception reference... */
3659 }
3660}
3661
3662
3663/**
3664 * Implements exceptions and interrupts for real mode.
3665 *
3666 * @returns VBox strict status code.
3667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3668 * @param cbInstr The number of bytes to offset rIP by in the return
3669 * address.
3670 * @param u8Vector The interrupt / exception vector number.
3671 * @param fFlags The flags.
3672 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3673 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3674 */
3675IEM_STATIC VBOXSTRICTRC
3676iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3677 uint8_t cbInstr,
3678 uint8_t u8Vector,
3679 uint32_t fFlags,
3680 uint16_t uErr,
3681 uint64_t uCr2)
3682{
3683 NOREF(uErr); NOREF(uCr2);
3684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3685
3686 /*
3687 * Read the IDT entry.
3688 */
3689 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3690 {
3691 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3693 }
3694 RTFAR16 Idte;
3695 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3696 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3697 {
3698 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3699 return rcStrict;
3700 }
3701
3702 /*
3703 * Push the stack frame.
3704 */
3705 uint16_t *pu16Frame;
3706 uint64_t uNewRsp;
3707 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3708 if (rcStrict != VINF_SUCCESS)
3709 return rcStrict;
3710
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3713 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3714 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3715 fEfl |= UINT16_C(0xf000);
3716#endif
3717 pu16Frame[2] = (uint16_t)fEfl;
3718 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3719 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3720 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3721 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3722 return rcStrict;
3723
3724 /*
3725 * Load the vector address into cs:ip and make exception specific state
3726 * adjustments.
3727 */
3728 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3729 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3730 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3731 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3732 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3733 pVCpu->cpum.GstCtx.rip = Idte.off;
3734 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3735 IEMMISC_SET_EFL(pVCpu, fEfl);
3736
3737 /** @todo do we actually do this in real mode? */
3738 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3739 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3740
3741 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3742}
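
/*
 * Illustrative sketch, not part of this file's build: the real mode IVT
 * lookup used above - four bytes per vector, offset word first, then the
 * segment word - plus a reminder that the 6-byte frame pushed above is,
 * from the lowest address up: return IP, CS, FLAGS.  The example* names
 * are made up for this snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

static void exampleReadIvtEntry(const uint8_t *pbMem, uint8_t bVector, uint16_t *puSegCs, uint16_t *puIp)
{
    uint32_t const off = (uint32_t)bVector * 4;
    *puIp    = (uint16_t)(pbMem[off + 0] | ((uint16_t)pbMem[off + 1] << 8));
    *puSegCs = (uint16_t)(pbMem[off + 2] | ((uint16_t)pbMem[off + 3] << 8));
}

int main(void)
{
    uint8_t abMem[1024] = { 0 };
    /* Point vector 8 at f000:fe00 for the example. */
    abMem[8 * 4 + 0] = 0x00; abMem[8 * 4 + 1] = 0xfe;
    abMem[8 * 4 + 2] = 0x00; abMem[8 * 4 + 3] = 0xf0;

    uint16_t uSegCs, uIp;
    exampleReadIvtEntry(abMem, 8, &uSegCs, &uIp);
    printf("vector 8 -> %04x:%04x\n", (unsigned)uSegCs, (unsigned)uIp);
    return 0;
}
#endif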
3743
3744
3745/**
3746 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3749 * @param pSReg Pointer to the segment register.
3750 */
3751IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3752{
3753 pSReg->Sel = 0;
3754 pSReg->ValidSel = 0;
3755 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3756 {
3757 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3758 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3759 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3760 }
3761 else
3762 {
3763 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3764 /** @todo check this on AMD-V */
3765 pSReg->u64Base = 0;
3766 pSReg->u32Limit = 0;
3767 }
3768}
3769
3770
3771/**
3772 * Loads a segment selector during a task switch in V8086 mode.
3773 *
3774 * @param pSReg Pointer to the segment register.
3775 * @param uSel The selector value to load.
3776 */
3777IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3778{
3779 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3780 pSReg->Sel = uSel;
3781 pSReg->ValidSel = uSel;
3782 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3783 pSReg->u64Base = uSel << 4;
3784 pSReg->u32Limit = 0xffff;
3785 pSReg->Attr.u = 0xf3;
3786}
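
/*
 * Illustrative sketch, not part of this file's build: the real/V86 style
 * address calculation implied by the load above - the linear base is simply
 * the selector shifted left by four, with a fixed 64 KiB limit.  The example
 * name is made up for this snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <assert.h>

static uint32_t exampleV86SegBase(uint16_t uSel)
{
    return (uint32_t)uSel << 4;
}

int main(void)
{
    assert(exampleV86SegBase(0xb800) == UINT32_C(0x000b8000)); /* the classic text mode frame buffer */
    return 0;
}
#endif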
3787
3788
3789/**
3790 * Loads a NULL data selector into a selector register, both the hidden and
3791 * visible parts, in protected mode.
3792 *
3793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3794 * @param pSReg Pointer to the segment register.
3795 * @param uRpl The RPL.
3796 */
3797IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3798{
3799 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3800 * data selector in protected mode. */
3801 pSReg->Sel = uRpl;
3802 pSReg->ValidSel = uRpl;
3803 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3804 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3805 {
3806 /* VT-x (Intel 3960x) observed doing something like this. */
3807 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3808 pSReg->u32Limit = UINT32_MAX;
3809 pSReg->u64Base = 0;
3810 }
3811 else
3812 {
3813 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3814 pSReg->u32Limit = 0;
3815 pSReg->u64Base = 0;
3816 }
3817}
3818
3819
3820/**
3821 * Loads a segment selector during a task switch in protected mode.
3822 *
3823 * In this task switch scenario, we would throw \#TS exceptions rather than
3824 * \#GPs.
3825 *
3826 * @returns VBox strict status code.
3827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3828 * @param pSReg Pointer to the segment register.
3829 * @param uSel The new selector value.
3830 *
3831 * @remarks This does _not_ handle CS or SS.
3832 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3833 */
3834IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3835{
3836 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3837
3838 /* Null data selector. */
3839 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3840 {
3841 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3843 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3844 return VINF_SUCCESS;
3845 }
3846
3847 /* Fetch the descriptor. */
3848 IEMSELDESC Desc;
3849 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3850 if (rcStrict != VINF_SUCCESS)
3851 {
3852 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3853 VBOXSTRICTRC_VAL(rcStrict)));
3854 return rcStrict;
3855 }
3856
3857 /* Must be a data segment or readable code segment. */
3858 if ( !Desc.Legacy.Gen.u1DescType
3859 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3860 {
3861 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3862 Desc.Legacy.Gen.u4Type));
3863 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3864 }
3865
3866 /* Check privileges for data segments and non-conforming code segments. */
3867 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3868 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3869 {
3870 /* The RPL and the new CPL must be less than or equal to the DPL. */
3871 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3872 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3873 {
3874 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3875 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3876 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3877 }
3878 }
3879
3880 /* Is it there? */
3881 if (!Desc.Legacy.Gen.u1Present)
3882 {
3883 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3884 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3885 }
3886
3887 /* The base and limit. */
3888 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3889 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3890
3891 /*
3892 * Ok, everything checked out fine. Now set the accessed bit before
3893 * committing the result into the registers.
3894 */
3895 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3896 {
3897 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3898 if (rcStrict != VINF_SUCCESS)
3899 return rcStrict;
3900 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3901 }
3902
3903 /* Commit */
3904 pSReg->Sel = uSel;
3905 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3906 pSReg->u32Limit = cbLimit;
3907 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3908 pSReg->ValidSel = uSel;
3909 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3910 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3911 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3912
3913 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3914 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3915 return VINF_SUCCESS;
3916}
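
/*
 * Illustrative sketch, not part of this file's build: the privilege rule
 * enforced above for data and non-conforming code segments during a task
 * switch - both the selector RPL and the (new) CPL must be less than or
 * equal to the descriptor DPL.  The example name is made up for this
 * snippet only.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

static bool exampleDataSegPrivOk(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
{
    return uRpl <= uDpl && uCpl <= uDpl;
}

int main(void)
{
    assert( exampleDataSegPrivOk(3 /*RPL*/, 3 /*CPL*/, 3 /*DPL*/)); /* user data from user code: ok        */
    assert(!exampleDataSegPrivOk(3,         3,         0));         /* user access to a DPL=0 segment: #TS */
    return 0;
}
#endif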
3917
3918
3919/**
3920 * Performs a task switch.
3921 *
3922 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3923 * caller is responsible for performing the necessary checks (like DPL, TSS
3924 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3925 * reference for JMP, CALL, IRET.
3926 *
3927 * If the task switch is due to a software interrupt or hardware exception,
3928 * the caller is responsible for validating the TSS selector and descriptor. See
3929 * Intel Instruction reference for INT n.
3930 *
3931 * @returns VBox strict status code.
3932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3933 * @param enmTaskSwitch The cause of the task switch.
3934 * @param uNextEip The EIP effective after the task switch.
3935 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3936 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3937 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3938 * @param SelTSS The TSS selector of the new task.
3939 * @param pNewDescTSS Pointer to the new TSS descriptor.
3940 */
3941IEM_STATIC VBOXSTRICTRC
3942iemTaskSwitch(PVMCPUCC pVCpu,
3943 IEMTASKSWITCH enmTaskSwitch,
3944 uint32_t uNextEip,
3945 uint32_t fFlags,
3946 uint16_t uErr,
3947 uint64_t uCr2,
3948 RTSEL SelTSS,
3949 PIEMSELDESC pNewDescTSS)
3950{
3951 Assert(!IEM_IS_REAL_MODE(pVCpu));
3952 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3953 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3954
3955 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3956 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3957 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3958 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3959 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3960
3961 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3962 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3963
3964 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3965 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3966
3967 /* Update CR2 in case it's a page-fault. */
3968 /** @todo This should probably be done much earlier in IEM/PGM. See
3969 * @bugref{5653#c49}. */
3970 if (fFlags & IEM_XCPT_FLAGS_CR2)
3971 pVCpu->cpum.GstCtx.cr2 = uCr2;
3972
3973 /*
3974 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3975 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3976 */
3977 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3978 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3979 if (uNewTSSLimit < uNewTSSLimitMin)
3980 {
3981 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3982 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3984 }
3985
3986 /*
3987 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3988 * The new TSS must have been read and validated (DPL, limits etc.) before a
3989 * task-switch VM-exit commences.
3990 *
3991 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3992 */
3993 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3994 {
3995 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3996 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3997 }
3998
3999 /*
4000 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4001 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4002 */
4003 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4004 {
4005 uint32_t const uExitInfo1 = SelTSS;
4006 uint32_t uExitInfo2 = uErr;
4007 switch (enmTaskSwitch)
4008 {
4009 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4010 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4011 default: break;
4012 }
4013 if (fFlags & IEM_XCPT_FLAGS_ERR)
4014 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4015 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4016 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4017
4018 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4019 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4020 RT_NOREF2(uExitInfo1, uExitInfo2);
4021 }
4022
4023 /*
4024 * Check the current TSS limit. The last written byte to the current TSS during the
4025 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4026 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4027 *
4028 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4029 * end up with smaller than "legal" TSS limits.
4030 */
4031 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4032 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4033 if (uCurTSSLimit < uCurTSSLimitMin)
4034 {
4035 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4036 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4037 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4038 }
4039
4040 /*
4041 * Verify that the new TSS can be accessed and map it. Map only the required contents
4042 * and not the entire TSS.
4043 */
4044 void *pvNewTSS;
4045 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4046 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4047 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4048 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4049 * not perform correct translation if this happens. See Intel spec. 7.2.1
4050 * "Task-State Segment". */
4051 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4052 if (rcStrict != VINF_SUCCESS)
4053 {
4054 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4055 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4056 return rcStrict;
4057 }
4058
4059 /*
4060 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4061 */
4062 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4063 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4064 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4065 {
4066 PX86DESC pDescCurTSS;
4067 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4068 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4069 if (rcStrict != VINF_SUCCESS)
4070 {
4071 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4072 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4073 return rcStrict;
4074 }
4075
4076 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4077 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4078 if (rcStrict != VINF_SUCCESS)
4079 {
4080 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4081 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4082 return rcStrict;
4083 }
4084
4085 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4086 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4087 {
4088 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4089 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4090 u32EFlags &= ~X86_EFL_NT;
4091 }
4092 }
4093
4094 /*
4095 * Save the CPU state into the current TSS.
4096 */
4097 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4098 if (GCPtrNewTSS == GCPtrCurTSS)
4099 {
4100 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4101 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4102 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4103 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4104 pVCpu->cpum.GstCtx.ldtr.Sel));
4105 }
4106 if (fIsNewTSS386)
4107 {
4108 /*
4109 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4110 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4111 */
4112 void *pvCurTSS32;
4113 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4114 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4115 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4116 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4117 if (rcStrict != VINF_SUCCESS)
4118 {
4119 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4120 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4121 return rcStrict;
4122 }
4123
4124 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4125 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4126 pCurTSS32->eip = uNextEip;
4127 pCurTSS32->eflags = u32EFlags;
4128 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4129 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4130 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4131 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4132 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4133 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4134 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4135 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4136 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4137 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4138 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4139 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4140 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4141 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4142
4143 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4144 if (rcStrict != VINF_SUCCESS)
4145 {
4146 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4147 VBOXSTRICTRC_VAL(rcStrict)));
4148 return rcStrict;
4149 }
4150 }
4151 else
4152 {
4153 /*
4154 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4155 */
4156 void *pvCurTSS16;
4157 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4158 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4159 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4160 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4161 if (rcStrict != VINF_SUCCESS)
4162 {
4163 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4164 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4165 return rcStrict;
4166 }
4167
4168 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4169 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4170 pCurTSS16->ip = uNextEip;
4171 pCurTSS16->flags = u32EFlags;
4172 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4173 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4174 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4175 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4176 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4177 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4178 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4179 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4180 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4181 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4182 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4183 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4184
4185 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4186 if (rcStrict != VINF_SUCCESS)
4187 {
4188 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4189 VBOXSTRICTRC_VAL(rcStrict)));
4190 return rcStrict;
4191 }
4192 }
4193
4194 /*
4195 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4196 */
4197 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4198 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4199 {
4200 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4201 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4202 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4203 }
4204
4205 /*
4206 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky, so
4207 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4208 */
4209 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4210 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4211 bool fNewDebugTrap;
4212 if (fIsNewTSS386)
4213 {
4214 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4215 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4216 uNewEip = pNewTSS32->eip;
4217 uNewEflags = pNewTSS32->eflags;
4218 uNewEax = pNewTSS32->eax;
4219 uNewEcx = pNewTSS32->ecx;
4220 uNewEdx = pNewTSS32->edx;
4221 uNewEbx = pNewTSS32->ebx;
4222 uNewEsp = pNewTSS32->esp;
4223 uNewEbp = pNewTSS32->ebp;
4224 uNewEsi = pNewTSS32->esi;
4225 uNewEdi = pNewTSS32->edi;
4226 uNewES = pNewTSS32->es;
4227 uNewCS = pNewTSS32->cs;
4228 uNewSS = pNewTSS32->ss;
4229 uNewDS = pNewTSS32->ds;
4230 uNewFS = pNewTSS32->fs;
4231 uNewGS = pNewTSS32->gs;
4232 uNewLdt = pNewTSS32->selLdt;
4233 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4234 }
4235 else
4236 {
4237 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4238 uNewCr3 = 0;
4239 uNewEip = pNewTSS16->ip;
4240 uNewEflags = pNewTSS16->flags;
4241 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4242 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4243 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4244 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4245 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4246 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4247 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4248 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4249 uNewES = pNewTSS16->es;
4250 uNewCS = pNewTSS16->cs;
4251 uNewSS = pNewTSS16->ss;
4252 uNewDS = pNewTSS16->ds;
4253 uNewFS = 0;
4254 uNewGS = 0;
4255 uNewLdt = pNewTSS16->selLdt;
4256 fNewDebugTrap = false;
4257 }
4258
4259 if (GCPtrNewTSS == GCPtrCurTSS)
4260 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4261 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4262
4263 /*
4264 * We're done accessing the new TSS.
4265 */
4266 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4267 if (rcStrict != VINF_SUCCESS)
4268 {
4269 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4270 return rcStrict;
4271 }
4272
4273 /*
4274 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4275 */
4276 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4277 {
4278 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4279 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4280 if (rcStrict != VINF_SUCCESS)
4281 {
4282 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4283 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4284 return rcStrict;
4285 }
4286
4287 /* Check that the descriptor indicates the new TSS is available (not busy). */
4288 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4289 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4290 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4291
4292 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4293 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4294 if (rcStrict != VINF_SUCCESS)
4295 {
4296 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4297 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4298 return rcStrict;
4299 }
4300 }
4301
4302 /*
4303 * From this point on, we're technically in the new task. We will defer exceptions
4304 * until the completion of the task switch but before executing any instructions in the new task.
4305 */
4306 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4307 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4308 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4309 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4310 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4311 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4312 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4313
4314 /* Set the busy bit in TR. */
4315 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4316
4317 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4318 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4319 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4320 {
4321 uNewEflags |= X86_EFL_NT;
4322 }
4323
4324 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4325 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4326 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4327
4328 pVCpu->cpum.GstCtx.eip = uNewEip;
4329 pVCpu->cpum.GstCtx.eax = uNewEax;
4330 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4331 pVCpu->cpum.GstCtx.edx = uNewEdx;
4332 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4333 pVCpu->cpum.GstCtx.esp = uNewEsp;
4334 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4335 pVCpu->cpum.GstCtx.esi = uNewEsi;
4336 pVCpu->cpum.GstCtx.edi = uNewEdi;
4337
4338 uNewEflags &= X86_EFL_LIVE_MASK;
4339 uNewEflags |= X86_EFL_RA1_MASK;
4340 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4341
4342 /*
4343 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4344 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4345 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4346 */
4347 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4348 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4349
4350 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4351 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4352
4353 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4354 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4355
4356 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4357 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4358
4359 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4360 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4361
4362 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4363 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4364 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4365
4366 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4367 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4368 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4369 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4370
4371 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4372 {
4373 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4374 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4375 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4376 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4377 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4378 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4379 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4380 }
4381
4382 /*
4383 * Switch CR3 for the new task.
4384 */
4385 if ( fIsNewTSS386
4386 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4387 {
4388 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4389 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4390 AssertRCSuccessReturn(rc, rc);
4391
4392 /* Inform PGM. */
4393 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4394 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4395 AssertRCReturn(rc, rc);
4396 /* ignore informational status codes */
4397
4398 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4399 }
4400
4401 /*
4402 * Switch LDTR for the new task.
4403 */
4404 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4405 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4406 else
4407 {
4408 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4409
4410 IEMSELDESC DescNewLdt;
4411 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4412 if (rcStrict != VINF_SUCCESS)
4413 {
4414 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4415 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4416 return rcStrict;
4417 }
4418 if ( !DescNewLdt.Legacy.Gen.u1Present
4419 || DescNewLdt.Legacy.Gen.u1DescType
4420 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4421 {
4422 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4423 uNewLdt, DescNewLdt.Legacy.u));
4424 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4425 }
4426
4427 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4428 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4429 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4430 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4431 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4432 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4433 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4434 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4435 }
4436
4437 IEMSELDESC DescSS;
4438 if (IEM_IS_V86_MODE(pVCpu))
4439 {
4440 pVCpu->iem.s.uCpl = 3;
4441 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4442 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4443 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4444 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4445 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4446 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4447
4448 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4449 DescSS.Legacy.u = 0;
4450 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4451 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4452 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4453 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4454 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4455 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4456 DescSS.Legacy.Gen.u2Dpl = 3;
4457 }
4458 else
4459 {
4460 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4461
4462 /*
4463 * Load the stack segment for the new task.
4464 */
4465 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4466 {
4467 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4468 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4469 }
4470
4471 /* Fetch the descriptor. */
4472 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4473 if (rcStrict != VINF_SUCCESS)
4474 {
4475 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4476 VBOXSTRICTRC_VAL(rcStrict)));
4477 return rcStrict;
4478 }
4479
4480 /* SS must be a data segment and writable. */
4481 if ( !DescSS.Legacy.Gen.u1DescType
4482 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4483 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4484 {
4485 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4486 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4487 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4488 }
4489
4490 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4491 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4492 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4493 {
4494 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4495 uNewCpl));
4496 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4497 }
4498
4499 /* Is it there? */
4500 if (!DescSS.Legacy.Gen.u1Present)
4501 {
4502 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4503 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4504 }
4505
4506 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4507 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4508
4509 /* Set the accessed bit before committing the result into SS. */
4510 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4511 {
4512 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4513 if (rcStrict != VINF_SUCCESS)
4514 return rcStrict;
4515 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4516 }
4517
4518 /* Commit SS. */
4519 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4520 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4521 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4522 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4523 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4524 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4525 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4526
4527 /* CPL has changed, update IEM before loading rest of segments. */
4528 pVCpu->iem.s.uCpl = uNewCpl;
4529
4530 /*
4531 * Load the data segments for the new task.
4532 */
4533 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4537 if (rcStrict != VINF_SUCCESS)
4538 return rcStrict;
4539 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4540 if (rcStrict != VINF_SUCCESS)
4541 return rcStrict;
4542 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4543 if (rcStrict != VINF_SUCCESS)
4544 return rcStrict;
4545
4546 /*
4547 * Load the code segment for the new task.
4548 */
4549 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4550 {
4551 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4552 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 /* Fetch the descriptor. */
4556 IEMSELDESC DescCS;
4557 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4558 if (rcStrict != VINF_SUCCESS)
4559 {
4560 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4561 return rcStrict;
4562 }
4563
4564 /* CS must be a code segment. */
4565 if ( !DescCS.Legacy.Gen.u1DescType
4566 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4567 {
4568 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4569 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4570 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4571 }
4572
4573 /* For conforming CS, DPL must be less than or equal to the RPL. */
4574 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4575 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4576 {
4577 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4578 DescCS.Legacy.Gen.u2Dpl));
4579 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4580 }
4581
4582 /* For non-conforming CS, DPL must match RPL. */
4583 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4584 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4585 {
4586 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4587 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4588 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4589 }
4590
4591 /* Is it there? */
4592 if (!DescCS.Legacy.Gen.u1Present)
4593 {
4594 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4595 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4596 }
4597
4598 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4599 u64Base = X86DESC_BASE(&DescCS.Legacy);
4600
4601 /* Set the accessed bit before committing the result into CS. */
4602 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4603 {
4604 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4605 if (rcStrict != VINF_SUCCESS)
4606 return rcStrict;
4607 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4608 }
4609
4610 /* Commit CS. */
4611 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4612 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4613 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4614 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4615 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4616 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4617 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4618 }
4619
4620 /** @todo Debug trap. */
4621 if (fIsNewTSS386 && fNewDebugTrap)
4622 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4623
4624 /*
4625 * Construct the error code masks based on what caused this task switch.
4626 * See Intel Instruction reference for INT.
4627 */
4628 uint16_t uExt;
4629 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4630 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4631 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4632 {
4633 uExt = 1;
4634 }
4635 else
4636 uExt = 0;
4637
4638 /*
4639 * Push any error code on to the new stack.
4640 */
4641 if (fFlags & IEM_XCPT_FLAGS_ERR)
4642 {
4643 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4644 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4645 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
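 /* The error code below is pushed as a dword when switching via a 32-bit TSS and as a word for a 16-bit TSS. */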
4646
4647 /* Check that there is sufficient space on the stack. */
4648 /** @todo Factor out segment limit checking for normal/expand down segments
4649 * into a separate function. */
4650 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4651 {
4652 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4653 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4654 {
4655 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4656 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4657 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4658 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4659 }
4660 }
4661 else
4662 {
4663 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4664 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4665 {
4666 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4667 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4668 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4669 }
4670 }
4671
4672
4673 if (fIsNewTSS386)
4674 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4675 else
4676 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4677 if (rcStrict != VINF_SUCCESS)
4678 {
4679 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4680 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4681 return rcStrict;
4682 }
4683 }
4684
4685 /* Check the new EIP against the new CS limit. */
4686 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4687 {
4688 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4689 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4690 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4691 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4692 }
4693
4694 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4695 pVCpu->cpum.GstCtx.ss.Sel));
4696 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4697}
4698
4699
4700/**
4701 * Implements exceptions and interrupts for protected mode.
4702 *
4703 * @returns VBox strict status code.
4704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4705 * @param cbInstr The number of bytes to offset rIP by in the return
4706 * address.
4707 * @param u8Vector The interrupt / exception vector number.
4708 * @param fFlags The flags.
4709 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4710 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4711 */
4712IEM_STATIC VBOXSTRICTRC
4713iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4714 uint8_t cbInstr,
4715 uint8_t u8Vector,
4716 uint32_t fFlags,
4717 uint16_t uErr,
4718 uint64_t uCr2)
4719{
4720 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4721
4722 /*
4723 * Read the IDT entry.
4724 */
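 /* Each protected-mode IDT entry is 8 bytes, so the IDTR limit must cover offsets 8*vector through 8*vector+7. */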
4725 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4726 {
4727 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4728 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4729 }
4730 X86DESC Idte;
4731 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4732 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4733 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4734 {
4735 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4736 return rcStrict;
4737 }
4738 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4739 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4740 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4741
4742 /*
4743 * Check the descriptor type, DPL and such.
4744 * ASSUMES this is done in the same order as described for call-gate calls.
4745 */
4746 if (Idte.Gate.u1DescType)
4747 {
4748 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4749 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4750 }
4751 bool fTaskGate = false;
4752 uint8_t f32BitGate = true;
4753 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4754 switch (Idte.Gate.u4Type)
4755 {
4756 case X86_SEL_TYPE_SYS_UNDEFINED:
4757 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4758 case X86_SEL_TYPE_SYS_LDT:
4759 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4760 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4761 case X86_SEL_TYPE_SYS_UNDEFINED2:
4762 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4763 case X86_SEL_TYPE_SYS_UNDEFINED3:
4764 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4765 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4766 case X86_SEL_TYPE_SYS_UNDEFINED4:
4767 {
4768 /** @todo check what actually happens when the type is wrong...
4769 * esp. call gates. */
4770 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4771 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4772 }
4773
4774 case X86_SEL_TYPE_SYS_286_INT_GATE:
4775 f32BitGate = false;
4776 RT_FALL_THRU();
4777 case X86_SEL_TYPE_SYS_386_INT_GATE:
4778 fEflToClear |= X86_EFL_IF;
4779 break;
4780
4781 case X86_SEL_TYPE_SYS_TASK_GATE:
4782 fTaskGate = true;
4783#ifndef IEM_IMPLEMENTS_TASKSWITCH
4784 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4785#endif
4786 break;
4787
4788 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4789 f32BitGate = false;
 RT_FALL_THRU();
4790 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4791 break;
4792
4793 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4794 }
4795
4796 /* Check DPL against CPL if applicable. */
4797 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4798 {
4799 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4802 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804 }
4805
4806 /* Is it there? */
4807 if (!Idte.Gate.u1Present)
4808 {
4809 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4810 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4811 }
4812
4813 /* Is it a task-gate? */
4814 if (fTaskGate)
4815 {
4816 /*
4817 * Construct the error code masks based on what caused this task switch.
4818 * See Intel Instruction reference for INT.
4819 */
4820 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4821 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4822 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4823 RTSEL SelTSS = Idte.Gate.u16Sel;
4824
4825 /*
4826 * Fetch the TSS descriptor in the GDT.
4827 */
4828 IEMSELDESC DescTSS;
4829 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4830 if (rcStrict != VINF_SUCCESS)
4831 {
4832 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4833 VBOXSTRICTRC_VAL(rcStrict)));
4834 return rcStrict;
4835 }
4836
4837 /* The TSS descriptor must be a system segment and be available (not busy). */
4838 if ( DescTSS.Legacy.Gen.u1DescType
4839 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4840 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4841 {
4842 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4843 u8Vector, SelTSS, DescTSS.Legacy.au64));
4844 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4845 }
4846
4847 /* The TSS must be present. */
4848 if (!DescTSS.Legacy.Gen.u1Present)
4849 {
4850 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4851 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4852 }
4853
4854 /* Do the actual task switch. */
4855 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4856 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4857 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4858 }
4859
4860 /* A null CS is bad. */
4861 RTSEL NewCS = Idte.Gate.u16Sel;
4862 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4863 {
4864 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4865 return iemRaiseGeneralProtectionFault0(pVCpu);
4866 }
4867
4868 /* Fetch the descriptor for the new CS. */
4869 IEMSELDESC DescCS;
4870 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4871 if (rcStrict != VINF_SUCCESS)
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4874 return rcStrict;
4875 }
4876
4877 /* Must be a code segment. */
4878 if (!DescCS.Legacy.Gen.u1DescType)
4879 {
4880 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4881 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4882 }
4883 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4884 {
4885 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4886 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4887 }
4888
4889 /* Don't allow lowering the privilege level. */
4890 /** @todo Does the lowering of privileges apply to software interrupts
4891 * only? This has a bearing on the more-privileged or
4892 * same-privilege stack behavior further down. A testcase would
4893 * be nice. */
4894 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4895 {
4896 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4897 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4898 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4899 }
4900
4901 /* Make sure the selector is present. */
4902 if (!DescCS.Legacy.Gen.u1Present)
4903 {
4904 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4905 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4906 }
4907
4908 /* Check the new EIP against the new CS limit. */
4909 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4910 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4911 ? Idte.Gate.u16OffsetLow
4912 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
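 /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words into a 32-bit EIP. */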
4913 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4914 if (uNewEip > cbLimitCS)
4915 {
4916 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4917 u8Vector, uNewEip, cbLimitCS, NewCS));
4918 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4919 }
4920 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4921
4922 /* Calc the flag image to push. */
4923 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4924 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4925 fEfl &= ~X86_EFL_RF;
4926 else
4927 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4928
4929 /* From V8086 mode only go to CPL 0. */
4930 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4931 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4932 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4933 {
4934 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4935 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4936 }
4937
4938 /*
4939 * If the privilege level changes, we need to get a new stack from the TSS.
4940 * This in turns means validating the new SS and ESP...
4941 */
4942 if (uNewCpl != pVCpu->iem.s.uCpl)
4943 {
4944 RTSEL NewSS;
4945 uint32_t uNewEsp;
4946 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4947 if (rcStrict != VINF_SUCCESS)
4948 return rcStrict;
4949
4950 IEMSELDESC DescSS;
4951 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4952 if (rcStrict != VINF_SUCCESS)
4953 return rcStrict;
4954 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4955 if (!DescSS.Legacy.Gen.u1DefBig)
4956 {
4957 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4958 uNewEsp = (uint16_t)uNewEsp;
4959 }
4960
4961 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4962
4963 /* Check that there is sufficient space for the stack frame. */
4964 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4965 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4966 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4967 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
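 /* Before the 32-bit-gate doubling this counts EIP, CS, EFLAGS, ESP and SS (5 entries, +1 for an error code);
    when interrupting V8086 code, ES, DS, FS and GS are pushed as well. */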
4968
4969 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4970 {
4971 if ( uNewEsp - 1 > cbLimitSS
4972 || uNewEsp < cbStackFrame)
4973 {
4974 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4975 u8Vector, NewSS, uNewEsp, cbStackFrame));
4976 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4977 }
4978 }
4979 else
4980 {
4981 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4982 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4983 {
4984 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4985 u8Vector, NewSS, uNewEsp, cbStackFrame));
4986 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4987 }
4988 }
4989
4990 /*
4991 * Start making changes.
4992 */
4993
4994 /* Set the new CPL so that stack accesses use it. */
4995 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4996 pVCpu->iem.s.uCpl = uNewCpl;
4997
4998 /* Create the stack frame. */
4999 RTPTRUNION uStackFrame;
5000 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5001 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5002 if (rcStrict != VINF_SUCCESS)
5003 return rcStrict;
5004 void * const pvStackFrame = uStackFrame.pv;
5005 if (f32BitGate)
5006 {
5007 if (fFlags & IEM_XCPT_FLAGS_ERR)
5008 *uStackFrame.pu32++ = uErr;
5009 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5010 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5011 uStackFrame.pu32[2] = fEfl;
5012 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5013 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5014 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5015 if (fEfl & X86_EFL_VM)
5016 {
5017 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5018 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5019 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5020 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5021 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5022 }
5023 }
5024 else
5025 {
5026 if (fFlags & IEM_XCPT_FLAGS_ERR)
5027 *uStackFrame.pu16++ = uErr;
5028 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5029 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5030 uStackFrame.pu16[2] = fEfl;
5031 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5032 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5033 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5034 if (fEfl & X86_EFL_VM)
5035 {
5036 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5037 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5038 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5039 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5040 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5041 }
5042 }
5043 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5044 if (rcStrict != VINF_SUCCESS)
5045 return rcStrict;
5046
5047 /* Mark the selectors 'accessed' (hope this is the correct time). */
5048 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5049 * after pushing the stack frame? (Write protect the gdt + stack to
5050 * find out.) */
5051 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5052 {
5053 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5054 if (rcStrict != VINF_SUCCESS)
5055 return rcStrict;
5056 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5057 }
5058
5059 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5060 {
5061 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5062 if (rcStrict != VINF_SUCCESS)
5063 return rcStrict;
5064 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5065 }
5066
5067 /*
5068 * Start committing the register changes (joins with the DPL=CPL branch).
5069 */
5070 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5071 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5072 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5073 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5074 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5075 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5076 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5077 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5078 * SP is loaded).
5079 * Need to check the other combinations too:
5080 * - 16-bit TSS, 32-bit handler
5081 * - 32-bit TSS, 16-bit handler */
5082 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5083 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5084 else
5085 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5086
5087 if (fEfl & X86_EFL_VM)
5088 {
5089 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5090 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5091 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5092 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5093 }
5094 }
5095 /*
5096 * Same privilege, no stack change and smaller stack frame.
5097 */
5098 else
5099 {
5100 uint64_t uNewRsp;
5101 RTPTRUNION uStackFrame;
5102 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
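 /* No stack switch here, so only EIP, CS and EFLAGS (plus an optional error code) are pushed. */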
5103 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5104 if (rcStrict != VINF_SUCCESS)
5105 return rcStrict;
5106 void * const pvStackFrame = uStackFrame.pv;
5107
5108 if (f32BitGate)
5109 {
5110 if (fFlags & IEM_XCPT_FLAGS_ERR)
5111 *uStackFrame.pu32++ = uErr;
5112 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5113 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5114 uStackFrame.pu32[2] = fEfl;
5115 }
5116 else
5117 {
5118 if (fFlags & IEM_XCPT_FLAGS_ERR)
5119 *uStackFrame.pu16++ = uErr;
5120 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5121 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5122 uStackFrame.pu16[2] = fEfl;
5123 }
5124 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5125 if (rcStrict != VINF_SUCCESS)
5126 return rcStrict;
5127
5128 /* Mark the CS selector as 'accessed'. */
5129 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5130 {
5131 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5132 if (rcStrict != VINF_SUCCESS)
5133 return rcStrict;
5134 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5135 }
5136
5137 /*
5138 * Start committing the register changes (joins with the other branch).
5139 */
5140 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5141 }
5142
5143 /* ... register committing continues. */
5144 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5145 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5146 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5147 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5148 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5149 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5150
5151 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5152 fEfl &= ~fEflToClear;
5153 IEMMISC_SET_EFL(pVCpu, fEfl);
5154
5155 if (fFlags & IEM_XCPT_FLAGS_CR2)
5156 pVCpu->cpum.GstCtx.cr2 = uCr2;
5157
5158 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5159 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5160
5161 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5162}
5163
5164
5165/**
5166 * Implements exceptions and interrupts for long mode.
5167 *
5168 * @returns VBox strict status code.
5169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5170 * @param cbInstr The number of bytes to offset rIP by in the return
5171 * address.
5172 * @param u8Vector The interrupt / exception vector number.
5173 * @param fFlags The flags.
5174 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5175 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5176 */
5177IEM_STATIC VBOXSTRICTRC
5178iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5179 uint8_t cbInstr,
5180 uint8_t u8Vector,
5181 uint32_t fFlags,
5182 uint16_t uErr,
5183 uint64_t uCr2)
5184{
5185 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5186
5187 /*
5188 * Read the IDT entry.
5189 */
5190 uint16_t offIdt = (uint16_t)u8Vector << 4;
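 /* Long-mode IDT entries are 16 bytes each, hence the vector is scaled by 16. */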
5191 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5192 {
5193 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5194 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5195 }
5196 X86DESC64 Idte;
5197#ifdef _MSC_VER /* Shut up silly compiler warning. */
5198 Idte.au64[0] = 0;
5199 Idte.au64[1] = 0;
5200#endif
5201 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5202 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5203 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5204 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5205 {
5206 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5207 return rcStrict;
5208 }
5209 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5210 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5211 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5212
5213 /*
5214 * Check the descriptor type, DPL and such.
5215 * ASSUMES this is done in the same order as described for call-gate calls.
5216 */
5217 if (Idte.Gate.u1DescType)
5218 {
5219 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5220 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5221 }
5222 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5223 switch (Idte.Gate.u4Type)
5224 {
5225 case AMD64_SEL_TYPE_SYS_INT_GATE:
5226 fEflToClear |= X86_EFL_IF;
5227 break;
5228 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5229 break;
5230
5231 default:
5232 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5233 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5234 }
5235
5236 /* Check DPL against CPL if applicable. */
5237 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5238 {
5239 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5242 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5243 }
5244 }
5245
5246 /* Is it there? */
5247 if (!Idte.Gate.u1Present)
5248 {
5249 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5250 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5251 }
5252
5253 /* A null CS is bad. */
5254 RTSEL NewCS = Idte.Gate.u16Sel;
5255 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5258 return iemRaiseGeneralProtectionFault0(pVCpu);
5259 }
5260
5261 /* Fetch the descriptor for the new CS. */
5262 IEMSELDESC DescCS;
5263 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5264 if (rcStrict != VINF_SUCCESS)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5267 return rcStrict;
5268 }
5269
5270 /* Must be a 64-bit code segment. */
5271 if (!DescCS.Long.Gen.u1DescType)
5272 {
5273 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5274 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5275 }
5276 if ( !DescCS.Long.Gen.u1Long
5277 || DescCS.Long.Gen.u1DefBig
5278 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5279 {
5280 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5281 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5282 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5283 }
5284
5285 /* Don't allow lowering the privilege level. For non-conforming CS
5286 selectors, the CS.DPL sets the privilege level the trap/interrupt
5287 handler runs at. For conforming CS selectors, the CPL remains
5288 unchanged, but the CS.DPL must be <= CPL. */
5289 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5290 * when CPU in Ring-0. Result \#GP? */
5291 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5292 {
5293 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5294 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5295 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5296 }
5297
5298
5299 /* Make sure the selector is present. */
5300 if (!DescCS.Legacy.Gen.u1Present)
5301 {
5302 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5303 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5304 }
5305
5306 /* Check that the new RIP is canonical. */
5307 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5308 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5309 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
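 /* The 64-bit gate offset is assembled from the low, high and top offset fields of the 16-byte descriptor. */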
5310 if (!IEM_IS_CANONICAL(uNewRip))
5311 {
5312 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5313 return iemRaiseGeneralProtectionFault0(pVCpu);
5314 }
5315
5316 /*
5317 * If the privilege level changes or if the IST isn't zero, we need to get
5318 * a new stack from the TSS.
5319 */
5320 uint64_t uNewRsp;
5321 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5322 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5323 if ( uNewCpl != pVCpu->iem.s.uCpl
5324 || Idte.Gate.u3IST != 0)
5325 {
5326 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5327 if (rcStrict != VINF_SUCCESS)
5328 return rcStrict;
5329 }
5330 else
5331 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5332 uNewRsp &= ~(uint64_t)0xf;
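 /* The new stack pointer is aligned down to a 16-byte boundary before the frame is pushed, as the CPU does when delivering events in long mode. */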
5333
5334 /*
5335 * Calc the flag image to push.
5336 */
5337 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5338 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5339 fEfl &= ~X86_EFL_RF;
5340 else
5341 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5342
5343 /*
5344 * Start making changes.
5345 */
5346 /* Set the new CPL so that stack accesses use it. */
5347 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5348 pVCpu->iem.s.uCpl = uNewCpl;
5349
5350 /* Create the stack frame. */
5351 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
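 /* Long mode always pushes SS, RSP, RFLAGS, CS and RIP (5 qwords), plus an optional error code. */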
5352 RTPTRUNION uStackFrame;
5353 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5354 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5355 if (rcStrict != VINF_SUCCESS)
5356 return rcStrict;
5357 void * const pvStackFrame = uStackFrame.pv;
5358
5359 if (fFlags & IEM_XCPT_FLAGS_ERR)
5360 *uStackFrame.pu64++ = uErr;
5361 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5362 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5363 uStackFrame.pu64[2] = fEfl;
5364 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5365 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5366 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5367 if (rcStrict != VINF_SUCCESS)
5368 return rcStrict;
5369
5370 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5371 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5372 * after pushing the stack frame? (Write protect the gdt + stack to
5373 * find out.) */
5374 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5375 {
5376 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5377 if (rcStrict != VINF_SUCCESS)
5378 return rcStrict;
5379 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5380 }
5381
5382 /*
5383 * Start committing the register changes.
5384 */
5385 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5386 * hidden registers when interrupting 32-bit or 16-bit code! */
5387 if (uNewCpl != uOldCpl)
5388 {
5389 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5390 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5391 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5392 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5393 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5394 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5395 }
5396 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5397 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5398 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5399 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5400 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5401 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5402 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5403 pVCpu->cpum.GstCtx.rip = uNewRip;
5404
5405 fEfl &= ~fEflToClear;
5406 IEMMISC_SET_EFL(pVCpu, fEfl);
5407
5408 if (fFlags & IEM_XCPT_FLAGS_CR2)
5409 pVCpu->cpum.GstCtx.cr2 = uCr2;
5410
5411 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5412 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5413
5414 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5415}
5416
5417
5418/**
5419 * Implements exceptions and interrupts.
5420 *
5421 * All exceptions and interrupts go through this function!
5422 *
5423 * @returns VBox strict status code.
5424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5425 * @param cbInstr The number of bytes to offset rIP by in the return
5426 * address.
5427 * @param u8Vector The interrupt / exception vector number.
5428 * @param fFlags The flags.
5429 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5430 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5431 */
5432DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5433iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5434 uint8_t cbInstr,
5435 uint8_t u8Vector,
5436 uint32_t fFlags,
5437 uint16_t uErr,
5438 uint64_t uCr2)
5439{
5440 /*
5441 * Get all the state that we might need here.
5442 */
5443 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5444 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5445
5446#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5447 /*
5448 * Flush prefetch buffer
5449 */
5450 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5451#endif
5452
5453 /*
5454 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5455 */
5456 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5457 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5458 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5459 | IEM_XCPT_FLAGS_BP_INSTR
5460 | IEM_XCPT_FLAGS_ICEBP_INSTR
5461 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5462 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5463 {
5464 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5465 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5466 u8Vector = X86_XCPT_GP;
5467 uErr = 0;
5468 }
5469#ifdef DBGFTRACE_ENABLED
5470 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5471 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5472 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5473#endif
5474
5475 /*
5476 * Evaluate whether NMI blocking should be in effect.
5477 * Normally, NMI blocking is in effect whenever we inject an NMI.
5478 */
5479 bool fBlockNmi;
5480 if ( u8Vector == X86_XCPT_NMI
5481 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5482 fBlockNmi = true;
5483 else
5484 fBlockNmi = false;
5485
5486#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5487 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5488 {
5489 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5490 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5491 return rcStrict0;
5492
5493 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5494 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5495 {
5496 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5497 fBlockNmi = false;
5498 }
5499 }
5500#endif
5501
5502#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5503 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5504 {
5505 /*
5506 * If the event is being injected as part of VMRUN, it isn't subject to event
5507 * intercepts in the nested-guest. However, secondary exceptions that occur
5508 * during injection of any event -are- subject to exception intercepts.
5509 *
5510 * See AMD spec. 15.20 "Event Injection".
5511 */
5512 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5513 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5514 else
5515 {
5516 /*
5517 * Check and handle if the event being raised is intercepted.
5518 */
5519 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5520 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5521 return rcStrict0;
5522 }
5523 }
5524#endif
5525
5526 /*
5527 * Set NMI blocking if necessary.
5528 */
5529 if ( fBlockNmi
5530 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5531 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5532
5533 /*
5534 * Do recursion accounting.
5535 */
5536 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5537 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5538 if (pVCpu->iem.s.cXcptRecursions == 0)
5539 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5540 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5541 else
5542 {
5543 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5544 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5545 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5546
5547 if (pVCpu->iem.s.cXcptRecursions >= 4)
5548 {
5549#ifdef DEBUG_bird
5550 AssertFailed();
5551#endif
5552 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5553 }
5554
5555 /*
5556 * Evaluate the sequence of recurring events.
5557 */
5558 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5559 NULL /* pXcptRaiseInfo */);
5560 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5561 { /* likely */ }
5562 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5563 {
5564 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5565 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5566 u8Vector = X86_XCPT_DF;
5567 uErr = 0;
5568#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5569 /* VMX nested-guest #DF intercept needs to be checked here. */
5570 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5571 {
5572 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5573 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5574 return rcStrict0;
5575 }
5576#endif
5577 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5578 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5579 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5580 }
5581 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5582 {
5583 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5584 return iemInitiateCpuShutdown(pVCpu);
5585 }
5586 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5587 {
5588 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5589 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5590 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5591 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5592 return VERR_EM_GUEST_CPU_HANG;
5593 }
5594 else
5595 {
5596 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5597 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5598 return VERR_IEM_IPE_9;
5599 }
5600
5601 /*
5602 * The 'EXT' bit is set when an exception occurs during delivery of an external
5603 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5604 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5605 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5606 *
5607 * [1] - Intel spec. 6.13 "Error Code"
5608 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5609 * [3] - Intel Instruction reference for INT n.
5610 */
5611 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5612 && (fFlags & IEM_XCPT_FLAGS_ERR)
5613 && u8Vector != X86_XCPT_PF
5614 && u8Vector != X86_XCPT_DF)
5615 {
5616 uErr |= X86_TRAP_ERR_EXTERNAL;
5617 }
5618 }
5619
5620 pVCpu->iem.s.cXcptRecursions++;
5621 pVCpu->iem.s.uCurXcpt = u8Vector;
5622 pVCpu->iem.s.fCurXcpt = fFlags;
5623 pVCpu->iem.s.uCurXcptErr = uErr;
5624 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5625
5626 /*
5627 * Extensive logging.
5628 */
5629#if defined(LOG_ENABLED) && defined(IN_RING3)
5630 if (LogIs3Enabled())
5631 {
5632 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5633 PVM pVM = pVCpu->CTX_SUFF(pVM);
5634 char szRegs[4096];
5635 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5636 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5637 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5638 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5639 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5640 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5641 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5642 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5643 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5644 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5645 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5646 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5647 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5648 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5649 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5650 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5651 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5652 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5653 " efer=%016VR{efer}\n"
5654 " pat=%016VR{pat}\n"
5655 " sf_mask=%016VR{sf_mask}\n"
5656 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5657 " lstar=%016VR{lstar}\n"
5658 " star=%016VR{star} cstar=%016VR{cstar}\n"
5659 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5660 );
5661
5662 char szInstr[256];
5663 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5664 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5665 szInstr, sizeof(szInstr), NULL);
5666 Log3(("%s%s\n", szRegs, szInstr));
5667 }
5668#endif /* LOG_ENABLED */
5669
5670 /*
5671 * Call the mode specific worker function.
5672 */
5673 VBOXSTRICTRC rcStrict;
5674 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5675 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5676 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5677 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5678 else
5679 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5680
5681 /* Flush the prefetch buffer. */
5682#ifdef IEM_WITH_CODE_TLB
5683 pVCpu->iem.s.pbInstrBuf = NULL;
5684#else
5685 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5686#endif
5687
5688 /*
5689 * Unwind.
5690 */
5691 pVCpu->iem.s.cXcptRecursions--;
5692 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5693 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5694 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5695 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5696 pVCpu->iem.s.cXcptRecursions + 1));
5697 return rcStrict;
5698}
5699
5700#ifdef IEM_WITH_SETJMP
5701/**
5702 * See iemRaiseXcptOrInt. Will not return.
5703 */
5704IEM_STATIC DECL_NO_RETURN(void)
5705iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5706 uint8_t cbInstr,
5707 uint8_t u8Vector,
5708 uint32_t fFlags,
5709 uint16_t uErr,
5710 uint64_t uCr2)
5711{
5712 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5713 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5714}
5715#endif
5716
5717
5718/** \#DE - 00. */
5719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5720{
5721 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5722}
5723
5724
5725/** \#DB - 01.
5726 * @note This automatically clears DR7.GD. */
5727DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5728{
5729 /** @todo set/clear RF. */
5730 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5732}
5733
5734
5735/** \#BR - 05. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5739}
5740
5741
5742/** \#UD - 06. */
5743DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5744{
5745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5746}
5747
5748
5749/** \#NM - 07. */
5750DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5751{
5752 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5753}
5754
5755
5756/** \#TS(err) - 0a. */
5757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5758{
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5760}
5761
5762
5763/** \#TS(tr) - 0a. */
5764DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5765{
5766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5767 pVCpu->cpum.GstCtx.tr.Sel, 0);
5768}
5769
5770
5771/** \#TS(0) - 0a. */
5772DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5773{
5774 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5775 0, 0);
5776}
5777
5778
5779/** \#TS(err) - 0a. */
5780DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5781{
5782 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5783 uSel & X86_SEL_MASK_OFF_RPL, 0);
5784}
5785
5786
5787/** \#NP(err) - 0b. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5789{
5790 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5791}
5792
5793
5794/** \#NP(sel) - 0b. */
5795DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5796{
5797 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5798 uSel & ~X86_SEL_RPL, 0);
5799}
5800
5801
5802/** \#SS(seg) - 0c. */
5803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5804{
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5806 uSel & ~X86_SEL_RPL, 0);
5807}
5808
5809
5810/** \#SS(err) - 0c. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5812{
5813 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5814}
5815
5816
5817/** \#GP(n) - 0d. */
5818DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5819{
5820 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5821}
5822
5823
5824/** \#GP(0) - 0d. */
5825DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5826{
5827 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5828}
5829
5830#ifdef IEM_WITH_SETJMP
5831/** \#GP(0) - 0d. */
5832DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5833{
5834 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5835}
5836#endif
5837
5838
5839/** \#GP(sel) - 0d. */
5840DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5841{
5842 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5843 Sel & ~X86_SEL_RPL, 0);
5844}
5845
5846
5847/** \#GP(0) - 0d. */
5848DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5849{
5850 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5851}
5852
5853
5854/** \#GP(sel) - 0d. */
5855DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5856{
5857 NOREF(iSegReg); NOREF(fAccess);
5858 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5859 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5860}
5861
5862#ifdef IEM_WITH_SETJMP
5863/** \#GP(sel) - 0d, longjmp. */
5864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5865{
5866 NOREF(iSegReg); NOREF(fAccess);
5867 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5868 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5869}
5870#endif
5871
5872/** \#GP(sel) - 0d. */
5873DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5874{
5875 NOREF(Sel);
5876 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5877}
5878
5879#ifdef IEM_WITH_SETJMP
5880/** \#GP(sel) - 0d, longjmp. */
5881DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5882{
5883 NOREF(Sel);
5884 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5885}
5886#endif
5887
5888
5889/** \#GP(sel) - 0d. */
5890DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5891{
5892 NOREF(iSegReg); NOREF(fAccess);
5893 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5894}
5895
5896#ifdef IEM_WITH_SETJMP
5897/** \#GP(sel) - 0d, longjmp. */
5898DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5899 uint32_t fAccess)
5900{
5901 NOREF(iSegReg); NOREF(fAccess);
5902 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5903}
5904#endif
5905
5906
5907/** \#PF(n) - 0e. */
5908DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5909{
5910 uint16_t uErr;
5911 switch (rc)
5912 {
5913 case VERR_PAGE_NOT_PRESENT:
5914 case VERR_PAGE_TABLE_NOT_PRESENT:
5915 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5916 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5917 uErr = 0;
5918 break;
5919
5920 default:
5921 AssertMsgFailed(("%Rrc\n", rc));
5922 RT_FALL_THRU();
5923 case VERR_ACCESS_DENIED:
5924 uErr = X86_TRAP_PF_P;
5925 break;
5926
5927 /** @todo reserved */
5928 }
5929
5930 if (pVCpu->iem.s.uCpl == 3)
5931 uErr |= X86_TRAP_PF_US;
5932
5933 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5934 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5935 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5936 uErr |= X86_TRAP_PF_ID;
5937
5938#if 0 /* This is so much non-sense, really. Why was it done like that? */
5939 /* Note! RW access callers reporting a WRITE protection fault, will clear
5940 the READ flag before calling. So, read-modify-write accesses (RW)
5941 can safely be reported as READ faults. */
5942 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5943 uErr |= X86_TRAP_PF_RW;
5944#else
5945 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5946 {
5947 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5948 /// (regardless of outcome of the comparison in the latter case).
5949 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5950 uErr |= X86_TRAP_PF_RW;
5951 }
5952#endif
5953
5954 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5955 uErr, GCPtrWhere);
5956}
5957
5958#ifdef IEM_WITH_SETJMP
5959/** \#PF(n) - 0e, longjmp. */
5960IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5961{
5962 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5963}
5964#endif
5965
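/* Example (sketch only, hypothetical values): how the error-code bits above combine
   for a ring-3 write to a not-present page vs. a supervisor code fetch that is
   denied with CR4.PAE and EFER.NXE set. */
#if 0
uint16_t uErrUserWrite = 0;                 /* VERR_PAGE_NOT_PRESENT -> P=0.        */
uErrUserWrite |= X86_TRAP_PF_US;            /* uCpl == 3.                           */
uErrUserWrite |= X86_TRAP_PF_RW;            /* IEM_ACCESS_TYPE_WRITE.               */
/* uErrUserWrite == X86_TRAP_PF_US | X86_TRAP_PF_RW. */

uint16_t uErrNxFetch = X86_TRAP_PF_P        /* VERR_ACCESS_DENIED -> P=1.           */
                     | X86_TRAP_PF_ID;      /* IEM_ACCESS_WHAT_CODE w/ PAE + NXE.   */
#endif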
5966
5967/** \#MF(0) - 10. */
5968DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5969{
5970 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5971}
5972
5973
5974/** \#AC(0) - 11. */
5975DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5976{
5977 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5978}
5979
5980
5981/**
5982 * Macro for calling iemCImplRaiseDivideError().
5983 *
5984 * This enables us to add/remove arguments and force different levels of
5985 * inlining as we wish.
5986 *
5987 * @return Strict VBox status code.
5988 */
5989#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5990IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5991{
5992 NOREF(cbInstr);
5993 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5994}
5995
5996
5997/**
5998 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5999 *
6000 * This enables us to add/remove arguments and force different levels of
6001 * inlining as we wish.
6002 *
6003 * @return Strict VBox status code.
6004 */
6005#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6006IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6007{
6008 NOREF(cbInstr);
6009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6010}
6011
6012
6013/**
6014 * Macro for calling iemCImplRaiseInvalidOpcode().
6015 *
6016 * This enables us to add/remove arguments and force different levels of
6017 * inlining as we wish.
6018 *
6019 * @return Strict VBox status code.
6020 */
6021#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6022IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6023{
6024 NOREF(cbInstr);
6025 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6026}
6027
6028
6029/** @} */
6030
6031
6032/*
6033 *
6034 * Helper routines.
6035 * Helper routines.
6036 * Helper routines.
6037 *
6038 */
6039
6040/**
6041 * Recalculates the effective operand size.
6042 *
6043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6044 */
6045IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6046{
6047 switch (pVCpu->iem.s.enmCpuMode)
6048 {
6049 case IEMMODE_16BIT:
6050 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6051 break;
6052 case IEMMODE_32BIT:
6053 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6054 break;
6055 case IEMMODE_64BIT:
6056 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6057 {
6058 case 0:
6059 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6060 break;
6061 case IEM_OP_PRF_SIZE_OP:
6062 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6063 break;
6064 case IEM_OP_PRF_SIZE_REX_W:
6065 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6066 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6067 break;
6068 }
6069 break;
6070 default:
6071 AssertFailed();
6072 }
6073}
6074
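/* Example (sketch only, hypothetical state): the 64-bit mode cases above, where
   REX.W takes precedence over the 0x66 operand-size prefix. */
#if 0
pVCpu->iem.s.enmCpuMode   = IEMMODE_64BIT;
pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
pVCpu->iem.s.fPrefixes    = IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_SIZE_REX_W;
iemRecalEffOpSize(pVCpu);
/* enmEffOpSize is now IEMMODE_64BIT; with only IEM_OP_PRF_SIZE_OP it would be
   IEMMODE_16BIT, and with no prefixes it would be enmDefOpSize. */
#endif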
6075
6076/**
6077 * Sets the default operand size to 64-bit and recalculates the effective
6078 * operand size.
6079 *
6080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6081 */
6082IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6083{
6084 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6085 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6086 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6087 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6088 else
6089 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6090}
6091
6092
6093/*
6094 *
6095 * Common opcode decoders.
6096 * Common opcode decoders.
6097 * Common opcode decoders.
6098 *
6099 */
6100//#include <iprt/mem.h>
6101
6102/**
6103 * Used to add extra details about a stub case.
6104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6105 */
6106IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6107{
6108#if defined(LOG_ENABLED) && defined(IN_RING3)
6109 PVM pVM = pVCpu->CTX_SUFF(pVM);
6110 char szRegs[4096];
6111 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6112 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6113 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6114 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6115 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6116 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6117 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6118 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6119 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6120 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6121 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6122 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6123 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6124 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6125 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6126 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6127 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6128 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6129 " efer=%016VR{efer}\n"
6130 " pat=%016VR{pat}\n"
6131 " sf_mask=%016VR{sf_mask}\n"
6132 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6133 " lstar=%016VR{lstar}\n"
6134 " star=%016VR{star} cstar=%016VR{cstar}\n"
6135 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6136 );
6137
6138 char szInstr[256];
6139 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6140 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6141 szInstr, sizeof(szInstr), NULL);
6142
6143 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6144#else
6145 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6146#endif
6147}
6148
6149/**
6150 * Complains about a stub.
6151 *
6152 * Providing two versions of this macro, one for daily use and one for use when
6153 * working on IEM.
6154 */
6155#if 0
6156# define IEMOP_BITCH_ABOUT_STUB() \
6157 do { \
6158 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6159 iemOpStubMsg2(pVCpu); \
6160 RTAssertPanic(); \
6161 } while (0)
6162#else
6163# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6164#endif
6165
6166/** Stubs an opcode. */
6167#define FNIEMOP_STUB(a_Name) \
6168 FNIEMOP_DEF(a_Name) \
6169 { \
6170 RT_NOREF_PV(pVCpu); \
6171 IEMOP_BITCH_ABOUT_STUB(); \
6172 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6173 } \
6174 typedef int ignore_semicolon
6175
6176/** Stubs an opcode. */
6177#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6178 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6179 { \
6180 RT_NOREF_PV(pVCpu); \
6181 RT_NOREF_PV(a_Name0); \
6182 IEMOP_BITCH_ABOUT_STUB(); \
6183 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6184 } \
6185 typedef int ignore_semicolon
6186
6187/** Stubs an opcode which currently should raise \#UD. */
6188#define FNIEMOP_UD_STUB(a_Name) \
6189 FNIEMOP_DEF(a_Name) \
6190 { \
6191 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6192 return IEMOP_RAISE_INVALID_OPCODE(); \
6193 } \
6194 typedef int ignore_semicolon
6195
6196/** Stubs an opcode which currently should raise \#UD. */
6197#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6198 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6199 { \
6200 RT_NOREF_PV(pVCpu); \
6201 RT_NOREF_PV(a_Name0); \
6202 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6203 return IEMOP_RAISE_INVALID_OPCODE(); \
6204 } \
6205 typedef int ignore_semicolon
6206
6207
6208
6209/** @name Register Access.
6210 * @{
6211 */
6212
6213/**
6214 * Gets a reference (pointer) to the specified hidden segment register.
6215 *
6216 * @returns Hidden register reference.
6217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6218 * @param iSegReg The segment register.
6219 */
6220IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6221{
6222 Assert(iSegReg < X86_SREG_COUNT);
6223 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6224 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6225
6226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6227 return pSReg;
6228}
6229
6230
6231/**
6232 * Ensures that the given hidden segment register is up to date.
6233 *
6234 * @returns Hidden register reference.
6235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6236 * @param pSReg The segment register.
6237 */
6238IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6239{
6240 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6241 NOREF(pVCpu);
6242 return pSReg;
6243}
6244
6245
6246/**
6247 * Gets a reference (pointer) to the specified segment register (the selector
6248 * value).
6249 *
6250 * @returns Pointer to the selector variable.
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 * @param iSegReg The segment register.
6253 */
6254DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6255{
6256 Assert(iSegReg < X86_SREG_COUNT);
6257 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6258 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6259}
6260
6261
6262/**
6263 * Fetches the selector value of a segment register.
6264 *
6265 * @returns The selector value.
6266 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6267 * @param iSegReg The segment register.
6268 */
6269DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6270{
6271 Assert(iSegReg < X86_SREG_COUNT);
6272 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6273 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6274}
6275
6276
6277/**
6278 * Fetches the base address value of a segment register.
6279 *
6280 * @returns The segment base address value.
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 * @param iSegReg The segment register.
6283 */
6284DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6285{
6286 Assert(iSegReg < X86_SREG_COUNT);
6287 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6288 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6289}
6290
6291
6292/**
6293 * Gets a reference (pointer) to the specified general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The general purpose register.
6298 */
6299DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6308 *
6309 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6310 *
6311 * @returns Register reference.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iReg The register.
6314 */
6315DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6316{
6317 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6318 {
6319 Assert(iReg < 16);
6320 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6321 }
6322 /* high 8-bit register. */
6323 Assert(iReg < 8);
6324 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6325}
6326
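/* Example (sketch only): without a REX prefix, register encodings 4..7 select the
   legacy high-byte registers, so iReg=4 resolves to AH (aGRegs[0].bHi) and iReg=7
   to BH (aGRegs[3].bHi). */
#if 0
uint8_t *pbAh = iemGRegRefU8(pVCpu, 4); /* == &pVCpu->cpum.GstCtx.aGRegs[0].bHi when no REX prefix is set. */
#endif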
6327
6328/**
6329 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6330 *
6331 * @returns Register reference.
6332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6333 * @param iReg The register.
6334 */
6335DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6336{
6337 Assert(iReg < 16);
6338 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6339}
6340
6341
6342/**
6343 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6344 *
6345 * @returns Register reference.
6346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6347 * @param iReg The register.
6348 */
6349DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6350{
6351 Assert(iReg < 16);
6352 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6353}
6354
6355
6356/**
6357 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6358 *
6359 * @returns Register reference.
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param iReg The register.
6362 */
6363DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6364{
6365 Assert(iReg < 16);
6366 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6367}
6368
6369
6370/**
6371 * Gets a reference (pointer) to the specified segment register's base address.
6372 *
6373 * @returns Segment register base address reference.
6374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6375 * @param iSegReg The segment selector.
6376 */
6377DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6378{
6379 Assert(iSegReg < X86_SREG_COUNT);
6380 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6381 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6382}
6383
6384
6385/**
6386 * Fetches the value of an 8-bit general purpose register.
6387 *
6388 * @returns The register value.
6389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6390 * @param iReg The register.
6391 */
6392DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6393{
6394 return *iemGRegRefU8(pVCpu, iReg);
6395}
6396
6397
6398/**
6399 * Fetches the value of a 16-bit general purpose register.
6400 *
6401 * @returns The register value.
6402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6403 * @param iReg The register.
6404 */
6405DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6406{
6407 Assert(iReg < 16);
6408 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6409}
6410
6411
6412/**
6413 * Fetches the value of a 32-bit general purpose register.
6414 *
6415 * @returns The register value.
6416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6417 * @param iReg The register.
6418 */
6419DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6420{
6421 Assert(iReg < 16);
6422 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6423}
6424
6425
6426/**
6427 * Fetches the value of a 64-bit general purpose register.
6428 *
6429 * @returns The register value.
6430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6431 * @param iReg The register.
6432 */
6433DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6434{
6435 Assert(iReg < 16);
6436 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6437}
6438
6439
6440/**
6441 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6442 *
6443 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6444 * segment limit.
6445 *
6446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6447 * @param offNextInstr The offset of the next instruction.
6448 */
6449IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6450{
6451 switch (pVCpu->iem.s.enmEffOpSize)
6452 {
6453 case IEMMODE_16BIT:
6454 {
6455 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6456 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6457 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6458 return iemRaiseGeneralProtectionFault0(pVCpu);
6459 pVCpu->cpum.GstCtx.rip = uNewIp;
6460 break;
6461 }
6462
6463 case IEMMODE_32BIT:
6464 {
6465 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6466 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6467
6468 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6469 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6470 return iemRaiseGeneralProtectionFault0(pVCpu);
6471 pVCpu->cpum.GstCtx.rip = uNewEip;
6472 break;
6473 }
6474
6475 case IEMMODE_64BIT:
6476 {
6477 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6478
6479 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6480 if (!IEM_IS_CANONICAL(uNewRip))
6481 return iemRaiseGeneralProtectionFault0(pVCpu);
6482 pVCpu->cpum.GstCtx.rip = uNewRip;
6483 break;
6484 }
6485
6486 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6487 }
6488
6489 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6490
6491#ifndef IEM_WITH_CODE_TLB
6492 /* Flush the prefetch buffer. */
6493 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6494#endif
6495
6496 return VINF_SUCCESS;
6497}
6498
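/* Worked example (sketch only, hypothetical values): 16-bit wrap-around of the
   target IP computed above. */
#if 0
uint16_t const uIp          = UINT16_C(0xfffe);
int8_t   const offNextInstr = 4;
uint8_t  const cbInstr      = 2;                                        /* JMP rel8 */
uint16_t const uNewIp       = (uint16_t)(uIp + offNextInstr + cbInstr); /* == 0x0004, wraps within 64K. */
/* uNewIp is then checked against cs.u32Limit before being committed to rip. */
#endif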
6499
6500/**
6501 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6502 *
6503 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6504 * segment limit.
6505 *
6506 * @returns Strict VBox status code.
6507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6508 * @param offNextInstr The offset of the next instruction.
6509 */
6510IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6511{
6512 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6513
6514 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6515 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6516 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6517 return iemRaiseGeneralProtectionFault0(pVCpu);
6518 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6519 pVCpu->cpum.GstCtx.rip = uNewIp;
6520 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6521
6522#ifndef IEM_WITH_CODE_TLB
6523 /* Flush the prefetch buffer. */
6524 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6525#endif
6526
6527 return VINF_SUCCESS;
6528}
6529
6530
6531/**
6532 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6533 *
6534 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6535 * segment limit.
6536 *
6537 * @returns Strict VBox status code.
6538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6539 * @param offNextInstr The offset of the next instruction.
6540 */
6541IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6542{
6543 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6544
6545 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6546 {
6547 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6548
6549 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6550 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6551 return iemRaiseGeneralProtectionFault0(pVCpu);
6552 pVCpu->cpum.GstCtx.rip = uNewEip;
6553 }
6554 else
6555 {
6556 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6557
6558 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6559 if (!IEM_IS_CANONICAL(uNewRip))
6560 return iemRaiseGeneralProtectionFault0(pVCpu);
6561 pVCpu->cpum.GstCtx.rip = uNewRip;
6562 }
6563 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6564
6565#ifndef IEM_WITH_CODE_TLB
6566 /* Flush the prefetch buffer. */
6567 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6568#endif
6569
6570 return VINF_SUCCESS;
6571}
6572
6573
6574/**
6575 * Performs a near jump to the specified address.
6576 *
6577 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6578 * segment limit.
6579 *
6580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6581 * @param uNewRip The new RIP value.
6582 */
6583IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6584{
6585 switch (pVCpu->iem.s.enmEffOpSize)
6586 {
6587 case IEMMODE_16BIT:
6588 {
6589 Assert(uNewRip <= UINT16_MAX);
6590 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6591 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6592 return iemRaiseGeneralProtectionFault0(pVCpu);
6593 /** @todo Test 16-bit jump in 64-bit mode. */
6594 pVCpu->cpum.GstCtx.rip = uNewRip;
6595 break;
6596 }
6597
6598 case IEMMODE_32BIT:
6599 {
6600 Assert(uNewRip <= UINT32_MAX);
6601 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6602 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6603
6604 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6605 return iemRaiseGeneralProtectionFault0(pVCpu);
6606 pVCpu->cpum.GstCtx.rip = uNewRip;
6607 break;
6608 }
6609
6610 case IEMMODE_64BIT:
6611 {
6612 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6613
6614 if (!IEM_IS_CANONICAL(uNewRip))
6615 return iemRaiseGeneralProtectionFault0(pVCpu);
6616 pVCpu->cpum.GstCtx.rip = uNewRip;
6617 break;
6618 }
6619
6620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6621 }
6622
6623 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6624
6625#ifndef IEM_WITH_CODE_TLB
6626 /* Flush the prefetch buffer. */
6627 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6628#endif
6629
6630 return VINF_SUCCESS;
6631}
6632
6633
6634/**
6635 * Get the address of the top of the stack.
6636 *
6637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6638 */
6639DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6640{
6641 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6642 return pVCpu->cpum.GstCtx.rsp;
6643 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6644 return pVCpu->cpum.GstCtx.esp;
6645 return pVCpu->cpum.GstCtx.sp;
6646}
6647
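/* Example (sketch only, hypothetical state): which stack pointer width gets used. */
#if 0
pVCpu->iem.s.enmCpuMode               = IEMMODE_16BIT;
pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig = 0;
pVCpu->cpum.GstCtx.rsp                = UINT64_C(0x0000123480001000);
RTGCPTR GCPtrRsp = iemRegGetEffRsp(pVCpu);  /* == 0x1000; only SP is significant with a 16-bit stack. */
#endif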
6648
6649/**
6650 * Updates the RIP/EIP/IP to point to the next instruction.
6651 *
6652 * This function leaves the EFLAGS.RF flag alone.
6653 *
6654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6655 * @param cbInstr The number of bytes to add.
6656 */
6657IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6658{
6659 switch (pVCpu->iem.s.enmCpuMode)
6660 {
6661 case IEMMODE_16BIT:
6662 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6663 pVCpu->cpum.GstCtx.eip += cbInstr;
6664 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6665 break;
6666
6667 case IEMMODE_32BIT:
6668 pVCpu->cpum.GstCtx.eip += cbInstr;
6669 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6670 break;
6671
6672 case IEMMODE_64BIT:
6673 pVCpu->cpum.GstCtx.rip += cbInstr;
6674 break;
6675 default: AssertFailed();
6676 }
6677}
6678
6679
6680#if 0
6681/**
6682 * Updates the RIP/EIP/IP to point to the next instruction.
6683 *
6684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6685 */
6686IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6687{
6688 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6689}
6690#endif
6691
6692
6693
6694/**
6695 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6696 *
6697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6698 * @param cbInstr The number of bytes to add.
6699 */
6700IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6701{
6702 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6703
6704 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6705#if ARCH_BITS >= 64
6706 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6707 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6708 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6709#else
6710 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6711 pVCpu->cpum.GstCtx.rip += cbInstr;
6712 else
6713 pVCpu->cpum.GstCtx.eip += cbInstr;
6714#endif
6715}
6716
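/* Example (sketch only, hypothetical values): the table-driven advance above masks
   the result to 32 bits outside 64-bit mode, mirroring EIP semantics. */
#if 0
pVCpu->iem.s.enmCpuMode = IEMMODE_32BIT;
pVCpu->cpum.GstCtx.rip  = UINT32_C(0xfffffffc);
iemRegAddToRipAndClearRF(pVCpu, 2);
/* rip == 0xfffffffe and EFLAGS.RF is cleared; in IEMMODE_64BIT no masking is applied. */
#endif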
6717
6718/**
6719 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6720 *
6721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6722 */
6723IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6724{
6725 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6726}
6727
6728
6729/**
6730 * Adds to the stack pointer.
6731 *
6732 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6733 * @param cbToAdd The number of bytes to add (8-bit!).
6734 */
6735DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6736{
6737 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6738 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6739 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6740 pVCpu->cpum.GstCtx.esp += cbToAdd;
6741 else
6742 pVCpu->cpum.GstCtx.sp += cbToAdd;
6743}
6744
6745
6746/**
6747 * Subtracts from the stack pointer.
6748 *
6749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6750 * @param cbToSub The number of bytes to subtract (8-bit!).
6751 */
6752DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6753{
6754 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6755 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6756 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6757 pVCpu->cpum.GstCtx.esp -= cbToSub;
6758 else
6759 pVCpu->cpum.GstCtx.sp -= cbToSub;
6760}
6761
6762
6763/**
6764 * Adds to the temporary stack pointer.
6765 *
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6768 * @param cbToAdd The number of bytes to add (16-bit).
6769 */
6770DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6771{
6772 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6773 pTmpRsp->u += cbToAdd;
6774 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6775 pTmpRsp->DWords.dw0 += cbToAdd;
6776 else
6777 pTmpRsp->Words.w0 += cbToAdd;
6778}
6779
6780
6781/**
6782 * Subtracts from the temporary stack pointer.
6783 *
6784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6785 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6786 * @param cbToSub The number of bytes to subtract.
6787 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6788 * expecting that.
6789 */
6790DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6791{
6792 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6793 pTmpRsp->u -= cbToSub;
6794 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6795 pTmpRsp->DWords.dw0 -= cbToSub;
6796 else
6797 pTmpRsp->Words.w0 -= cbToSub;
6798}
6799
6800
6801/**
6802 * Calculates the effective stack address for a push of the specified size as
6803 * well as the new RSP value (upper bits may be masked).
6804 *
6805 * @returns Effective stack address for the push.
6806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6807 * @param cbItem The size of the stack item to push.
6808 * @param puNewRsp Where to return the new RSP value.
6809 */
6810DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6811{
6812 RTUINT64U uTmpRsp;
6813 RTGCPTR GCPtrTop;
6814 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6815
6816 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6817 GCPtrTop = uTmpRsp.u -= cbItem;
6818 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6819 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6820 else
6821 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6822 *puNewRsp = uTmpRsp.u;
6823 return GCPtrTop;
6824}
6825
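/* Example (sketch only, hypothetical values): a 2-byte push with a 16-bit stack
   only decrements SP; the upper RSP bits are preserved in the returned new RSP. */
#if 0
uint64_t uNewRsp;
pVCpu->cpum.GstCtx.rsp = UINT64_C(0x0000123400000008);   /* SS.D=0, not 64-bit mode. */
RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
/* GCPtrTop == 0x0006, uNewRsp == 0x0000123400000006. */
#endif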
6826
6827/**
6828 * Gets the current stack pointer and calculates the value after a pop of the
6829 * specified size.
6830 *
6831 * @returns Current stack pointer.
6832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6833 * @param cbItem The size of the stack item to pop.
6834 * @param puNewRsp Where to return the new RSP value.
6835 */
6836DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6837{
6838 RTUINT64U uTmpRsp;
6839 RTGCPTR GCPtrTop;
6840 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6841
6842 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6843 {
6844 GCPtrTop = uTmpRsp.u;
6845 uTmpRsp.u += cbItem;
6846 }
6847 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6848 {
6849 GCPtrTop = uTmpRsp.DWords.dw0;
6850 uTmpRsp.DWords.dw0 += cbItem;
6851 }
6852 else
6853 {
6854 GCPtrTop = uTmpRsp.Words.w0;
6855 uTmpRsp.Words.w0 += cbItem;
6856 }
6857 *puNewRsp = uTmpRsp.u;
6858 return GCPtrTop;
6859}
6860
6861
6862/**
6863 * Calculates the effective stack address for a push of the specified size as
6864 * well as the new temporary RSP value (upper bits may be masked).
6865 *
6866 * @returns Effective stack address for the push.
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 * @param pTmpRsp The temporary stack pointer. This is updated.
6869 * @param cbItem The size of the stack item to push.
6870 */
6871DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6872{
6873 RTGCPTR GCPtrTop;
6874
6875 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6876 GCPtrTop = pTmpRsp->u -= cbItem;
6877 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6878 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6879 else
6880 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6881 return GCPtrTop;
6882}
6883
6884
6885/**
6886 * Gets the effective stack address for a pop of the specified size and
6887 * calculates and updates the temporary RSP.
6888 *
6889 * @returns Current stack pointer.
6890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6891 * @param pTmpRsp The temporary stack pointer. This is updated.
6892 * @param cbItem The size of the stack item to pop.
6893 */
6894DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6895{
6896 RTGCPTR GCPtrTop;
6897 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6898 {
6899 GCPtrTop = pTmpRsp->u;
6900 pTmpRsp->u += cbItem;
6901 }
6902 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6903 {
6904 GCPtrTop = pTmpRsp->DWords.dw0;
6905 pTmpRsp->DWords.dw0 += cbItem;
6906 }
6907 else
6908 {
6909 GCPtrTop = pTmpRsp->Words.w0;
6910 pTmpRsp->Words.w0 += cbItem;
6911 }
6912 return GCPtrTop;
6913}
6914
6915/** @} */
6916
6917
6918/** @name FPU access and helpers.
6919 *
6920 * @{
6921 */
6922
6923
6924/**
6925 * Hook for preparing to use the host FPU.
6926 *
6927 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6928 *
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 */
6931DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6932{
6933#ifdef IN_RING3
6934 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6935#else
6936 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6937#endif
6938 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6939}
6940
6941
6942/**
6943 * Hook for preparing to use the host FPU for SSE.
6944 *
6945 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 */
6949DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6950{
6951 iemFpuPrepareUsage(pVCpu);
6952}
6953
6954
6955/**
6956 * Hook for preparing to use the host FPU for AVX.
6957 *
6958 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6959 *
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 */
6962DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6963{
6964 iemFpuPrepareUsage(pVCpu);
6965}
6966
6967
6968/**
6969 * Hook for actualizing the guest FPU state before the interpreter reads it.
6970 *
6971 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 */
6975DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6976{
6977#ifdef IN_RING3
6978 NOREF(pVCpu);
6979#else
6980 CPUMRZFpuStateActualizeForRead(pVCpu);
6981#endif
6982 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6983}
6984
6985
6986/**
6987 * Hook for actualizing the guest FPU state before the interpreter changes it.
6988 *
6989 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6990 *
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 */
6993DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6994{
6995#ifdef IN_RING3
6996 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6997#else
6998 CPUMRZFpuStateActualizeForChange(pVCpu);
6999#endif
7000 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7001}
7002
7003
7004/**
7005 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7006 * only.
7007 *
7008 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7009 *
7010 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7011 */
7012DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
7013{
7014#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7015 NOREF(pVCpu);
7016#else
7017 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7018#endif
7019 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7020}
7021
7022
7023/**
7024 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7025 * read+write.
7026 *
7027 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7028 *
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 */
7031DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
7032{
7033#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7034 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7035#else
7036 CPUMRZFpuStateActualizeForChange(pVCpu);
7037#endif
7038 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7039
7040 /* Make sure any changes are loaded the next time around. */
7041 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7042}
7043
7044
7045/**
7046 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7047 * only.
7048 *
7049 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7050 *
7051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7052 */
7053DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7054{
7055#ifdef IN_RING3
7056 NOREF(pVCpu);
7057#else
7058 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7059#endif
7060 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7061}
7062
7063
7064/**
7065 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7066 * read+write.
7067 *
7068 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7069 *
7070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7071 */
7072DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7073{
7074#ifdef IN_RING3
7075 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7076#else
7077 CPUMRZFpuStateActualizeForChange(pVCpu);
7078#endif
7079 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7080
7081 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7082 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7083}
7084
7085
7086/**
7087 * Stores a QNaN value into a FPU register.
7088 *
7089 * @param pReg Pointer to the register.
7090 */
7091DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7092{
7093 pReg->au32[0] = UINT32_C(0x00000000);
7094 pReg->au32[1] = UINT32_C(0xc0000000);
7095 pReg->au16[4] = UINT16_C(0xffff);
7096}
7097
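/* Example (sketch only): the value written above is the x87 "real indefinite" QNaN
   (sign=1, exponent=0x7fff, mantissa=0xc000000000000000), i.e. what masked x87
   exceptions produce. */
#if 0
RTFLOAT80U r80Indefinite;
iemFpuStoreQNan(&r80Indefinite);
#endif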
7098
7099/**
7100 * Updates the FOP, FPU.CS and FPUIP registers.
7101 *
7102 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7103 * @param pFpuCtx The FPU context.
7104 */
7105DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7106{
7107 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7108 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7109 /** @todo x87.CS and FPUIP need to be kept separately. */
7110 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7111 {
7112 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7113 * happens in real mode here based on the fnsave and fnstenv images. */
7114 pFpuCtx->CS = 0;
7115 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7116 }
7117 else if (!IEM_IS_LONG_MODE(pVCpu))
7118 {
7119 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7120 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7121 }
7122 else
7123 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7124}
7125
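/* Worked example (sketch only, hypothetical values): FPUIP in real/V86 mode. */
#if 0
pVCpu->cpum.GstCtx.cs.Sel = 0x1234;
pVCpu->cpum.GstCtx.eip    = 0x0010;
/* The real/V86 branch above yields pFpuCtx->CS = 0 and
   pFpuCtx->FPUIP = 0x0010 | (0x1234 << 4) = 0x12350. */
#endif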
7126
7127/**
7128 * Updates the x87.DS and FPUDP registers.
7129 *
7130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7131 * @param pFpuCtx The FPU context.
7132 * @param iEffSeg The effective segment register.
7133 * @param GCPtrEff The effective address relative to @a iEffSeg.
7134 */
7135DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7136{
7137 RTSEL sel;
7138 switch (iEffSeg)
7139 {
7140 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7141 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7142 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7143 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7144 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7145 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7146 default:
7147 AssertMsgFailed(("%d\n", iEffSeg));
7148 sel = pVCpu->cpum.GstCtx.ds.Sel;
7149 }
7150 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7151 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7152 {
7153 pFpuCtx->DS = 0;
7154 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7155 }
7156 else if (!IEM_IS_LONG_MODE(pVCpu))
7157 {
7158 pFpuCtx->DS = sel;
7159 pFpuCtx->FPUDP = GCPtrEff;
7160 }
7161 else
7162 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
7163}
7164
7165
7166/**
7167 * Rotates the stack registers in the push direction.
7168 *
7169 * @param pFpuCtx The FPU context.
7170 * @remarks This is a complete waste of time, but fxsave stores the registers in
7171 * stack order.
7172 */
7173DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7174{
7175 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7176 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7177 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7178 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7179 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7180 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7181 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7182 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7183 pFpuCtx->aRegs[0].r80 = r80Tmp;
7184}
7185
7186
7187/**
7188 * Rotates the stack registers in the pop direction.
7189 *
7190 * @param pFpuCtx The FPU context.
7191 * @remarks This is a complete waste of time, but fxsave stores the registers in
7192 * stack order.
7193 */
7194DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7195{
7196 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7197 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7198 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7199 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7200 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7201 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7202 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7203 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7204 pFpuCtx->aRegs[7].r80 = r80Tmp;
7205}
7206
7207
7208/**
7209 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7210 * exception prevents it.
7211 *
7212 * @param pResult The FPU operation result to push.
7213 * @param pFpuCtx The FPU context.
7214 */
7215IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7216{
7217 /* Update FSW and bail if there are pending exceptions afterwards. */
7218 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7219 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7220 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7221 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7222 {
7223 pFpuCtx->FSW = fFsw;
7224 return;
7225 }
7226
7227 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7228 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7229 {
7230 /* All is fine, push the actual value. */
7231 pFpuCtx->FTW |= RT_BIT(iNewTop);
7232 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7233 }
7234 else if (pFpuCtx->FCW & X86_FCW_IM)
7235 {
7236 /* Masked stack overflow, push QNaN. */
7237 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7238 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7239 }
7240 else
7241 {
7242 /* Raise stack overflow, don't push anything. */
7243 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7244 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7245 return;
7246 }
7247
7248 fFsw &= ~X86_FSW_TOP_MASK;
7249 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7250 pFpuCtx->FSW = fFsw;
7251
7252 iemFpuRotateStackPush(pFpuCtx);
7253}
7254
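/* Example (sketch only): the TOP arithmetic used when pushing. TOP is a 3-bit field,
   so adding 7 and masking is a decrement modulo 8; the value stored in aRegs[7] is
   then rotated into aRegs[0] (the new ST(0)) by iemFpuRotateStackPush. */
#if 0
uint16_t const iTopOld = 0;                                   /* hypothetical */
uint16_t const iTopNew = (iTopOld + 7) & X86_FSW_TOP_SMASK;   /* == 7, i.e. (TOP - 1) mod 8. */
#endif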
7255
7256/**
7257 * Stores a result in a FPU register and updates the FSW and FTW.
7258 *
7259 * @param pFpuCtx The FPU context.
7260 * @param pResult The result to store.
7261 * @param iStReg Which FPU register to store it in.
7262 */
7263IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7264{
7265 Assert(iStReg < 8);
7266 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7267 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7268 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7269 pFpuCtx->FTW |= RT_BIT(iReg);
7270 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7271}
7272
7273
7274/**
7275 * Only updates the FPU status word (FSW) with the result of the current
7276 * instruction.
7277 *
7278 * @param pFpuCtx The FPU context.
7279 * @param u16FSW The FSW output of the current instruction.
7280 */
7281IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7282{
7283 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7284 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7285}
7286
7287
7288/**
7289 * Pops one item off the FPU stack if no pending exception prevents it.
7290 *
7291 * @param pFpuCtx The FPU context.
7292 */
7293IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7294{
7295 /* Check pending exceptions. */
7296 uint16_t uFSW = pFpuCtx->FSW;
7297 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7298 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7299 return;
7300
7301 /* TOP--. */
7302 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7303 uFSW &= ~X86_FSW_TOP_MASK;
7304 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7305 pFpuCtx->FSW = uFSW;
7306
7307 /* Mark the previous ST0 as empty. */
7308 iOldTop >>= X86_FSW_TOP_SHIFT;
7309 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7310
7311 /* Rotate the registers. */
7312 iemFpuRotateStackPop(pFpuCtx);
7313}
7314
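/* Example (sketch only): the TOP increment used when popping. Adding 9 within the
   shifted field and masking with X86_FSW_TOP_MASK is TOP = (TOP + 1) mod 8 without
   extracting the field first. */
#if 0
uint16_t const iOldTop = (uint16_t)(7 << X86_FSW_TOP_SHIFT);  /* hypothetical TOP=7 */
uint16_t const iNewTop = (uint16_t)((iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK);
/* iNewTop == 0 << X86_FSW_TOP_SHIFT, i.e. TOP wrapped from 7 to 0. */
#endif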
7315
7316/**
7317 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7318 *
7319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7320 * @param pResult The FPU operation result to push.
7321 */
7322IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7323{
7324 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7325 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7326 iemFpuMaybePushResult(pResult, pFpuCtx);
7327}
7328
7329
7330/**
7331 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7332 * and sets FPUDP and FPUDS.
7333 *
7334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7335 * @param pResult The FPU operation result to push.
7336 * @param iEffSeg The effective segment register.
7337 * @param GCPtrEff The effective address relative to @a iEffSeg.
7338 */
7339IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7340{
7341 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7342 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7343 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7344 iemFpuMaybePushResult(pResult, pFpuCtx);
7345}
7346
7347
7348/**
7349 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7350 * unless a pending exception prevents it.
7351 *
7352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7353 * @param pResult The FPU operation result to store and push.
7354 */
7355IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7356{
7357 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7358 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7359
7360 /* Update FSW and bail if there are pending exceptions afterwards. */
7361 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7362 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7363 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7364 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7365 {
7366 pFpuCtx->FSW = fFsw;
7367 return;
7368 }
7369
7370 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7371 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7372 {
7373 /* All is fine, push the actual value. */
7374 pFpuCtx->FTW |= RT_BIT(iNewTop);
7375 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7376 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7377 }
7378 else if (pFpuCtx->FCW & X86_FCW_IM)
7379 {
7380 /* Masked stack overflow, push QNaN. */
7381 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7382 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7383 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7384 }
7385 else
7386 {
7387 /* Raise stack overflow, don't push anything. */
7388 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7389 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7390 return;
7391 }
7392
7393 fFsw &= ~X86_FSW_TOP_MASK;
7394 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7395 pFpuCtx->FSW = fFsw;
7396
7397 iemFpuRotateStackPush(pFpuCtx);
7398}
7399
7400
7401/**
7402 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7403 * FOP.
7404 *
7405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7406 * @param pResult The result to store.
7407 * @param iStReg Which FPU register to store it in.
7408 */
7409IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7410{
7411 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7412 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7413 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7414}
7415
7416
7417/**
7418 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7419 * FOP, and then pops the stack.
7420 *
7421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7422 * @param pResult The result to store.
7423 * @param iStReg Which FPU register to store it in.
7424 */
7425IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7426{
7427 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7428 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7429 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7430 iemFpuMaybePopOne(pFpuCtx);
7431}
7432
7433
7434/**
7435 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7436 * FPUDP, and FPUDS.
7437 *
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 * @param pResult The result to store.
7440 * @param iStReg Which FPU register to store it in.
7441 * @param iEffSeg The effective memory operand selector register.
7442 * @param GCPtrEff The effective memory operand offset.
7443 */
7444IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7445 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7446{
7447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7448 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7449 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7450 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7451}
7452
7453
7454/**
7455 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7456 * FPUDP, and FPUDS, and then pops the stack.
7457 *
7458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7459 * @param pResult The result to store.
7460 * @param iStReg Which FPU register to store it in.
7461 * @param iEffSeg The effective memory operand selector register.
7462 * @param GCPtrEff The effective memory operand offset.
7463 */
7464IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7465 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7466{
7467 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7468 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7469 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7470 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7471 iemFpuMaybePopOne(pFpuCtx);
7472}
7473
7474
7475/**
7476 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 */
7480IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7481{
7482 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7483 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7484}
7485
7486
7487/**
7488 * Marks the specified stack register as free (for FFREE).
7489 *
7490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7491 * @param iStReg The register to free.
7492 */
7493IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7494{
7495 Assert(iStReg < 8);
7496 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7497 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7498 pFpuCtx->FTW &= ~RT_BIT(iReg);
7499}
7500
7501
7502/**
7503 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7504 *
7505 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7506 */
7507IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7508{
7509 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7510 uint16_t uFsw = pFpuCtx->FSW;
7511 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7512 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7513 uFsw &= ~X86_FSW_TOP_MASK;
7514 uFsw |= uTop;
7515 pFpuCtx->FSW = uFsw;
7516}
7517
7518
7519/**
7520 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7521 *
7522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7523 */
7524IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7525{
7526 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7527 uint16_t uFsw = pFpuCtx->FSW;
7528 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7529 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7530 uFsw &= ~X86_FSW_TOP_MASK;
7531 uFsw |= uTop;
7532 pFpuCtx->FSW = uFsw;
7533}
7534
7535
7536/**
7537 * Updates the FSW, FOP, FPUIP, and FPUCS.
7538 *
7539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7540 * @param u16FSW The FSW from the current instruction.
7541 */
7542IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7543{
7544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7546 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7547}
7548
7549
7550/**
7551 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7552 *
7553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7554 * @param u16FSW The FSW from the current instruction.
7555 */
7556IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7557{
7558 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7559 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7560 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7561 iemFpuMaybePopOne(pFpuCtx);
7562}
7563
7564
7565/**
7566 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7567 *
7568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7569 * @param u16FSW The FSW from the current instruction.
7570 * @param iEffSeg The effective memory operand selector register.
7571 * @param GCPtrEff The effective memory operand offset.
7572 */
7573IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7574{
7575 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7576 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7578 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7579}
7580
7581
7582/**
7583 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7584 *
7585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7586 * @param u16FSW The FSW from the current instruction.
7587 */
7588IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7589{
7590 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7591 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7592 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7593 iemFpuMaybePopOne(pFpuCtx);
7594 iemFpuMaybePopOne(pFpuCtx);
7595}
7596
7597
7598/**
7599 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7600 *
7601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7602 * @param u16FSW The FSW from the current instruction.
7603 * @param iEffSeg The effective memory operand selector register.
7604 * @param GCPtrEff The effective memory operand offset.
7605 */
7606IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7607{
7608 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7609 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7610 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7611 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7612 iemFpuMaybePopOne(pFpuCtx);
7613}
7614
7615
7616/**
7617 * Worker routine for raising an FPU stack underflow exception.
7618 *
7619 * @param pFpuCtx The FPU context.
7620 * @param iStReg The stack register being accessed.
7621 */
7622IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7623{
7624 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7625 if (pFpuCtx->FCW & X86_FCW_IM)
7626 {
7627 /* Masked underflow. */
7628 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7629 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7630 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7631 if (iStReg != UINT8_MAX)
7632 {
7633 pFpuCtx->FTW |= RT_BIT(iReg);
7634 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7635 }
7636 }
7637 else
7638 {
7639 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7640 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7641 }
7642}
7643
7644
7645/**
7646 * Raises a FPU stack underflow exception.
7647 *
7648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7649 * @param iStReg The destination register that should be loaded
7650 * with QNaN if \#IS is not masked. Specify
7651 * UINT8_MAX if none (like for fcom).
7652 */
7653DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7654{
7655 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7656 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7657 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7658}
7659
7660
7661DECL_NO_INLINE(IEM_STATIC, void)
7662iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7663{
7664 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7665 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7666 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7667 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7668}
7669
7670
7671DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7672{
7673 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7674 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7675 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7676 iemFpuMaybePopOne(pFpuCtx);
7677}
7678
7679
7680DECL_NO_INLINE(IEM_STATIC, void)
7681iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7682{
7683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7684 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7685 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7686 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7687 iemFpuMaybePopOne(pFpuCtx);
7688}
7689
7690
7691DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7692{
7693 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7694 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7695 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7696 iemFpuMaybePopOne(pFpuCtx);
7697 iemFpuMaybePopOne(pFpuCtx);
7698}
7699
7700
7701DECL_NO_INLINE(IEM_STATIC, void)
7702iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7703{
7704 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7705 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7706
7707 if (pFpuCtx->FCW & X86_FCW_IM)
7708 {
7709 /* Masked underflow - Push QNaN. */
7710 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7711 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7712 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7713 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7714 pFpuCtx->FTW |= RT_BIT(iNewTop);
7715 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7716 iemFpuRotateStackPush(pFpuCtx);
7717 }
7718 else
7719 {
7720 /* Exception pending - don't change TOP or the register stack. */
7721 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7722 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7723 }
7724}
7725
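/*
 * On the TOP arithmetic above: a push decrements TOP modulo 8, which the
 * branch-free (TOP + 7) & 7 computation expresses (e.g. TOP=0 wraps to 7 and
 * TOP=3 becomes 2); the QNaN written to aRegs[7] then ends up as the new ST(0)
 * once iemFpuRotateStackPush has rotated the register file.
 */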
7726
7727DECL_NO_INLINE(IEM_STATIC, void)
7728iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7729{
7730 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7731 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7732
7733 if (pFpuCtx->FCW & X86_FCW_IM)
7734 {
7735 /* Masked underflow - Push QNaN. */
7736 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7737 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7738 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7739 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7740 pFpuCtx->FTW |= RT_BIT(iNewTop);
7741 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7742 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7743 iemFpuRotateStackPush(pFpuCtx);
7744 }
7745 else
7746 {
7747 /* Exception pending - don't change TOP or the register stack. */
7748 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7749 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7750 }
7751}
7752
7753
7754/**
7755 * Worker routine for raising an FPU stack overflow exception on a push.
7756 *
7757 * @param pFpuCtx The FPU context.
7758 */
7759IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7760{
7761 if (pFpuCtx->FCW & X86_FCW_IM)
7762 {
7763 /* Masked overflow. */
7764 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7765 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7766 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7767 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7768 pFpuCtx->FTW |= RT_BIT(iNewTop);
7769 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7770 iemFpuRotateStackPush(pFpuCtx);
7771 }
7772 else
7773 {
7774 /* Exception pending - don't change TOP or the register stack. */
7775 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7776 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7777 }
7778}
7779
7780
7781/**
7782 * Raises an FPU stack overflow exception on a push.
7783 *
7784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7785 */
7786DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7787{
7788 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7789 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7790 iemFpuStackPushOverflowOnly(pFpuCtx);
7791}
7792
7793
7794/**
7795 * Raises a FPU stack overflow exception on a push with a memory operand.
7796 *
7797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7798 * @param iEffSeg The effective memory operand selector register.
7799 * @param GCPtrEff The effective memory operand offset.
7800 */
7801DECL_NO_INLINE(IEM_STATIC, void)
7802iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7803{
7804 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7805 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7806 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7807 iemFpuStackPushOverflowOnly(pFpuCtx);
7808}
7809
7810
7811IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7812{
7813 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7814 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7815 if (pFpuCtx->FTW & RT_BIT(iReg))
7816 return VINF_SUCCESS;
7817 return VERR_NOT_FOUND;
7818}
7819
7820
7821IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7822{
7823 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7824 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7825 if (pFpuCtx->FTW & RT_BIT(iReg))
7826 {
7827 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7828 return VINF_SUCCESS;
7829 }
7830 return VERR_NOT_FOUND;
7831}
7832
7833
7834IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7835 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7836{
7837 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7838 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7839 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7840 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7841 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7842 {
7843 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7844 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7845 return VINF_SUCCESS;
7846 }
7847 return VERR_NOT_FOUND;
7848}
7849
7850
7851IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7852{
7853 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7854 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7855 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7856 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7857 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7858 {
7859 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7860 return VINF_SUCCESS;
7861 }
7862 return VERR_NOT_FOUND;
7863}
7864
7865
7866/**
7867 * Updates the FPU exception status after FCW is changed.
7868 *
7869 * @param pFpuCtx The FPU context.
7870 */
7871IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7872{
7873 uint16_t u16Fsw = pFpuCtx->FSW;
7874 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7875 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7876 else
7877 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7878 pFpuCtx->FSW = u16Fsw;
7879}
7880
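/*
 * Illustration of the summary logic above: with IE pending in FSW (bit 0) and
 * FCW.IM clear, the exception is unmasked and ES plus B get set; loading an
 * FCW that masks every pending exception clears them again. This keeps FSW.ES
 * and FSW.B consistent after control word changes such as FLDCW.
 */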
7881
7882/**
7883 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7884 *
7885 * @returns The full FTW.
7886 * @param pFpuCtx The FPU context.
7887 */
7888IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7889{
7890 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7891 uint16_t u16Ftw = 0;
7892 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7893 for (unsigned iSt = 0; iSt < 8; iSt++)
7894 {
7895 unsigned const iReg = (iSt + iTop) & 7;
7896 if (!(u8Ftw & RT_BIT(iReg)))
7897 u16Ftw |= 3 << (iReg * 2); /* empty */
7898 else
7899 {
7900 uint16_t uTag;
7901 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7902 if (pr80Reg->s.uExponent == 0x7fff)
7903 uTag = 2; /* Exponent is all 1's => Special. */
7904 else if (pr80Reg->s.uExponent == 0x0000)
7905 {
7906 if (pr80Reg->s.u64Mantissa == 0x0000)
7907 uTag = 1; /* All bits are zero => Zero. */
7908 else
7909 uTag = 2; /* Must be special. */
7910 }
7911 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7912 uTag = 0; /* Valid. */
7913 else
7914 uTag = 2; /* Must be special. */
7915
7916 u16Ftw |= uTag << (iReg * 2); /* shift the two tag bits into place */
7917 }
7918 }
7919
7920 return u16Ftw;
7921}
7922
7923
7924/**
7925 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7926 *
7927 * @returns The compressed FTW.
7928 * @param u16FullFtw The full FTW to convert.
7929 */
7930IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7931{
7932 uint8_t u8Ftw = 0;
7933 for (unsigned i = 0; i < 8; i++)
7934 {
7935 if ((u16FullFtw & 3) != 3 /*empty*/)
7936 u8Ftw |= RT_BIT(i);
7937 u16FullFtw >>= 2;
7938 }
7939
7940 return u8Ftw;
7941}
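
/*
 * Worked example for the two conversions above (purely illustrative): with
 * only physical registers 0 and 1 holding valid values, iemFpuCalcFullFtw
 * yields 0xfff0 (two 00 tags in the low bits, six 11 "empty" tags above), and
 * feeding that back through iemFpuCompressFtw recovers the abridged 0x03.
 */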
7942
7943/** @} */
7944
7945
7946/** @name Memory access.
7947 *
7948 * @{
7949 */
7950
7951
7952/**
7953 * Updates the IEMCPU::cbWritten counter if applicable.
7954 *
7955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7956 * @param fAccess The access being accounted for.
7957 * @param cbMem The access size.
7958 */
7959DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7960{
7961 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7962 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7963 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7964}
7965
7966
7967/**
7968 * Checks if the given segment can be written to, raising the appropriate
7969 * exception if not.
7970 *
7971 * @returns VBox strict status code.
7972 *
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param pHid Pointer to the hidden register.
7975 * @param iSegReg The register number.
7976 * @param pu64BaseAddr Where to return the base address to use for the
7977 * segment. (In 64-bit code it may differ from the
7978 * base in the hidden segment.)
7979 */
7980IEM_STATIC VBOXSTRICTRC
7981iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7982{
7983 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7984
7985 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7986 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7987 else
7988 {
7989 if (!pHid->Attr.n.u1Present)
7990 {
7991 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7992 AssertRelease(uSel == 0);
7993 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7994 return iemRaiseGeneralProtectionFault0(pVCpu);
7995 }
7996
7997 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7998 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7999 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8000 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8001 *pu64BaseAddr = pHid->u64Base;
8002 }
8003 return VINF_SUCCESS;
8004}
8005
8006
8007/**
8008 * Checks if the given segment can be read from, raising the appropriate
8009 * exception if not.
8010 *
8011 * @returns VBox strict status code.
8012 *
8013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8014 * @param pHid Pointer to the hidden register.
8015 * @param iSegReg The register number.
8016 * @param pu64BaseAddr Where to return the base address to use for the
8017 * segment. (In 64-bit code it may differ from the
8018 * base in the hidden segment.)
8019 */
8020IEM_STATIC VBOXSTRICTRC
8021iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8022{
8023 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8024
8025 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8026 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8027 else
8028 {
8029 if (!pHid->Attr.n.u1Present)
8030 {
8031 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8032 AssertRelease(uSel == 0);
8033 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8034 return iemRaiseGeneralProtectionFault0(pVCpu);
8035 }
8036
8037 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8038 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8039 *pu64BaseAddr = pHid->u64Base;
8040 }
8041 return VINF_SUCCESS;
8042}
8043
8044
8045/**
8046 * Applies the segment limit, base and attributes.
8047 *
8048 * This may raise a \#GP or \#SS.
8049 *
8050 * @returns VBox strict status code.
8051 *
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param fAccess The kind of access which is being performed.
8054 * @param iSegReg The index of the segment register to apply.
8055 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8056 * TSS, ++).
8057 * @param cbMem The access size.
8058 * @param pGCPtrMem Pointer to the guest memory address to apply
8059 * segmentation to. Input and output parameter.
8060 */
8061IEM_STATIC VBOXSTRICTRC
8062iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8063{
8064 if (iSegReg == UINT8_MAX)
8065 return VINF_SUCCESS;
8066
8067 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8068 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8069 switch (pVCpu->iem.s.enmCpuMode)
8070 {
8071 case IEMMODE_16BIT:
8072 case IEMMODE_32BIT:
8073 {
8074 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8075 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8076
8077 if ( pSel->Attr.n.u1Present
8078 && !pSel->Attr.n.u1Unusable)
8079 {
8080 Assert(pSel->Attr.n.u1DescType);
8081 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8082 {
8083 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8084 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8085 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8086
8087 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8088 {
8089 /** @todo CPL check. */
8090 }
8091
8092 /*
8093 * There are two kinds of data selectors, normal and expand down.
8094 */
8095 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8096 {
8097 if ( GCPtrFirst32 > pSel->u32Limit
8098 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8099 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8100 }
8101 else
8102 {
8103 /*
8104 * The upper boundary is defined by the B bit, not the G bit!
8105 */
8106 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8107 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8108 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8109 }
8110 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8111 }
8112 else
8113 {
8114
8115 /*
8116 * A code selector can usually be used to read through; writing is
8117 * only permitted in real and V8086 mode.
8118 */
8119 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8120 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8121 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8122 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8123 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8124
8125 if ( GCPtrFirst32 > pSel->u32Limit
8126 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8127 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8128
8129 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8130 {
8131 /** @todo CPL check. */
8132 }
8133
8134 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8135 }
8136 }
8137 else
8138 return iemRaiseGeneralProtectionFault0(pVCpu);
8139 return VINF_SUCCESS;
8140 }
8141
8142 case IEMMODE_64BIT:
8143 {
8144 RTGCPTR GCPtrMem = *pGCPtrMem;
8145 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8146 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8147
8148 Assert(cbMem >= 1);
8149 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8150 return VINF_SUCCESS;
8151 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8152 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8153 return iemRaiseGeneralProtectionFault0(pVCpu);
8154 }
8155
8156 default:
8157 AssertFailedReturn(VERR_IEM_IPE_7);
8158 }
8159}
8160
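/*
 * Expand-down example for the limit handling above (illustrative values): for
 * a big (B=1) expand-down data segment with limit 0x0fff, the valid offsets
 * are 0x1000 through 0xffffffff, so an access starting at offset 0x800 takes
 * the iemRaiseSelectorBounds path while one starting at 0x1000 is accepted.
 */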
8161
8162/**
8163 * Translates a virtual address to a physical address and checks if we
8164 * can access the page as specified.
8165 *
8166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8167 * @param GCPtrMem The virtual address.
8168 * @param fAccess The intended access.
8169 * @param pGCPhysMem Where to return the physical address.
8170 */
8171IEM_STATIC VBOXSTRICTRC
8172iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8173{
8174 /** @todo Need a different PGM interface here. We're currently using
8175 * generic / REM interfaces. This won't cut it for R0. */
8176 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8177 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8178 * here. */
8179 PGMPTWALK Walk;
8180 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8181 if (RT_FAILURE(rc))
8182 {
8183 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8184 /** @todo Check unassigned memory in unpaged mode. */
8185 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8186#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8187 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8188 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
8189#endif
8190 *pGCPhysMem = NIL_RTGCPHYS;
8191 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8192 }
8193
8194 /* If the page is writable and does not have the no-exec bit set, all
8195 access is allowed. Otherwise we'll have to check more carefully... */
8196 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8197 {
8198 /* Write to read only memory? */
8199 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8200 && !(Walk.fEffective & X86_PTE_RW)
8201 && ( ( pVCpu->iem.s.uCpl == 3
8202 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8203 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8204 {
8205 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8206 *pGCPhysMem = NIL_RTGCPHYS;
8207#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8208 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8209 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8210#endif
8211 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8212 }
8213
8214 /* Kernel memory accessed by userland? */
8215 if ( !(Walk.fEffective & X86_PTE_US)
8216 && pVCpu->iem.s.uCpl == 3
8217 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8218 {
8219 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8220 *pGCPhysMem = NIL_RTGCPHYS;
8221#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8222 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8223 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8224#endif
8225 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8226 }
8227
8228 /* Executing non-executable memory? */
8229 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8230 && (Walk.fEffective & X86_PTE_PAE_NX)
8231 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8232 {
8233 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8234 *pGCPhysMem = NIL_RTGCPHYS;
8235#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
8236 if (Walk.fFailed & PGM_WALKFAIL_EPT)
8237 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
8238#endif
8239 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8240 VERR_ACCESS_DENIED);
8241 }
8242 }
8243
8244 /*
8245 * Set the dirty / access flags.
8246 * ASSUMES this is set when the address is translated rather than on commit...
8247 */
8248 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8249 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8250 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8251 {
8252 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8253 AssertRC(rc2);
8254 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
8255 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
8256 }
8257
8258 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
8259 *pGCPhysMem = GCPhys;
8260 return VINF_SUCCESS;
8261}
8262
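/*
 * Example of the permission checks above (illustrative): a CPL-3 write to a
 * user-readable but read-only page (US=1, RW=0) misses the fast path and
 * raises #PF with VERR_ACCESS_DENIED, whereas the same write from CPL 0 only
 * faults when CR0.WP is set. Note that the accessed/dirty bits are set here,
 * at translation time, rather than when the access is committed.
 */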
8263
8264
8265/**
8266 * Maps a physical page.
8267 *
8268 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param GCPhysMem The physical address.
8271 * @param fAccess The intended access.
8272 * @param ppvMem Where to return the mapping address.
8273 * @param pLock The PGM lock.
8274 */
8275IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8276{
8277#ifdef IEM_LOG_MEMORY_WRITES
8278 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8279 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8280#endif
8281
8282 /** @todo This API may require some improvement later. A private deal with PGM
8283 * regarding locking and unlocking needs to be struck. A couple of TLBs
8284 * living in PGM, but with publicly accessible inlined access methods
8285 * could perhaps be an even better solution. */
8286 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8287 GCPhysMem,
8288 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8289 pVCpu->iem.s.fBypassHandlers,
8290 ppvMem,
8291 pLock);
8292 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8293 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8294
8295 return rc;
8296}
8297
8298
8299/**
8300 * Unmap a page previously mapped by iemMemPageMap.
8301 *
8302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8303 * @param GCPhysMem The physical address.
8304 * @param fAccess The intended access.
8305 * @param pvMem What iemMemPageMap returned.
8306 * @param pLock The PGM lock.
8307 */
8308DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8309{
8310 NOREF(pVCpu);
8311 NOREF(GCPhysMem);
8312 NOREF(fAccess);
8313 NOREF(pvMem);
8314 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8315}
8316
8317
8318/**
8319 * Looks up a memory mapping entry.
8320 *
8321 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8323 * @param pvMem The memory address.
8324 * @param fAccess The kind of access of the mapping to look up.
8325 */
8326DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8327{
8328 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8329 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8330 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8331 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8332 return 0;
8333 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8334 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8335 return 1;
8336 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8337 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8338 return 2;
8339 return VERR_NOT_FOUND;
8340}
8341
8342
8343/**
8344 * Finds a free memmap entry when using iNextMapping doesn't work.
8345 *
8346 * @returns Memory mapping index, 1024 on failure.
8347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8348 */
8349IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8350{
8351 /*
8352 * The easy case.
8353 */
8354 if (pVCpu->iem.s.cActiveMappings == 0)
8355 {
8356 pVCpu->iem.s.iNextMapping = 1;
8357 return 0;
8358 }
8359
8360 /* There should be enough mappings for all instructions. */
8361 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8362
8363 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8364 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8365 return i;
8366
8367 AssertFailedReturn(1024);
8368}
8369
8370
8371/**
8372 * Commits a bounce buffer that needs writing back and unmaps it.
8373 *
8374 * @returns Strict VBox status code.
8375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8376 * @param iMemMap The index of the buffer to commit.
8377 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8378 * Always false in ring-3, obviously.
8379 */
8380IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8381{
8382 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8383 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8384#ifdef IN_RING3
8385 Assert(!fPostponeFail);
8386 RT_NOREF_PV(fPostponeFail);
8387#endif
8388
8389 /*
8390 * Do the writing.
8391 */
8392 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8393 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8394 {
8395 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8396 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8397 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8398 if (!pVCpu->iem.s.fBypassHandlers)
8399 {
8400 /*
8401 * Carefully and efficiently dealing with access handler return
8402 * codes makes this a little bloated.
8403 */
8404 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8406 pbBuf,
8407 cbFirst,
8408 PGMACCESSORIGIN_IEM);
8409 if (rcStrict == VINF_SUCCESS)
8410 {
8411 if (cbSecond)
8412 {
8413 rcStrict = PGMPhysWrite(pVM,
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8415 pbBuf + cbFirst,
8416 cbSecond,
8417 PGMACCESSORIGIN_IEM);
8418 if (rcStrict == VINF_SUCCESS)
8419 { /* nothing */ }
8420 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8425 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8426 }
8427#ifndef IN_RING3
8428 else if (fPostponeFail)
8429 {
8430 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8433 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8434 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8435 return iemSetPassUpStatus(pVCpu, rcStrict);
8436 }
8437#endif
8438 else
8439 {
8440 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8443 return rcStrict;
8444 }
8445 }
8446 }
8447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8448 {
8449 if (!cbSecond)
8450 {
8451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8453 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8454 }
8455 else
8456 {
8457 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8459 pbBuf + cbFirst,
8460 cbSecond,
8461 PGMACCESSORIGIN_IEM);
8462 if (rcStrict2 == VINF_SUCCESS)
8463 {
8464 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8467 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8468 }
8469 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8470 {
8471 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8474 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8476 }
8477#ifndef IN_RING3
8478 else if (fPostponeFail)
8479 {
8480 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8483 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8484 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8485 return iemSetPassUpStatus(pVCpu, rcStrict);
8486 }
8487#endif
8488 else
8489 {
8490 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8493 return rcStrict2;
8494 }
8495 }
8496 }
8497#ifndef IN_RING3
8498 else if (fPostponeFail)
8499 {
8500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8503 if (!cbSecond)
8504 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8505 else
8506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8507 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8508 return iemSetPassUpStatus(pVCpu, rcStrict);
8509 }
8510#endif
8511 else
8512 {
8513 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8516 return rcStrict;
8517 }
8518 }
8519 else
8520 {
8521 /*
8522 * No access handlers, much simpler.
8523 */
8524 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8525 if (RT_SUCCESS(rc))
8526 {
8527 if (cbSecond)
8528 {
8529 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8530 if (RT_SUCCESS(rc))
8531 { /* likely */ }
8532 else
8533 {
8534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8537 return rc;
8538 }
8539 }
8540 }
8541 else
8542 {
8543 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8546 return rc;
8547 }
8548 }
8549 }
8550
8551#if defined(IEM_LOG_MEMORY_WRITES)
8552 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8553 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8554 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8555 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8556 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8557 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8558
8559 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8560 g_cbIemWrote = cbWrote;
8561 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8562#endif
8563
8564 /*
8565 * Free the mapping entry.
8566 */
8567 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8568 Assert(pVCpu->iem.s.cActiveMappings != 0);
8569 pVCpu->iem.s.cActiveMappings--;
8570 return VINF_SUCCESS;
8571}
8572
8573
8574/**
8575 * iemMemMap worker that deals with a request crossing pages.
8576 */
8577IEM_STATIC VBOXSTRICTRC
8578iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8579{
8580 /*
8581 * Do the address translations.
8582 */
8583 RTGCPHYS GCPhysFirst;
8584 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8585 if (rcStrict != VINF_SUCCESS)
8586 return rcStrict;
8587
8588 RTGCPHYS GCPhysSecond;
8589 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
8590 fAccess, &GCPhysSecond);
8591 if (rcStrict != VINF_SUCCESS)
8592 return rcStrict;
8593 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
8594
8595 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8596
8597 /*
8598 * Read in the current memory content if it's a read, execute or partial
8599 * write access.
8600 */
8601 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8602 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
8603 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8604
8605 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8606 {
8607 if (!pVCpu->iem.s.fBypassHandlers)
8608 {
8609 /*
8610 * Must carefully deal with access handler status codes here,
8611 * which makes the code a bit bloated.
8612 */
8613 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8614 if (rcStrict == VINF_SUCCESS)
8615 {
8616 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8617 if (rcStrict == VINF_SUCCESS)
8618 { /*likely */ }
8619 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8621 else
8622 {
8623 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8624 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8625 return rcStrict;
8626 }
8627 }
8628 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8629 {
8630 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8631 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8632 {
8633 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8634 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8635 }
8636 else
8637 {
8638 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8639 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8640 return rcStrict2;
8641 }
8642 }
8643 else
8644 {
8645 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8646 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8647 return rcStrict;
8648 }
8649 }
8650 else
8651 {
8652 /*
8653 * No informational status codes here, much more straightforward.
8654 */
8655 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8656 if (RT_SUCCESS(rc))
8657 {
8658 Assert(rc == VINF_SUCCESS);
8659 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8660 if (RT_SUCCESS(rc))
8661 Assert(rc == VINF_SUCCESS);
8662 else
8663 {
8664 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8665 return rc;
8666 }
8667 }
8668 else
8669 {
8670 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8671 return rc;
8672 }
8673 }
8674 }
8675#ifdef VBOX_STRICT
8676 else
8677 memset(pbBuf, 0xcc, cbMem);
8678 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8679 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8680#endif
8681
8682 /*
8683 * Commit the bounce buffer entry.
8684 */
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8686 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8687 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8688 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8689 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8690 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8692 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8693 pVCpu->iem.s.cActiveMappings++;
8694
8695 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8696 *ppvMem = pbBuf;
8697 return VINF_SUCCESS;
8698}
8699
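/*
 * Split example (illustrative numbers): an 8 byte access whose first byte is
 * at page offset 0xffa yields cbFirstPage = 0x1000 - 0xffa = 6 and
 * cbSecondPage = 8 - 6 = 2, both halves being read into and written back from
 * the per-mapping bounce buffer as a single logical access.
 */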
8700
8701/**
8702 * iemMemMap worker that deals with iemMemPageMap failures.
8703 */
8704IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8705 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8706{
8707 /*
8708 * Filter out conditions we can handle and the ones which shouldn't happen.
8709 */
8710 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8711 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8712 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8713 {
8714 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8715 return rcMap;
8716 }
8717 pVCpu->iem.s.cPotentialExits++;
8718
8719 /*
8720 * Read in the current memory content if it's a read, execute or partial
8721 * write access.
8722 */
8723 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8724 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8725 {
8726 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8727 memset(pbBuf, 0xff, cbMem);
8728 else
8729 {
8730 int rc;
8731 if (!pVCpu->iem.s.fBypassHandlers)
8732 {
8733 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8734 if (rcStrict == VINF_SUCCESS)
8735 { /* nothing */ }
8736 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8737 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8738 else
8739 {
8740 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8741 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8742 return rcStrict;
8743 }
8744 }
8745 else
8746 {
8747 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8748 if (RT_SUCCESS(rc))
8749 { /* likely */ }
8750 else
8751 {
8752 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8753 GCPhysFirst, rc));
8754 return rc;
8755 }
8756 }
8757 }
8758 }
8759#ifdef VBOX_STRICT
8760 else
8761 memset(pbBuf, 0xcc, cbMem);
8764 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8765 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8766#endif
8767
8768 /*
8769 * Commit the bounce buffer entry.
8770 */
8771 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8772 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8773 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8774 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8775 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8776 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8777 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8778 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8779 pVCpu->iem.s.cActiveMappings++;
8780
8781 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8782 *ppvMem = pbBuf;
8783 return VINF_SUCCESS;
8784}
8785
8786
8787
8788/**
8789 * Maps the specified guest memory for the given kind of access.
8790 *
8791 * This may be using bounce buffering of the memory if it's crossing a page
8792 * boundary or if there is an access handler installed for any of it. Because
8793 * of lock prefix guarantees, we're in for some extra clutter when this
8794 * happens.
8795 *
8796 * This may raise a \#GP, \#SS, \#PF or \#AC.
8797 *
8798 * @returns VBox strict status code.
8799 *
8800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8801 * @param ppvMem Where to return the pointer to the mapped
8802 * memory.
8803 * @param cbMem The number of bytes to map. This is usually 1,
8804 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8805 * string operations it can be up to a page.
8806 * @param iSegReg The index of the segment register to use for
8807 * this access. The base and limits are checked.
8808 * Use UINT8_MAX to indicate that no segmentation
8809 * is required (for IDT, GDT and LDT accesses).
8810 * @param GCPtrMem The address of the guest memory.
8811 * @param fAccess How the memory is being accessed. The
8812 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8813 * how to map the memory, while the
8814 * IEM_ACCESS_WHAT_XXX bit is used when raising
8815 * exceptions.
8816 */
8817IEM_STATIC VBOXSTRICTRC
8818iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8819{
8820 /*
8821 * Check the input and figure out which mapping entry to use.
8822 */
8823 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8824 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8825 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8826
8827 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8828 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8829 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8830 {
8831 iMemMap = iemMemMapFindFree(pVCpu);
8832 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8833 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8834 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8835 pVCpu->iem.s.aMemMappings[2].fAccess),
8836 VERR_IEM_IPE_9);
8837 }
8838
8839 /*
8840 * Map the memory, checking that we can actually access it. If something
8841 * slightly complicated happens, fall back on bounce buffering.
8842 */
8843 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8844 if (rcStrict != VINF_SUCCESS)
8845 return rcStrict;
8846
8847 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem > GUEST_PAGE_SIZE) /* Crossing a page boundary? */
8848 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8849
8850 RTGCPHYS GCPhysFirst;
8851 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8852 if (rcStrict != VINF_SUCCESS)
8853 return rcStrict;
8854
8855 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8856 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8857 if (fAccess & IEM_ACCESS_TYPE_READ)
8858 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8859
8860 void *pvMem;
8861 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8862 if (rcStrict != VINF_SUCCESS)
8863 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8864
8865 /*
8866 * Fill in the mapping table entry.
8867 */
8868 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8869 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8870 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8871 pVCpu->iem.s.cActiveMappings++;
8872
8873 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8874 *ppvMem = pvMem;
8875
8876 return VINF_SUCCESS;
8877}
8878
8879
8880/**
8881 * Commits the guest memory if bounce buffered and unmaps it.
8882 *
8883 * @returns Strict VBox status code.
8884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8885 * @param pvMem The mapping.
8886 * @param fAccess The kind of access.
8887 */
8888IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8889{
8890 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8891 AssertReturn(iMemMap >= 0, iMemMap);
8892
8893 /* If it's bounce buffered, we may need to write back the buffer. */
8894 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8895 {
8896 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8897 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8898 }
8899 /* Otherwise unlock it. */
8900 else
8901 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8902
8903 /* Free the entry. */
8904 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8905 Assert(pVCpu->iem.s.cActiveMappings != 0);
8906 pVCpu->iem.s.cActiveMappings--;
8907 return VINF_SUCCESS;
8908}
8909
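/*
 * A minimal sketch of how iemMemMap and iemMemCommitAndUnmap pair up, shown
 * here for a dword write (the local names are illustrative only; the data
 * fetch helpers further down follow the same shape for reads):
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;  // modify the mapped (or bounce buffered) bytes
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;          // any #GP/#SS/#PF was raised by iemMemMap already
 *
 */
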
8910#ifdef IEM_WITH_SETJMP
8911
8912/**
8913 * Maps the specified guest memory for the given kind of access, longjmp on
8914 * error.
8915 *
8916 * This may be using bounce buffering of the memory if it's crossing a page
8917 * boundary or if there is an access handler installed for any of it. Because
8918 * of lock prefix guarantees, we're in for some extra clutter when this
8919 * happens.
8920 *
8921 * This may raise a \#GP, \#SS, \#PF or \#AC.
8922 *
8923 * @returns Pointer to the mapped memory.
8924 *
8925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8926 * @param cbMem The number of bytes to map. This is usually 1,
8927 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8928 * string operations it can be up to a page.
8929 * @param iSegReg The index of the segment register to use for
8930 * this access. The base and limits are checked.
8931 * Use UINT8_MAX to indicate that no segmentation
8932 * is required (for IDT, GDT and LDT accesses).
8933 * @param GCPtrMem The address of the guest memory.
8934 * @param fAccess How the memory is being accessed. The
8935 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8936 * how to map the memory, while the
8937 * IEM_ACCESS_WHAT_XXX bit is used when raising
8938 * exceptions.
8939 */
8940IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8941{
8942 /*
8943 * Check the input and figure out which mapping entry to use.
8944 */
8945 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8946 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8947 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8948
8949 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8950 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8951 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8952 {
8953 iMemMap = iemMemMapFindFree(pVCpu);
8954 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8955 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8956 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8957 pVCpu->iem.s.aMemMappings[2].fAccess),
8958 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8959 }
8960
8961 /*
8962 * Map the memory, checking that we can actually access it. If something
8963 * slightly complicated happens, fall back on bounce buffering.
8964 */
8965 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8966 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8967 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8968
8969 /* Crossing a page boundary? */
8970 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
8971 { /* No (likely). */ }
8972 else
8973 {
8974 void *pvMem;
8975 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8976 if (rcStrict == VINF_SUCCESS)
8977 return pvMem;
8978 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8979 }
8980
8981 RTGCPHYS GCPhysFirst;
8982 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8983 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8984 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8985
8986 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8987 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8988 if (fAccess & IEM_ACCESS_TYPE_READ)
8989 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8990
8991 void *pvMem;
8992 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8993 if (rcStrict == VINF_SUCCESS)
8994 { /* likely */ }
8995 else
8996 {
8997 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8998 if (rcStrict == VINF_SUCCESS)
8999 return pvMem;
9000 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9001 }
9002
9003 /*
9004 * Fill in the mapping table entry.
9005 */
9006 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9007 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9008 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9009 pVCpu->iem.s.cActiveMappings++;
9010
9011 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9012 return pvMem;
9013}
9014
9015
9016/**
9017 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9018 *
9019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9020 * @param pvMem The mapping.
9021 * @param fAccess The kind of access.
9022 */
9023IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9024{
9025 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9026 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9027
9028 /* If it's bounce buffered, we may need to write back the buffer. */
9029 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9030 {
9031 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9032 {
9033 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9034 if (rcStrict == VINF_SUCCESS)
9035 return;
9036 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9037 }
9038 }
9039 /* Otherwise unlock it. */
9040 else
9041 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9042
9043 /* Free the entry. */
9044 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9045 Assert(pVCpu->iem.s.cActiveMappings != 0);
9046 pVCpu->iem.s.cActiveMappings--;
9047}
9048
9049#endif /* IEM_WITH_SETJMP */
9050
9051#ifndef IN_RING3
9052/**
9053 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9054 * buffer part shows trouble, the write-back is postponed to ring-3 (sets FF and stuff).
9055 *
9056 * Allows the instruction to be completed and retired, while the IEM user will
9057 * return to ring-3 immediately afterwards and do the postponed writes there.
9058 *
9059 * @returns VBox status code (no strict statuses). Caller must check
9060 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9061 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9062 * @param pvMem The mapping.
9063 * @param fAccess The kind of access.
9064 */
9065IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9066{
9067 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9068 AssertReturn(iMemMap >= 0, iMemMap);
9069
9070 /* If it's bounce buffered, we may need to write back the buffer. */
9071 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9072 {
9073 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9074 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9075 }
9076 /* Otherwise unlock it. */
9077 else
9078 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9079
9080 /* Free the entry. */
9081 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9082 Assert(pVCpu->iem.s.cActiveMappings != 0);
9083 pVCpu->iem.s.cActiveMappings--;
9084 return VINF_SUCCESS;
9085}
9086#endif
9087
9088
9089/**
9090 * Rolls back mappings, releasing page locks and such.
9091 *
9092 * The caller shall only call this after checking cActiveMappings.
9093 *
9095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9096 */
9097IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9098{
9099 Assert(pVCpu->iem.s.cActiveMappings > 0);
9100
9101 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9102 while (iMemMap-- > 0)
9103 {
9104 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9105 if (fAccess != IEM_ACCESS_INVALID)
9106 {
9107 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9108 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9109 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9110 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9111 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9112 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9113 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9114 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9115 pVCpu->iem.s.cActiveMappings--;
9116 }
9117 }
9118}
9119
9120
9121/**
9122 * Fetches a data byte.
9123 *
9124 * @returns Strict VBox status code.
9125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9126 * @param pu8Dst Where to return the byte.
9127 * @param iSegReg The index of the segment register to use for
9128 * this access. The base and limits are checked.
9129 * @param GCPtrMem The address of the guest memory.
9130 */
9131IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9132{
9133 /* The lazy approach for now... */
9134 uint8_t const *pu8Src;
9135 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9136 if (rc == VINF_SUCCESS)
9137 {
9138 *pu8Dst = *pu8Src;
9139 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9140 }
9141 return rc;
9142}
9143
9144
9145#ifdef IEM_WITH_SETJMP
9146/**
9147 * Fetches a data byte, longjmp on error.
9148 *
9149 * @returns The byte.
9150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9151 * @param iSegReg The index of the segment register to use for
9152 * this access. The base and limits are checked.
9153 * @param GCPtrMem The address of the guest memory.
9154 */
9155DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9156{
9157 /* The lazy approach for now... */
9158 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9159 uint8_t const bRet = *pu8Src;
9160 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9161 return bRet;
9162}
9163#endif /* IEM_WITH_SETJMP */
9164
9165
9166/**
9167 * Fetches a data word.
9168 *
9169 * @returns Strict VBox status code.
9170 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9171 * @param pu16Dst Where to return the word.
9172 * @param iSegReg The index of the segment register to use for
9173 * this access. The base and limits are checked.
9174 * @param GCPtrMem The address of the guest memory.
9175 */
9176IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9177{
9178 /* The lazy approach for now... */
9179 uint16_t const *pu16Src;
9180 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9181 if (rc == VINF_SUCCESS)
9182 {
9183 *pu16Dst = *pu16Src;
9184 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9185 }
9186 return rc;
9187}
9188
9189
9190#ifdef IEM_WITH_SETJMP
9191/**
9192 * Fetches a data word, longjmp on error.
9193 *
9194 * @returns The word.
9195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9196 * @param iSegReg The index of the segment register to use for
9197 * this access. The base and limits are checked.
9198 * @param GCPtrMem The address of the guest memory.
9199 */
9200DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9201{
9202 /* The lazy approach for now... */
9203 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9204 uint16_t const u16Ret = *pu16Src;
9205 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9206 return u16Ret;
9207}
9208#endif
9209
9210
9211/**
9212 * Fetches a data dword.
9213 *
9214 * @returns Strict VBox status code.
9215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9216 * @param pu32Dst Where to return the dword.
9217 * @param iSegReg The index of the segment register to use for
9218 * this access. The base and limits are checked.
9219 * @param GCPtrMem The address of the guest memory.
9220 */
9221IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9222{
9223 /* The lazy approach for now... */
9224 uint32_t const *pu32Src;
9225 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9226 if (rc == VINF_SUCCESS)
9227 {
9228 *pu32Dst = *pu32Src;
9229 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9230 }
9231 return rc;
9232}
9233
9234
9235/**
9236 * Fetches a data dword and zero extends it to a qword.
9237 *
9238 * @returns Strict VBox status code.
9239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9240 * @param pu64Dst Where to return the qword.
9241 * @param iSegReg The index of the segment register to use for
9242 * this access. The base and limits are checked.
9243 * @param GCPtrMem The address of the guest memory.
9244 */
9245IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9246{
9247 /* The lazy approach for now... */
9248 uint32_t const *pu32Src;
9249 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9250 if (rc == VINF_SUCCESS)
9251 {
9252 *pu64Dst = *pu32Src;
9253 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9254 }
9255 return rc;
9256}
9257
9258
9259#ifdef IEM_WITH_SETJMP
9260
9261IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9262{
9263 Assert(cbMem >= 1);
9264 Assert(iSegReg < X86_SREG_COUNT);
9265
9266 /*
9267 * 64-bit mode is simpler.
9268 */
9269 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9270 {
9271 if (iSegReg >= X86_SREG_FS)
9272 {
9273 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9274 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9275 GCPtrMem += pSel->u64Base;
9276 }
9277
9278 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9279 return GCPtrMem;
9280 }
9281 /*
9282 * 16-bit and 32-bit segmentation.
9283 */
9284 else
9285 {
9286 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9287 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9288 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9289 == X86DESCATTR_P /* data, expand up */
9290 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9291 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9292 {
9293 /* expand up */
9294 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9295 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9296 && GCPtrLast32 > (uint32_t)GCPtrMem))
9297 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9298 }
9299 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9300 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9301 {
9302 /* expand down */
9303 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9304 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9305 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9306 && GCPtrLast32 > (uint32_t)GCPtrMem))
9307 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9308 }
9309 else
9310 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9311 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9312 }
9313 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9314}
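/*
 * Worked example for the limit checks above (illustrative numbers only):
 * an expand-down data segment with u32Limit=0x00000fff and D=1 covers the
 * offsets 0x1000..0xffffffff. A 4 byte read at GCPtrMem=0x2000 yields
 * GCPtrLast32=0x2004, so 0x2000 > 0xfff, 0x2004 <= 0xffffffff and
 * 0x2004 > 0x2000 all hold and the access passes. A 4 byte read at
 * GCPtrMem=0x0ffc fails the first check and ends up in
 * iemRaiseSelectorBoundsJmp() instead.
 */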
9315
9316
9317IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9318{
9319 Assert(cbMem >= 1);
9320 Assert(iSegReg < X86_SREG_COUNT);
9321
9322 /*
9323 * 64-bit mode is simpler.
9324 */
9325 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9326 {
9327 if (iSegReg >= X86_SREG_FS)
9328 {
9329 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9330 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9331 GCPtrMem += pSel->u64Base;
9332 }
9333
9334 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9335 return GCPtrMem;
9336 }
9337 /*
9338 * 16-bit and 32-bit segmentation.
9339 */
9340 else
9341 {
9342 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9343 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9344 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9345 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9346 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9347 {
9348 /* expand up */
9349 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9350 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9351 && GCPtrLast32 > (uint32_t)GCPtrMem))
9352 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9353 }
9354 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9355 {
9356 /* expand down */
9357 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9358 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9359 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9360 && GCPtrLast32 > (uint32_t)GCPtrMem))
9361 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9362 }
9363 else
9364 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9365 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9366 }
9367 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9368}
9369
9370
9371/**
9372 * Fetches a data dword, longjmp on error, fallback/safe version.
9373 *
9374 * @returns The dword
9375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9376 * @param iSegReg The index of the segment register to use for
9377 * this access. The base and limits are checked.
9378 * @param GCPtrMem The address of the guest memory.
9379 */
9380IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9381{
9382 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9383 uint32_t const u32Ret = *pu32Src;
9384 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9385 return u32Ret;
9386}
9387
9388
9389/**
9390 * Fetches a data dword, longjmp on error.
9391 *
9392 * @returns The dword
9393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9394 * @param iSegReg The index of the segment register to use for
9395 * this access. The base and limits are checked.
9396 * @param GCPtrMem The address of the guest memory.
9397 */
9398DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9399{
9400# ifdef IEM_WITH_DATA_TLB
9401 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9402 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9403 {
9404 /// @todo more later.
9405 }
9406
9407 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9408# else
9409 /* The lazy approach. */
9410 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9411 uint32_t const u32Ret = *pu32Src;
9412 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9413 return u32Ret;
9414# endif
9415}
9416#endif
9417
9418
9419#ifdef SOME_UNUSED_FUNCTION
9420/**
9421 * Fetches a data dword and sign extends it to a qword.
9422 *
9423 * @returns Strict VBox status code.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param pu64Dst Where to return the sign extended value.
9426 * @param iSegReg The index of the segment register to use for
9427 * this access. The base and limits are checked.
9428 * @param GCPtrMem The address of the guest memory.
9429 */
9430IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9431{
9432 /* The lazy approach for now... */
9433 int32_t const *pi32Src;
9434 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9435 if (rc == VINF_SUCCESS)
9436 {
9437 *pu64Dst = *pi32Src;
9438 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9439 }
9440#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9441 else
9442 *pu64Dst = 0;
9443#endif
9444 return rc;
9445}
9446#endif
9447
9448
9449/**
9450 * Fetches a data qword.
9451 *
9452 * @returns Strict VBox status code.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param pu64Dst Where to return the qword.
9455 * @param iSegReg The index of the segment register to use for
9456 * this access. The base and limits are checked.
9457 * @param GCPtrMem The address of the guest memory.
9458 */
9459IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9460{
9461 /* The lazy approach for now... */
9462 uint64_t const *pu64Src;
9463 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9464 if (rc == VINF_SUCCESS)
9465 {
9466 *pu64Dst = *pu64Src;
9467 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9468 }
9469 return rc;
9470}
9471
9472
9473#ifdef IEM_WITH_SETJMP
9474/**
9475 * Fetches a data qword, longjmp on error.
9476 *
9477 * @returns The qword.
9478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9479 * @param iSegReg The index of the segment register to use for
9480 * this access. The base and limits are checked.
9481 * @param GCPtrMem The address of the guest memory.
9482 */
9483DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9484{
9485 /* The lazy approach for now... */
9486 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9487 uint64_t const u64Ret = *pu64Src;
9488 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9489 return u64Ret;
9490}
9491#endif
9492
9493
9494/**
9495 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9496 *
9497 * @returns Strict VBox status code.
9498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9499 * @param pu64Dst Where to return the qword.
9500 * @param iSegReg The index of the segment register to use for
9501 * this access. The base and limits are checked.
9502 * @param GCPtrMem The address of the guest memory.
9503 */
9504IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9505{
9506 /* The lazy approach for now... */
9507 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9508 if (RT_UNLIKELY(GCPtrMem & 15))
9509 return iemRaiseGeneralProtectionFault0(pVCpu);
9510
9511 uint64_t const *pu64Src;
9512 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9513 if (rc == VINF_SUCCESS)
9514 {
9515 *pu64Dst = *pu64Src;
9516 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9517 }
9518 return rc;
9519}
9520
9521
9522#ifdef IEM_WITH_SETJMP
9523/**
9524 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9525 *
9526 * @returns The qword.
9527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9528 * @param iSegReg The index of the segment register to use for
9529 * this access. The base and limits are checked.
9530 * @param GCPtrMem The address of the guest memory.
9531 */
9532DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9533{
9534 /* The lazy approach for now... */
9535 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9536 if (RT_LIKELY(!(GCPtrMem & 15)))
9537 {
9538 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9539 uint64_t const u64Ret = *pu64Src;
9540 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9541 return u64Ret;
9542 }
9543
9544 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9545 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9546}
9547#endif
9548
9549
9550/**
9551 * Fetches a data tword.
9552 *
9553 * @returns Strict VBox status code.
9554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9555 * @param pr80Dst Where to return the tword.
9556 * @param iSegReg The index of the segment register to use for
9557 * this access. The base and limits are checked.
9558 * @param GCPtrMem The address of the guest memory.
9559 */
9560IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9561{
9562 /* The lazy approach for now... */
9563 PCRTFLOAT80U pr80Src;
9564 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9565 if (rc == VINF_SUCCESS)
9566 {
9567 *pr80Dst = *pr80Src;
9568 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9569 }
9570 return rc;
9571}
9572
9573
9574#ifdef IEM_WITH_SETJMP
9575/**
9576 * Fetches a data tword, longjmp on error.
9577 *
9578 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9579 * @param pr80Dst Where to return the tword.
9580 * @param iSegReg The index of the segment register to use for
9581 * this access. The base and limits are checked.
9582 * @param GCPtrMem The address of the guest memory.
9583 */
9584DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9585{
9586 /* The lazy approach for now... */
9587 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9588 *pr80Dst = *pr80Src;
9589 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9590}
9591#endif
9592
9593
9594/**
9595 * Fetches a data dqword (double qword), generally SSE related.
9596 *
9597 * @returns Strict VBox status code.
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param pu128Dst Where to return the dqword.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 */
9604IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9605{
9606 /* The lazy approach for now... */
9607 PCRTUINT128U pu128Src;
9608 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9609 if (rc == VINF_SUCCESS)
9610 {
9611 pu128Dst->au64[0] = pu128Src->au64[0];
9612 pu128Dst->au64[1] = pu128Src->au64[1];
9613 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9614 }
9615 return rc;
9616}
9617
9618
9619#ifdef IEM_WITH_SETJMP
9620/**
9621 * Fetches a data dqword (double qword), generally SSE related.
9622 *
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param pu128Dst Where to return the dqword.
9625 * @param iSegReg The index of the segment register to use for
9626 * this access. The base and limits are checked.
9627 * @param GCPtrMem The address of the guest memory.
9628 */
9629IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9630{
9631 /* The lazy approach for now... */
9632 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9633 pu128Dst->au64[0] = pu128Src->au64[0];
9634 pu128Dst->au64[1] = pu128Src->au64[1];
9635 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9636}
9637#endif
9638
9639
9640/**
9641 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9642 * related.
9643 *
9644 * Raises \#GP(0) if not aligned.
9645 *
9646 * @returns Strict VBox status code.
9647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9648 * @param pu128Dst Where to return the dqword.
9649 * @param iSegReg The index of the segment register to use for
9650 * this access. The base and limits are checked.
9651 * @param GCPtrMem The address of the guest memory.
9652 */
9653IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9654{
9655 /* The lazy approach for now... */
9656 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9657 if ( (GCPtrMem & 15)
9658 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9659 return iemRaiseGeneralProtectionFault0(pVCpu);
9660
9661 PCRTUINT128U pu128Src;
9662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 if (rc == VINF_SUCCESS)
9664 {
9665 pu128Dst->au64[0] = pu128Src->au64[0];
9666 pu128Dst->au64[1] = pu128Src->au64[1];
9667 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9668 }
9669 return rc;
9670}
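/*
 * Illustrative note (not part of the build): the 16 byte alignment check in
 * the function above is skipped when MXCSR.MM (AMD's misaligned SSE mode) is
 * set. A hypothetical stand-alone predicate expressing the same rule:
 */
#if 0
DECLINLINE(bool) iemExampleIsSseAlignmentOk(PVMCPUCC pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) == 0
        || RT_BOOL(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM);
}
#endif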
9671
9672
9673#ifdef IEM_WITH_SETJMP
9674/**
9675 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9676 * related, longjmp on error.
9677 *
9678 * Raises \#GP(0) if not aligned.
9679 *
9680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9681 * @param pu128Dst Where to return the dqword.
9682 * @param iSegReg The index of the segment register to use for
9683 * this access. The base and limits are checked.
9684 * @param GCPtrMem The address of the guest memory.
9685 */
9686DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9687{
9688 /* The lazy approach for now... */
9689 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9690 if ( (GCPtrMem & 15) == 0
9691 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9692 {
9693 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9694 pu128Dst->au64[0] = pu128Src->au64[0];
9695 pu128Dst->au64[1] = pu128Src->au64[1];
9696 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9697 return;
9698 }
9699
9700 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9701 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9702}
9703#endif
9704
9705
9706/**
9707 * Fetches a data oword (octo word), generally AVX related.
9708 *
9709 * @returns Strict VBox status code.
9710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9711 * @param pu256Dst Where to return the oword.
9712 * @param iSegReg The index of the segment register to use for
9713 * this access. The base and limits are checked.
9714 * @param GCPtrMem The address of the guest memory.
9715 */
9716IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9717{
9718 /* The lazy approach for now... */
9719 PCRTUINT256U pu256Src;
9720 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9721 if (rc == VINF_SUCCESS)
9722 {
9723 pu256Dst->au64[0] = pu256Src->au64[0];
9724 pu256Dst->au64[1] = pu256Src->au64[1];
9725 pu256Dst->au64[2] = pu256Src->au64[2];
9726 pu256Dst->au64[3] = pu256Src->au64[3];
9727 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9728 }
9729 return rc;
9730}
9731
9732
9733#ifdef IEM_WITH_SETJMP
9734/**
9735 * Fetches a data oword (octo word), generally AVX related.
9736 *
9737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9738 * @param pu256Dst Where to return the oword.
9739 * @param iSegReg The index of the segment register to use for
9740 * this access. The base and limits are checked.
9741 * @param GCPtrMem The address of the guest memory.
9742 */
9743IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9744{
9745 /* The lazy approach for now... */
9746 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9747 pu256Dst->au64[0] = pu256Src->au64[0];
9748 pu256Dst->au64[1] = pu256Src->au64[1];
9749 pu256Dst->au64[2] = pu256Src->au64[2];
9750 pu256Dst->au64[3] = pu256Src->au64[3];
9751 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9752}
9753#endif
9754
9755
9756/**
9757 * Fetches a data oword (octo word) at an aligned address, generally AVX
9758 * related.
9759 *
9760 * Raises \#GP(0) if not aligned.
9761 *
9762 * @returns Strict VBox status code.
9763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9764 * @param pu256Dst Where to return the oword.
9765 * @param iSegReg The index of the segment register to use for
9766 * this access. The base and limits are checked.
9767 * @param GCPtrMem The address of the guest memory.
9768 */
9769IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9770{
9771 /* The lazy approach for now... */
9772 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9773 if (GCPtrMem & 31)
9774 return iemRaiseGeneralProtectionFault0(pVCpu);
9775
9776 PCRTUINT256U pu256Src;
9777 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9778 if (rc == VINF_SUCCESS)
9779 {
9780 pu256Dst->au64[0] = pu256Src->au64[0];
9781 pu256Dst->au64[1] = pu256Src->au64[1];
9782 pu256Dst->au64[2] = pu256Src->au64[2];
9783 pu256Dst->au64[3] = pu256Src->au64[3];
9784 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9785 }
9786 return rc;
9787}
9788
9789
9790#ifdef IEM_WITH_SETJMP
9791/**
9792 * Fetches a data oword (octo word) at an aligned address, generally AVX
9793 * related, longjmp on error.
9794 *
9795 * Raises \#GP(0) if not aligned.
9796 *
9797 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9798 * @param pu256Dst Where to return the oword.
9799 * @param iSegReg The index of the segment register to use for
9800 * this access. The base and limits are checked.
9801 * @param GCPtrMem The address of the guest memory.
9802 */
9803DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9804{
9805 /* The lazy approach for now... */
9806 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9807 if ((GCPtrMem & 31) == 0)
9808 {
9809 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9810 pu256Dst->au64[0] = pu256Src->au64[0];
9811 pu256Dst->au64[1] = pu256Src->au64[1];
9812 pu256Dst->au64[2] = pu256Src->au64[2];
9813 pu256Dst->au64[3] = pu256Src->au64[3];
9814 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9815 return;
9816 }
9817
9818 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9819 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9820}
9821#endif
9822
9823
9824
9825/**
9826 * Fetches a descriptor register (lgdt, lidt).
9827 *
9828 * @returns Strict VBox status code.
9829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9830 * @param pcbLimit Where to return the limit.
9831 * @param pGCPtrBase Where to return the base.
9832 * @param iSegReg The index of the segment register to use for
9833 * this access. The base and limits are checked.
9834 * @param GCPtrMem The address of the guest memory.
9835 * @param enmOpSize The effective operand size.
9836 */
9837IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9838 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9839{
9840 /*
9841 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9842 * little special:
9843 * - The two reads are done separately.
9844 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit mode.
9845 * - We suspect the 386 to actually commit the limit before the base in
9846 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9847 * don't try to emulate this eccentric behavior, because it's not well
9848 * enough understood and rather hard to trigger.
9849 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9850 */
9851 VBOXSTRICTRC rcStrict;
9852 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9853 {
9854 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9855 if (rcStrict == VINF_SUCCESS)
9856 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9857 }
9858 else
9859 {
9860 uint32_t uTmp = 0; /* (Pacifies Visual C++'s may-be-used-uninitialized warning.) */
9861 if (enmOpSize == IEMMODE_32BIT)
9862 {
9863 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9864 {
9865 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9866 if (rcStrict == VINF_SUCCESS)
9867 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9868 }
9869 else
9870 {
9871 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9872 if (rcStrict == VINF_SUCCESS)
9873 {
9874 *pcbLimit = (uint16_t)uTmp;
9875 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9876 }
9877 }
9878 if (rcStrict == VINF_SUCCESS)
9879 *pGCPtrBase = uTmp;
9880 }
9881 else
9882 {
9883 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9884 if (rcStrict == VINF_SUCCESS)
9885 {
9886 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9887 if (rcStrict == VINF_SUCCESS)
9888 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9889 }
9890 }
9891 }
9892 return rcStrict;
9893}
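/*
 * Illustrative sketch (not part of the build) of the pseudo-descriptor layout
 * read by the function above:
 *      16-bit opsize (386+): limit = word [m], base = dword [m+2] & 0x00ffffff
 *      32-bit opsize:        limit = word [m], base = dword [m+2]
 *      64-bit mode:          limit = word [m], base = qword [m+2]
 * A hypothetical packed view of the 32-bit form:
 */
#if 0
#pragma pack(1)
typedef struct IEMEXAMPLEXDTR32
{
    uint16_t cbLimit; /* bytes 0..1 */
    uint32_t uBase;   /* bytes 2..5; the top byte is dropped with 16-bit opsize */
} IEMEXAMPLEXDTR32;
#pragma pack()
#endif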
9894
9895
9896
9897/**
9898 * Stores a data byte.
9899 *
9900 * @returns Strict VBox status code.
9901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9902 * @param iSegReg The index of the segment register to use for
9903 * this access. The base and limits are checked.
9904 * @param GCPtrMem The address of the guest memory.
9905 * @param u8Value The value to store.
9906 */
9907IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9908{
9909 /* The lazy approach for now... */
9910 uint8_t *pu8Dst;
9911 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9912 if (rc == VINF_SUCCESS)
9913 {
9914 *pu8Dst = u8Value;
9915 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9916 }
9917 return rc;
9918}
9919
9920
9921#ifdef IEM_WITH_SETJMP
9922/**
9923 * Stores a data byte, longjmp on error.
9924 *
9925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9926 * @param iSegReg The index of the segment register to use for
9927 * this access. The base and limits are checked.
9928 * @param GCPtrMem The address of the guest memory.
9929 * @param u8Value The value to store.
9930 */
9931IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9932{
9933 /* The lazy approach for now... */
9934 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9935 *pu8Dst = u8Value;
9936 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9937}
9938#endif
9939
9940
9941/**
9942 * Stores a data word.
9943 *
9944 * @returns Strict VBox status code.
9945 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9946 * @param iSegReg The index of the segment register to use for
9947 * this access. The base and limits are checked.
9948 * @param GCPtrMem The address of the guest memory.
9949 * @param u16Value The value to store.
9950 */
9951IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9952{
9953 /* The lazy approach for now... */
9954 uint16_t *pu16Dst;
9955 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9956 if (rc == VINF_SUCCESS)
9957 {
9958 *pu16Dst = u16Value;
9959 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9960 }
9961 return rc;
9962}
9963
9964
9965#ifdef IEM_WITH_SETJMP
9966/**
9967 * Stores a data word, longjmp on error.
9968 *
9969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9970 * @param iSegReg The index of the segment register to use for
9971 * this access. The base and limits are checked.
9972 * @param GCPtrMem The address of the guest memory.
9973 * @param u16Value The value to store.
9974 */
9975IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9976{
9977 /* The lazy approach for now... */
9978 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9979 *pu16Dst = u16Value;
9980 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9981}
9982#endif
9983
9984
9985/**
9986 * Stores a data dword.
9987 *
9988 * @returns Strict VBox status code.
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param iSegReg The index of the segment register to use for
9991 * this access. The base and limits are checked.
9992 * @param GCPtrMem The address of the guest memory.
9993 * @param u32Value The value to store.
9994 */
9995IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9996{
9997 /* The lazy approach for now... */
9998 uint32_t *pu32Dst;
9999 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10000 if (rc == VINF_SUCCESS)
10001 {
10002 *pu32Dst = u32Value;
10003 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10004 }
10005 return rc;
10006}
10007
10008
10009#ifdef IEM_WITH_SETJMP
10010/**
10011 * Stores a data dword, longjmp on error.
10012 *
10014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10015 * @param iSegReg The index of the segment register to use for
10016 * this access. The base and limits are checked.
10017 * @param GCPtrMem The address of the guest memory.
10018 * @param u32Value The value to store.
10019 */
10020IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10021{
10022 /* The lazy approach for now... */
10023 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10024 *pu32Dst = u32Value;
10025 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10026}
10027#endif
10028
10029
10030/**
10031 * Stores a data qword.
10032 *
10033 * @returns Strict VBox status code.
10034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10035 * @param iSegReg The index of the segment register to use for
10036 * this access. The base and limits are checked.
10037 * @param GCPtrMem The address of the guest memory.
10038 * @param u64Value The value to store.
10039 */
10040IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10041{
10042 /* The lazy approach for now... */
10043 uint64_t *pu64Dst;
10044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10045 if (rc == VINF_SUCCESS)
10046 {
10047 *pu64Dst = u64Value;
10048 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10049 }
10050 return rc;
10051}
10052
10053
10054#ifdef IEM_WITH_SETJMP
10055/**
10056 * Stores a data qword, longjmp on error.
10057 *
10058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10059 * @param iSegReg The index of the segment register to use for
10060 * this access. The base and limits are checked.
10061 * @param GCPtrMem The address of the guest memory.
10062 * @param u64Value The value to store.
10063 */
10064IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10065{
10066 /* The lazy approach for now... */
10067 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10068 *pu64Dst = u64Value;
10069 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10070}
10071#endif
10072
10073
10074/**
10075 * Stores a data dqword.
10076 *
10077 * @returns Strict VBox status code.
10078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10079 * @param iSegReg The index of the segment register to use for
10080 * this access. The base and limits are checked.
10081 * @param GCPtrMem The address of the guest memory.
10082 * @param u128Value The value to store.
10083 */
10084IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10085{
10086 /* The lazy approach for now... */
10087 PRTUINT128U pu128Dst;
10088 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10089 if (rc == VINF_SUCCESS)
10090 {
10091 pu128Dst->au64[0] = u128Value.au64[0];
10092 pu128Dst->au64[1] = u128Value.au64[1];
10093 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10094 }
10095 return rc;
10096}
10097
10098
10099#ifdef IEM_WITH_SETJMP
10100/**
10101 * Stores a data dqword, longjmp on error.
10102 *
10103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10104 * @param iSegReg The index of the segment register to use for
10105 * this access. The base and limits are checked.
10106 * @param GCPtrMem The address of the guest memory.
10107 * @param u128Value The value to store.
10108 */
10109IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10110{
10111 /* The lazy approach for now... */
10112 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10113 pu128Dst->au64[0] = u128Value.au64[0];
10114 pu128Dst->au64[1] = u128Value.au64[1];
10115 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10116}
10117#endif
10118
10119
10120/**
10121 * Stores a data dqword, SSE aligned.
10122 *
10123 * @returns Strict VBox status code.
10124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10125 * @param iSegReg The index of the segment register to use for
10126 * this access. The base and limits are checked.
10127 * @param GCPtrMem The address of the guest memory.
10128 * @param u128Value The value to store.
10129 */
10130IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10131{
10132 /* The lazy approach for now... */
10133 if ( (GCPtrMem & 15)
10134 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10135 return iemRaiseGeneralProtectionFault0(pVCpu);
10136
10137 PRTUINT128U pu128Dst;
10138 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10139 if (rc == VINF_SUCCESS)
10140 {
10141 pu128Dst->au64[0] = u128Value.au64[0];
10142 pu128Dst->au64[1] = u128Value.au64[1];
10143 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10144 }
10145 return rc;
10146}
10147
10148
10149#ifdef IEM_WITH_SETJMP
10150/**
10151 * Stores a data dqword, SSE aligned, longjmp on error.
10152 *
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param iSegReg The index of the segment register to use for
10156 * this access. The base and limits are checked.
10157 * @param GCPtrMem The address of the guest memory.
10158 * @param u128Value The value to store.
10159 */
10160DECL_NO_INLINE(IEM_STATIC, void)
10161iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10162{
10163 /* The lazy approach for now... */
10164 if ( (GCPtrMem & 15) == 0
10165 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10166 {
10167 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10168 pu128Dst->au64[0] = u128Value.au64[0];
10169 pu128Dst->au64[1] = u128Value.au64[1];
10170 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10171 return;
10172 }
10173
10174 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10175 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10176}
10177#endif
10178
10179
10180/**
10181 * Stores a data oword (octo word), generally AVX related.
10182 *
10183 * @returns Strict VBox status code.
10184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 * @param pu256Value Pointer to the value to store.
10189 */
10190IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10191{
10192 /* The lazy approach for now... */
10193 PRTUINT256U pu256Dst;
10194 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10195 if (rc == VINF_SUCCESS)
10196 {
10197 pu256Dst->au64[0] = pu256Value->au64[0];
10198 pu256Dst->au64[1] = pu256Value->au64[1];
10199 pu256Dst->au64[2] = pu256Value->au64[2];
10200 pu256Dst->au64[3] = pu256Value->au64[3];
10201 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10202 }
10203 return rc;
10204}
10205
10206
10207#ifdef IEM_WITH_SETJMP
10208/**
10209 * Stores a data oword (octo word), longjmp on error.
10210 *
10211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10212 * @param iSegReg The index of the segment register to use for
10213 * this access. The base and limits are checked.
10214 * @param GCPtrMem The address of the guest memory.
10215 * @param pu256Value Pointer to the value to store.
10216 */
10217IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10218{
10219 /* The lazy approach for now... */
10220 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10221 pu256Dst->au64[0] = pu256Value->au64[0];
10222 pu256Dst->au64[1] = pu256Value->au64[1];
10223 pu256Dst->au64[2] = pu256Value->au64[2];
10224 pu256Dst->au64[3] = pu256Value->au64[3];
10225 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10226}
10227#endif
10228
10229
10230/**
10231 * Stores a data oword (octo word), AVX aligned.
10232 *
10233 * @returns Strict VBox status code.
10234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10235 * @param iSegReg The index of the segment register to use for
10236 * this access. The base and limits are checked.
10237 * @param GCPtrMem The address of the guest memory.
10238 * @param pu256Value Pointer to the value to store.
10239 */
10240IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10241{
10242 /* The lazy approach for now... */
10243 if (GCPtrMem & 31)
10244 return iemRaiseGeneralProtectionFault0(pVCpu);
10245
10246 PRTUINT256U pu256Dst;
10247 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10248 if (rc == VINF_SUCCESS)
10249 {
10250 pu256Dst->au64[0] = pu256Value->au64[0];
10251 pu256Dst->au64[1] = pu256Value->au64[1];
10252 pu256Dst->au64[2] = pu256Value->au64[2];
10253 pu256Dst->au64[3] = pu256Value->au64[3];
10254 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10255 }
10256 return rc;
10257}
10258
10259
10260#ifdef IEM_WITH_SETJMP
10261/**
10262 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10263 *
10265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10266 * @param iSegReg The index of the segment register to use for
10267 * this access. The base and limits are checked.
10268 * @param GCPtrMem The address of the guest memory.
10269 * @param pu256Value Pointer to the value to store.
10270 */
10271DECL_NO_INLINE(IEM_STATIC, void)
10272iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10273{
10274 /* The lazy approach for now... */
10275 if ((GCPtrMem & 31) == 0)
10276 {
10277 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10278 pu256Dst->au64[0] = pu256Value->au64[0];
10279 pu256Dst->au64[1] = pu256Value->au64[1];
10280 pu256Dst->au64[2] = pu256Value->au64[2];
10281 pu256Dst->au64[3] = pu256Value->au64[3];
10282 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10283 return;
10284 }
10285
10286 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10287 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10288}
10289#endif
10290
10291
10292/**
10293 * Stores a descriptor register (sgdt, sidt).
10294 *
10295 * @returns Strict VBox status code.
10296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10297 * @param cbLimit The limit.
10298 * @param GCPtrBase The base address.
10299 * @param iSegReg The index of the segment register to use for
10300 * this access. The base and limits are checked.
10301 * @param GCPtrMem The address of the guest memory.
10302 */
10303IEM_STATIC VBOXSTRICTRC
10304iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10305{
10306 /*
10307 * The SIDT and SGDT instructions actually store the data using two
10308 * independent writes. The instructions do not respond to opsize prefixes.
10309 */
10310 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10311 if (rcStrict == VINF_SUCCESS)
10312 {
10313 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10314 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10315 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10316 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10317 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10318 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10319 else
10320 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10321 }
10322 return rcStrict;
10323}
10324
10325
10326/**
10327 * Pushes a word onto the stack.
10328 *
10329 * @returns Strict VBox status code.
10330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10331 * @param u16Value The value to push.
10332 */
10333IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10334{
10335 /* Decrement the stack pointer. */
10336 uint64_t uNewRsp;
10337 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10338
10339 /* Write the word the lazy way. */
10340 uint16_t *pu16Dst;
10341 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10342 if (rc == VINF_SUCCESS)
10343 {
10344 *pu16Dst = u16Value;
10345 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10346 }
10347
10348 /* Commit the new RSP value unless an access handler made trouble. */
10349 if (rc == VINF_SUCCESS)
10350 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10351
10352 return rc;
10353}
10354
10355
10356/**
10357 * Pushes a dword onto the stack.
10358 *
10359 * @returns Strict VBox status code.
10360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10361 * @param u32Value The value to push.
10362 */
10363IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10364{
10365 /* Decrement the stack pointer. */
10366 uint64_t uNewRsp;
10367 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10368
10369 /* Write the dword the lazy way. */
10370 uint32_t *pu32Dst;
10371 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10372 if (rc == VINF_SUCCESS)
10373 {
10374 *pu32Dst = u32Value;
10375 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10376 }
10377
10378 /* Commit the new RSP value unless an access handler made trouble. */
10379 if (rc == VINF_SUCCESS)
10380 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10381
10382 return rc;
10383}
10384
10385
10386/**
10387 * Pushes a dword segment register value onto the stack.
10388 *
10389 * @returns Strict VBox status code.
10390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10391 * @param u32Value The value to push.
10392 */
10393IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10394{
10395 /* Decrement the stack pointer. */
10396 uint64_t uNewRsp;
10397 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10398
10399 /* The Intel docs talk about zero extending the selector register
10400 value. My actual Intel CPU here might be zero extending the value
10401 but it still only writes the lower word... */
10402 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10403 * happens when crossing an electric page boundary: is the high word checked
10404 * for write accessibility or not? Probably it is. What about segment limits?
10405 * It appears this behavior is also shared with trap error codes.
10406 *
10407 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10408 * ancient hardware to determine when it actually did change. */
10409 uint16_t *pu16Dst;
10410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10411 if (rc == VINF_SUCCESS)
10412 {
10413 *pu16Dst = (uint16_t)u32Value;
10414 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10415 }
10416
10417 /* Commit the new RSP value unless an access handler made trouble. */
10418 if (rc == VINF_SUCCESS)
10419 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10420
10421 return rc;
10422}
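/*
 * Illustrative example (numbers are made up): as emulated above, if the dword
 * stack slot happens to contain 0xdeadbeef and "push ds" is executed with
 * DS=0x0023, the slot ends up as 0xdead0023 - only the low word is written
 * even though (R|E)SP still moves by four.
 */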
10423
10424
10425/**
10426 * Pushes a qword onto the stack.
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10430 * @param u64Value The value to push.
10431 */
10432IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10433{
10434 /* Decrement the stack pointer. */
10435 uint64_t uNewRsp;
10436 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10437
10438 /* Write the qword the lazy way. */
10439 uint64_t *pu64Dst;
10440 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10441 if (rc == VINF_SUCCESS)
10442 {
10443 *pu64Dst = u64Value;
10444 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10445 }
10446
10447 /* Commit the new RSP value unless an access handler made trouble. */
10448 if (rc == VINF_SUCCESS)
10449 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10450
10451 return rc;
10452}
10453
10454
10455/**
10456 * Pops a word from the stack.
10457 *
10458 * @returns Strict VBox status code.
10459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10460 * @param pu16Value Where to store the popped value.
10461 */
10462IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10463{
10464 /* Increment the stack pointer. */
10465 uint64_t uNewRsp;
10466 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10467
10468 /* Read the word the lazy way. */
10469 uint16_t const *pu16Src;
10470 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10471 if (rc == VINF_SUCCESS)
10472 {
10473 *pu16Value = *pu16Src;
10474 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10475
10476 /* Commit the new RSP value. */
10477 if (rc == VINF_SUCCESS)
10478 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10479 }
10480
10481 return rc;
10482}
10483
10484
10485/**
10486 * Pops a dword from the stack.
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10490 * @param pu32Value Where to store the popped value.
10491 */
10492IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10493{
10494 /* Increment the stack pointer. */
10495 uint64_t uNewRsp;
10496 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10497
10498 /* Read the dword the lazy way. */
10499 uint32_t const *pu32Src;
10500 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10501 if (rc == VINF_SUCCESS)
10502 {
10503 *pu32Value = *pu32Src;
10504 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10505
10506 /* Commit the new RSP value. */
10507 if (rc == VINF_SUCCESS)
10508 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10509 }
10510
10511 return rc;
10512}
10513
10514
10515/**
10516 * Pops a qword from the stack.
10517 *
10518 * @returns Strict VBox status code.
10519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10520 * @param pu64Value Where to store the popped value.
10521 */
10522IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10523{
10524 /* Increment the stack pointer. */
10525 uint64_t uNewRsp;
10526 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10527
10528 /* Read the qword the lazy way. */
10529 uint64_t const *pu64Src;
10530 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10531 if (rc == VINF_SUCCESS)
10532 {
10533 *pu64Value = *pu64Src;
10534 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10535
10536 /* Commit the new RSP value. */
10537 if (rc == VINF_SUCCESS)
10538 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10539 }
10540
10541 return rc;
10542}
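/*
 * Illustrative sketch (not part of the build): combining the pop and push
 * helpers above. Each helper only commits the new RSP once the memory access
 * fully succeeded, so e.g. a #PF on the pop leaves RSP untouched. The helper
 * name is hypothetical.
 */
#if 0
static VBOXSTRICTRC iemExampleExchangeStackTop(PVMCPUCC pVCpu, uint64_t u64New, uint64_t *pu64Old)
{
    VBOXSTRICTRC rcStrict = iemMemStackPopU64(pVCpu, pu64Old);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU64(pVCpu, u64New);
    return rcStrict;
}
#endif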
10543
10544
10545/**
10546 * Pushes a word onto the stack, using a temporary stack pointer.
10547 *
10548 * @returns Strict VBox status code.
10549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10550 * @param u16Value The value to push.
10551 * @param pTmpRsp Pointer to the temporary stack pointer.
10552 */
10553IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10554{
10555 /* Decrement the stack pointer. */
10556 RTUINT64U NewRsp = *pTmpRsp;
10557 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10558
10559 /* Write the word the lazy way. */
10560 uint16_t *pu16Dst;
10561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10562 if (rc == VINF_SUCCESS)
10563 {
10564 *pu16Dst = u16Value;
10565 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10566 }
10567
10568 /* Commit the new RSP value unless an access handler made trouble. */
10569 if (rc == VINF_SUCCESS)
10570 *pTmpRsp = NewRsp;
10571
10572 return rc;
10573}
10574
10575
10576/**
10577 * Pushes a dword onto the stack, using a temporary stack pointer.
10578 *
10579 * @returns Strict VBox status code.
10580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10581 * @param u32Value The value to push.
10582 * @param pTmpRsp Pointer to the temporary stack pointer.
10583 */
10584IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10585{
10586 /* Decrement the stack pointer. */
10587 RTUINT64U NewRsp = *pTmpRsp;
10588 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10589
10590 /* Write the dword the lazy way. */
10591 uint32_t *pu32Dst;
10592 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10593 if (rc == VINF_SUCCESS)
10594 {
10595 *pu32Dst = u32Value;
10596 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10597 }
10598
10599 /* Commit the new RSP value unless an access handler made trouble. */
10600 if (rc == VINF_SUCCESS)
10601 *pTmpRsp = NewRsp;
10602
10603 return rc;
10604}
10605
10606
10607/**
10608 * Pushes a qword onto the stack, using a temporary stack pointer.
10609 *
10610 * @returns Strict VBox status code.
10611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10612 * @param u64Value The value to push.
10613 * @param pTmpRsp Pointer to the temporary stack pointer.
10614 */
10615IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10616{
10617 /* Decrement the stack pointer. */
10618 RTUINT64U NewRsp = *pTmpRsp;
10619 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10620
10621 /* Write the qword the lazy way. */
10622 uint64_t *pu64Dst;
10623 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10624 if (rc == VINF_SUCCESS)
10625 {
10626 *pu64Dst = u64Value;
10627 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10628 }
10629
10630 /* Commit the new RSP value unless an access handler made trouble. */
10631 if (rc == VINF_SUCCESS)
10632 *pTmpRsp = NewRsp;
10633
10634 return rc;
10635}
10636
10637
10638/**
10639 * Pops a word from the stack, using a temporary stack pointer.
10640 *
10641 * @returns Strict VBox status code.
10642 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10643 * @param pu16Value Where to store the popped value.
10644 * @param pTmpRsp Pointer to the temporary stack pointer.
10645 */
10646IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10647{
10648 /* Increment the stack pointer. */
10649 RTUINT64U NewRsp = *pTmpRsp;
10650 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10651
10652 /* Read the word the lazy way. */
10653 uint16_t const *pu16Src;
10654 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10655 if (rc == VINF_SUCCESS)
10656 {
10657 *pu16Value = *pu16Src;
10658 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10659
10660 /* Commit the new RSP value. */
10661 if (rc == VINF_SUCCESS)
10662 *pTmpRsp = NewRsp;
10663 }
10664
10665 return rc;
10666}
10667
10668
10669/**
10670 * Pops a dword from the stack, using a temporary stack pointer.
10671 *
10672 * @returns Strict VBox status code.
10673 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10674 * @param pu32Value Where to store the popped value.
10675 * @param pTmpRsp Pointer to the temporary stack pointer.
10676 */
10677IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10678{
10679 /* Increment the stack pointer. */
10680 RTUINT64U NewRsp = *pTmpRsp;
10681 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10682
10683 /* Read the dword the lazy way. */
10684 uint32_t const *pu32Src;
10685 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10686 if (rc == VINF_SUCCESS)
10687 {
10688 *pu32Value = *pu32Src;
10689 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10690
10691 /* Commit the new RSP value. */
10692 if (rc == VINF_SUCCESS)
10693 *pTmpRsp = NewRsp;
10694 }
10695
10696 return rc;
10697}
10698
10699
10700/**
10701 * Pops a qword from the stack, using a temporary stack pointer.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param pu64Value Where to store the popped value.
10706 * @param pTmpRsp Pointer to the temporary stack pointer.
10707 */
10708IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10709{
10710 /* Increment the stack pointer. */
10711 RTUINT64U NewRsp = *pTmpRsp;
10712 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10713
10714 /* Read the qword the lazy way. */
10715 uint64_t const *pu64Src;
10716 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10717 if (rcStrict == VINF_SUCCESS)
10718 {
10719 *pu64Value = *pu64Src;
10720 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10721
10722 /* Commit the new RSP value. */
10723 if (rcStrict == VINF_SUCCESS)
10724 *pTmpRsp = NewRsp;
10725 }
10726
10727 return rcStrict;
10728}
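
/*
 * Illustration only (hypothetical caller, not taken from the surrounding code):
 * the *Ex push/pop helpers above work on a caller-supplied RTUINT64U copy of
 * RSP, so a multi-step operation can stage several stack accesses and only
 * commit the register once everything has succeeded.  Roughly:
 *
 *     RTUINT64U    TmpRsp;
 *     TmpRsp.u = pVCpu->cpum.GstCtx.rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushU32Ex(pVCpu, uFirst, &TmpRsp);  // uFirst/uSecond are
 *     if (rcStrict == VINF_SUCCESS)                                          // assumed caller locals.
 *         rcStrict = iemMemStackPushU32Ex(pVCpu, uSecond, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = TmpRsp.u;  // commit RSP only after both pushes worked
 *     return rcStrict;
 */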
10729
10730
10731/**
10732 * Begin a special stack push (used by interrupt, exceptions and such).
10733 *
10734 * This will raise \#SS or \#PF if appropriate.
10735 *
10736 * @returns Strict VBox status code.
10737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10738 * @param cbMem The number of bytes to push onto the stack.
10739 * @param ppvMem Where to return the pointer to the stack memory.
10740 * As with the other memory functions this could be
10741 * direct access or bounce buffered access, so
10742 * don't commit any register changes until the commit call
10743 * succeeds.
10744 * @param puNewRsp Where to return the new RSP value. This must be
10745 * passed unchanged to
10746 * iemMemStackPushCommitSpecial().
10747 */
10748IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10749{
10750 Assert(cbMem < UINT8_MAX);
10751 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10752 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10753}
10754
10755
10756/**
10757 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10758 *
10759 * This will update the rSP.
10760 *
10761 * @returns Strict VBox status code.
10762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10763 * @param pvMem The pointer returned by
10764 * iemMemStackPushBeginSpecial().
10765 * @param uNewRsp The new RSP value returned by
10766 * iemMemStackPushBeginSpecial().
10767 */
10768IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10769{
10770 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10771 if (rcStrict == VINF_SUCCESS)
10772 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10773 return rcStrict;
10774}
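
/*
 * Illustration only (hypothetical, simplified): the begin/commit pair above is
 * meant to be used back to back - write into the returned mapping and let the
 * commit call update RSP.  Roughly, with u64Frame being an assumed caller local:
 *
 *     void        *pvStack;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStack, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *(uint64_t *)pvStack = u64Frame;
 *         rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStack, uNewRsp);
 *     }
 *     return rcStrict;
 */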
10775
10776
10777/**
10778 * Begin a special stack pop (used by iret, retf and such).
10779 *
10780 * This will raise \#SS or \#PF if appropriate.
10781 *
10782 * @returns Strict VBox status code.
10783 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10784 * @param cbMem The number of bytes to pop from the stack.
10785 * @param ppvMem Where to return the pointer to the stack memory.
10786 * @param puNewRsp Where to return the new RSP value. This must be
10787 * assigned to CPUMCTX::rsp manually some time
10788 * after iemMemStackPopDoneSpecial() has been
10789 * called.
10790 */
10791IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10792{
10793 Assert(cbMem < UINT8_MAX);
10794 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10795 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10796}
10797
10798
10799/**
10800 * Continue a special stack pop (used by iret and retf).
10801 *
10802 * This will raise \#SS or \#PF if appropriate.
10803 *
10804 * @returns Strict VBox status code.
10805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10806 * @param cbMem The number of bytes to pop from the stack.
10807 * @param ppvMem Where to return the pointer to the stack memory.
10808 * @param puNewRsp Where to return the new RSP value. This must be
10809 * assigned to CPUMCTX::rsp manually some time
10810 * after iemMemStackPopDoneSpecial() has been
10811 * called.
10812 */
10813IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10814{
10815 Assert(cbMem < UINT8_MAX);
10816 RTUINT64U NewRsp;
10817 NewRsp.u = *puNewRsp;
10818 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10819 *puNewRsp = NewRsp.u;
10820 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10821}
10822
10823
10824/**
10825 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10826 * iemMemStackPopContinueSpecial).
10827 *
10828 * The caller will manually commit the rSP.
10829 *
10830 * @returns Strict VBox status code.
10831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10832 * @param pvMem The pointer returned by
10833 * iemMemStackPopBeginSpecial() or
10834 * iemMemStackPopContinueSpecial().
10835 */
10836IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10837{
10838 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10839}
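
/*
 * Illustration only (hypothetical, simplified): the pop counterpart reads from
 * the mapping, unmaps it via the done call, and assigns the new RSP to the
 * guest context manually afterwards, as the doc comments above require:
 *
 *     void const  *pvStack;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvStack, &uNewRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         uint64_t const u64Popped = *(uint64_t const *)pvStack;  // consume the data before unmapping
 *         rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvStack);
 *         if (rcStrict == VINF_SUCCESS)
 *             pVCpu->cpum.GstCtx.rsp = uNewRsp;                   // the manual RSP commit
 *         // ... use u64Popped ...
 *     }
 *     return rcStrict;
 */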
10840
10841
10842/**
10843 * Fetches a system table byte.
10844 *
10845 * @returns Strict VBox status code.
10846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10847 * @param pbDst Where to return the byte.
10848 * @param iSegReg The index of the segment register to use for
10849 * this access. The base and limits are checked.
10850 * @param GCPtrMem The address of the guest memory.
10851 */
10852IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10853{
10854 /* The lazy approach for now... */
10855 uint8_t const *pbSrc;
10856 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10857 if (rc == VINF_SUCCESS)
10858 {
10859 *pbDst = *pbSrc;
10860 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10861 }
10862 return rc;
10863}
10864
10865
10866/**
10867 * Fetches a system table word.
10868 *
10869 * @returns Strict VBox status code.
10870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10871 * @param pu16Dst Where to return the word.
10872 * @param iSegReg The index of the segment register to use for
10873 * this access. The base and limits are checked.
10874 * @param GCPtrMem The address of the guest memory.
10875 */
10876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10877{
10878 /* The lazy approach for now... */
10879 uint16_t const *pu16Src;
10880 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10881 if (rc == VINF_SUCCESS)
10882 {
10883 *pu16Dst = *pu16Src;
10884 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10885 }
10886 return rc;
10887}
10888
10889
10890/**
10891 * Fetches a system table dword.
10892 *
10893 * @returns Strict VBox status code.
10894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10895 * @param pu32Dst Where to return the dword.
10896 * @param iSegReg The index of the segment register to use for
10897 * this access. The base and limits are checked.
10898 * @param GCPtrMem The address of the guest memory.
10899 */
10900IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10901{
10902 /* The lazy approach for now... */
10903 uint32_t const *pu32Src;
10904 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10905 if (rc == VINF_SUCCESS)
10906 {
10907 *pu32Dst = *pu32Src;
10908 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10909 }
10910 return rc;
10911}
10912
10913
10914/**
10915 * Fetches a system table qword.
10916 *
10917 * @returns Strict VBox status code.
10918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10919 * @param pu64Dst Where to return the qword.
10920 * @param iSegReg The index of the segment register to use for
10921 * this access. The base and limits are checked.
10922 * @param GCPtrMem The address of the guest memory.
10923 */
10924IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10925{
10926 /* The lazy approach for now... */
10927 uint64_t const *pu64Src;
10928 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10929 if (rc == VINF_SUCCESS)
10930 {
10931 *pu64Dst = *pu64Src;
10932 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10933 }
10934 return rc;
10935}
10936
10937
10938/**
10939 * Fetches a descriptor table entry with caller specified error code.
10940 *
10941 * @returns Strict VBox status code.
10942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10943 * @param pDesc Where to return the descriptor table entry.
10944 * @param uSel The selector whose table entry to fetch.
10945 * @param uXcpt The exception to raise on table lookup error.
10946 * @param uErrorCode The error code associated with the exception.
10947 */
10948IEM_STATIC VBOXSTRICTRC
10949iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10950{
10951 AssertPtr(pDesc);
10952 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10953
10954 /** @todo did the 286 require all 8 bytes to be accessible? */
10955 /*
10956 * Get the selector table base and check bounds.
10957 */
10958 RTGCPTR GCPtrBase;
10959 if (uSel & X86_SEL_LDT)
10960 {
10961 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10962 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10963 {
10964 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10965 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10966 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10967 uErrorCode, 0);
10968 }
10969
10970 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10971 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10972 }
10973 else
10974 {
10975 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10976 {
10977 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10978 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10979 uErrorCode, 0);
10980 }
10981 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10982 }
10983
10984 /*
10985 * Read the legacy descriptor and maybe the long mode extensions if
10986 * required.
10987 */
10988 VBOXSTRICTRC rcStrict;
10989 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10990 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10991 else
10992 {
10993 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10994 if (rcStrict == VINF_SUCCESS)
10995 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10996 if (rcStrict == VINF_SUCCESS)
10997 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10998 if (rcStrict == VINF_SUCCESS)
10999 pDesc->Legacy.au16[3] = 0;
11000 else
11001 return rcStrict;
11002 }
11003
11004 if (rcStrict == VINF_SUCCESS)
11005 {
11006 if ( !IEM_IS_LONG_MODE(pVCpu)
11007 || pDesc->Legacy.Gen.u1DescType)
11008 pDesc->Long.au64[1] = 0;
11009 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11010 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11011 else
11012 {
11013 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11014 /** @todo is this the right exception? */
11015 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11016 }
11017 }
11018 return rcStrict;
11019}
11020
11021
11022/**
11023 * Fetches a descriptor table entry.
11024 *
11025 * @returns Strict VBox status code.
11026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11027 * @param pDesc Where to return the descriptor table entry.
11028 * @param uSel The selector whose table entry to fetch.
11029 * @param uXcpt The exception to raise on table lookup error.
11030 */
11031IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11032{
11033 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11034}
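
/*
 * Illustration only (hypothetical, simplified): a typical caller fetches the
 * descriptor for a selector and then inspects the IEMSELDESC fields; the real
 * checks in the instruction implementations are more involved.  Roughly:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;                  // lookup failure already raised the exception
 *     if (   !Desc.Legacy.Gen.u1DescType    // placeholder checks: not a system descriptor ...
 *         || !Desc.Legacy.Gen.u1Present)    // ... and present.
 *         return iemRaiseGeneralProtectionFault0(pVCpu);
 */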
11035
11036
11037/**
11038 * Fakes a long mode stack selector for SS = 0.
11039 *
11040 * @param pDescSs Where to return the fake stack descriptor.
11041 * @param uDpl The DPL we want.
11042 */
11043IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11044{
11045 pDescSs->Long.au64[0] = 0;
11046 pDescSs->Long.au64[1] = 0;
11047 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11048 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11049 pDescSs->Long.Gen.u2Dpl = uDpl;
11050 pDescSs->Long.Gen.u1Present = 1;
11051 pDescSs->Long.Gen.u1Long = 1;
11052}
11053
11054
11055/**
11056 * Marks the selector descriptor as accessed (only non-system descriptors).
11057 *
11058 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11059 * will therefore skip the limit checks.
11060 *
11061 * @returns Strict VBox status code.
11062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11063 * @param uSel The selector.
11064 */
11065IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11066{
11067 /*
11068 * Get the selector table base and calculate the entry address.
11069 */
11070 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11071 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11072 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11073 GCPtr += uSel & X86_SEL_MASK;
11074
11075 /*
11076 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11077 * ugly stuff to avoid this. This will make sure it's an atomic access
11078 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11079 */
11080 VBOXSTRICTRC rcStrict;
11081 uint32_t volatile *pu32;
11082 if ((GCPtr & 3) == 0)
11083 {
11084 /* The normal case, map the 32 bits around the accessed bit (40). */
11085 GCPtr += 2 + 2;
11086 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11087 if (rcStrict != VINF_SUCCESS)
11088 return rcStrict;
11089 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11090 }
11091 else
11092 {
11093 /* The misaligned GDT/LDT case, map the whole thing. */
11094 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11095 if (rcStrict != VINF_SUCCESS)
11096 return rcStrict;
11097 switch ((uintptr_t)pu32 & 3)
11098 {
11099 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11100 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11101 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11102 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11103 }
11104 }
11105
11106 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11107}
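
/*
 * Worked example for the bit juggling above (illustration only): the accessed
 * bit is bit 40 of the 8-byte descriptor, i.e. bit 0 of the type field in
 * byte 5.  In the aligned case the mapping starts at descriptor offset 4, so
 * the target becomes bit 40 - 32 = 8 of the mapped dword.  In the misaligned
 * case the whole descriptor is mapped and the bit index is rebased onto a
 * 4-byte aligned byte pointer, e.g. for (pu32 & 3) == 1 the code uses
 * pu32 + 3 (3 bytes = 24 bits in) and bit 40 - 24 = 16, which again lands on
 * byte 5, bit 0 of the descriptor.
 */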
11108
11109/** @} */
11110
11111
11112/*
11113 * Include the C/C++ implementation of instruction.
11114 */
11115#include "IEMAllCImpl.cpp.h"
11116
11117
11118
11119/** @name "Microcode" macros.
11120 *
11121 * The idea is that we should be able to use the same code to interpret
11122 * instructions as well as to recompile them. Thus this obfuscation.
11123 *
11124 * @{
11125 */
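/*
 * Illustration only (hypothetical, heavily simplified): an instruction body is
 * written purely in terms of these macros, so the same source can be expanded
 * into the interpreter code below or, eventually, fed to a recompiler.
 * Something in this spirit:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
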
11126#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11127#define IEM_MC_END() }
11128#define IEM_MC_PAUSE() do {} while (0)
11129#define IEM_MC_CONTINUE() do {} while (0)
11130
11131/** Internal macro. */
11132#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11133 do \
11134 { \
11135 VBOXSTRICTRC rcStrict2 = a_Expr; \
11136 if (rcStrict2 != VINF_SUCCESS) \
11137 return rcStrict2; \
11138 } while (0)
11139
11140
11141#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11142#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11143#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11144#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11145#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11146#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11147#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11148#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11149#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11150 do { \
11151 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11152 return iemRaiseDeviceNotAvailable(pVCpu); \
11153 } while (0)
11154#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11155 do { \
11156 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11157 return iemRaiseDeviceNotAvailable(pVCpu); \
11158 } while (0)
11159#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11160 do { \
11161 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11162 return iemRaiseMathFault(pVCpu); \
11163 } while (0)
11164#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11165 do { \
11166 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11167 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11168 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11169 return iemRaiseUndefinedOpcode(pVCpu); \
11170 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11171 return iemRaiseDeviceNotAvailable(pVCpu); \
11172 } while (0)
11173#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11174 do { \
11175 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11176 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11177 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11178 return iemRaiseUndefinedOpcode(pVCpu); \
11179 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11180 return iemRaiseDeviceNotAvailable(pVCpu); \
11181 } while (0)
11182#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11183 do { \
11184 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11185 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11186 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11187 return iemRaiseUndefinedOpcode(pVCpu); \
11188 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11189 return iemRaiseDeviceNotAvailable(pVCpu); \
11190 } while (0)
11191#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11192 do { \
11193 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11194 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11195 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11196 return iemRaiseUndefinedOpcode(pVCpu); \
11197 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11198 return iemRaiseDeviceNotAvailable(pVCpu); \
11199 } while (0)
11200#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11201 do { \
11202 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11203 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11204 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11205 return iemRaiseUndefinedOpcode(pVCpu); \
11206 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11207 return iemRaiseDeviceNotAvailable(pVCpu); \
11208 } while (0)
11209#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11210 do { \
11211 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11212 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11213 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11214 return iemRaiseUndefinedOpcode(pVCpu); \
11215 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11216 return iemRaiseDeviceNotAvailable(pVCpu); \
11217 } while (0)
11218#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11219 do { \
11220 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11221 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11222 return iemRaiseUndefinedOpcode(pVCpu); \
11223 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11224 return iemRaiseDeviceNotAvailable(pVCpu); \
11225 } while (0)
11226#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11227 do { \
11228 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11229 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11230 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11231 return iemRaiseUndefinedOpcode(pVCpu); \
11232 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11233 return iemRaiseDeviceNotAvailable(pVCpu); \
11234 } while (0)
11235#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11236 do { \
11237 if (pVCpu->iem.s.uCpl != 0) \
11238 return iemRaiseGeneralProtectionFault0(pVCpu); \
11239 } while (0)
11240#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11241 do { \
11242 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11243 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11244 } while (0)
11245#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11246 do { \
11247 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11248 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11249 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11250 return iemRaiseUndefinedOpcode(pVCpu); \
11251 } while (0)
11252#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11253 do { \
11254 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11255 return iemRaiseGeneralProtectionFault0(pVCpu); \
11256 } while (0)
11257
11258
11259#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11260#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11261#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11262#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11263#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11264#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11265#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11266 uint32_t a_Name; \
11267 uint32_t *a_pName = &a_Name
11268#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11269 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11270
11271#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11272#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11273
11274#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11275#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11276#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11277#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11278#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11279#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11280#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11281#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11282#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11283#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11284#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11285#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11286#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11287#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11288#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11289#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11290#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11291#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11292 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11293 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11294 } while (0)
11295#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11296 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11297 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11298 } while (0)
11299#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11300 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11301 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11302 } while (0)
11303/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11304#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11305 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11306 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11307 } while (0)
11308#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11309 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11310 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11311 } while (0)
11312/** @note Not for IOPL or IF testing or modification. */
11313#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11314#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11315#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11316#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11317
11318#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11319#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11320#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11321#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11322#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11323#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11324#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11325#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11326#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11327#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11328/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11329#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11330 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11331 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11332 } while (0)
11333#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11334 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11335 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11336 } while (0)
11337#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11338 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11339
11340
11341#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11342#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11343/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11344 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11345#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11346#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11347/** @note Not for IOPL or IF testing or modification. */
11348#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11349
11350#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11351#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11352#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11353 do { \
11354 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11355 *pu32Reg += (a_u32Value); \
11356 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11357 } while (0)
11358#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11359
11360#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11361#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11362#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11363 do { \
11364 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11365 *pu32Reg -= (a_u32Value); \
11366 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11367 } while (0)
11368#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11369#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11370
11371#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11372#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11373#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11374#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11375#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11376#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11377#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11378
11379#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11380#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11381#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11382#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11383
11384#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11385#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11386#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11387
11388#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11389#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11390#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11391
11392#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11393#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11394#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11395
11396#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11397#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11398#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11399
11400#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11401
11402#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11403
11404#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11405#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11406#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11407 do { \
11408 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11409 *pu32Reg &= (a_u32Value); \
11410 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11411 } while (0)
11412#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11413
11414#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11415#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11416#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11417 do { \
11418 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11419 *pu32Reg |= (a_u32Value); \
11420 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11421 } while (0)
11422#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11423
11424
11425/** @note Not for IOPL or IF modification. */
11426#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11427/** @note Not for IOPL or IF modification. */
11428#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11429/** @note Not for IOPL or IF modification. */
11430#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11431
11432#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11433
11434/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11435#define IEM_MC_FPU_TO_MMX_MODE() do { \
11436 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11437 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11438 } while (0)
11439
11440/** Switches the FPU state from MMX mode (FTW=0xffff). */
11441#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11442 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11443 } while (0)
11444
11445#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11446 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11447#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11448 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11449#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11450 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11451 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11452 } while (0)
11453#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11454 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11455 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11456 } while (0)
11457#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11458 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11459#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11460 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11461#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11462 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11463
11464#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11465 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11466 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11467 } while (0)
11468#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11469 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11470#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11471 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11472#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11473 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11474#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11475 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11476 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11477 } while (0)
11478#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11479 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11480#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11481 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11482 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11483 } while (0)
11484#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11485 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11486#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11487 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11488 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11489 } while (0)
11490#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11491 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11492#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11493 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11494#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11495 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11496#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11497 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11498#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11499 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11500 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11501 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11502 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11503 } while (0)
11504
11505#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11506 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11507 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11508 } while (0)
11509#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11510 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11511 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11512 } while (0)
11513#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11514 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11515 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11516 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11517 } while (0)
11518#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11519 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11520 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11521 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11522 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11523 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11524 } while (0)
11525
11526#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11527#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11528 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11529 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11530 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11531 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11532 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11533 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11535 } while (0)
11536#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11537 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11538 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11539 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11540 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11541 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11542 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11543 } while (0)
11544#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11545 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11546 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11547 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11548 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11549 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11550 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11551 } while (0)
11552#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11553 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11554 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11555 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11556 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11557 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11558 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11559 } while (0)
11560
11561#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11562 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11563#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11564 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11565#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11566 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
11567#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11568 do { uintptr_t const iYRegTmp = (a_iYReg); \
11569 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11570 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11571 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11572 } while (0)
11573
11574#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11575 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11576 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11577 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11578 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11579 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11580 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11581 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11582 } while (0)
11583#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11584 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11585 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11586 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11587 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11588 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11589 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11590 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11591 } while (0)
11592#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11593 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11594 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11595 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11596 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11597 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11598 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11599 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11600 } while (0)
11601
11602#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11603 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11604 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11605 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11606 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11607 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11608 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11609 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11610 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11611 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11612 } while (0)
11613#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11614 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11615 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11616 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11617 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11618 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11619 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11620 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11622 } while (0)
11623#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11624 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11625 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11626 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11627 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11628 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11629 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11630 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11631 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11632 } while (0)
11633#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11634 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11635 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11636 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11637 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11638 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11639 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11640 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11641 } while (0)
11642
11643#ifndef IEM_WITH_SETJMP
11644# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11645 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11648# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11650#else
11651# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11652 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11654 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11655# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11656 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11657#endif
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11664# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11666#else
11667# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11668 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11670 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11671# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11680# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11682#else
11683# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11684 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11685# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11686 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11687# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689#endif
11690
11691#ifdef SOME_UNUSED_FUNCTION
11692# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11694#endif
11695
11696#ifndef IEM_WITH_SETJMP
11697# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11698 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11705#else
11706# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11707 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11709 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11710# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11713 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11714#endif
11715
11716#ifndef IEM_WITH_SETJMP
11717# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11718 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11721# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11723#else
11724# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11725 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11727 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11728# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11729 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11730#endif
11731
11732#ifndef IEM_WITH_SETJMP
11733# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11735# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11736 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11737#else
11738# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11739 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11740# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11741 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11742#endif
11743
11744#ifndef IEM_WITH_SETJMP
11745# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11749#else
11750# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11751 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11752# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11753 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11754#endif
11755
11756
11757
11758#ifndef IEM_WITH_SETJMP
11759# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11760 do { \
11761 uint8_t u8Tmp; \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11763 (a_u16Dst) = u8Tmp; \
11764 } while (0)
11765# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u32Dst) = u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint8_t u8Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u64Dst) = u8Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint16_t u16Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u32Dst) = u16Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint16_t u16Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u64Dst) = u16Tmp; \
11788 } while (0)
11789# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11790 do { \
11791 uint32_t u32Tmp; \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11793 (a_u64Dst) = u32Tmp; \
11794 } while (0)
11795#else /* IEM_WITH_SETJMP */
11796# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11797 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11798# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11803 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11804# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11805 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11806# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808#endif /* IEM_WITH_SETJMP */
11809
11810#ifndef IEM_WITH_SETJMP
11811# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint8_t u8Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u16Dst) = (int8_t)u8Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint8_t u8Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u32Dst) = (int8_t)u8Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint8_t u8Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u64Dst) = (int8_t)u8Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint16_t u16Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u32Dst) = (int16_t)u16Tmp; \
11834 } while (0)
11835# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 do { \
11837 uint16_t u16Tmp; \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11839 (a_u64Dst) = (int16_t)u16Tmp; \
11840 } while (0)
11841# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11842 do { \
11843 uint32_t u32Tmp; \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11845 (a_u64Dst) = (int32_t)u32Tmp; \
11846 } while (0)
11847#else /* IEM_WITH_SETJMP */
11848# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11855 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11856# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11857 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11858# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11859 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11860#endif /* IEM_WITH_SETJMP */
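/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * zero- and sign-extending fetch macros above are meant to be used inside an
 * IEM_MC_BEGIN/IEM_MC_END block of an opcode decoder function.  The fragment
 * below is patterned after the memory form of a MOVZX-style instruction; the
 * local names are arbitrary and 'bRm' is assumed to hold the ModR/M byte, as
 * in the real decoder functions.
 *
 * @code
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(uint32_t, u32Value);
 *      IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      // Zero extend a byte from memory into a 32-bit GPR; the _SX_ variants
 *      // sign extend instead.
 *      IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg, u32Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */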
11861
11862#ifndef IEM_WITH_SETJMP
11863# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11865# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11867# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11869# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11871#else
11872# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11873 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11874# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11875 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11876# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11877 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11878# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11879 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11880#endif
11881
11882#ifndef IEM_WITH_SETJMP
11883# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11885# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11887# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11889# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11891#else
11892# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11893 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11894# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11895 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11896# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11897 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11898# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11899 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11900#endif
11901
11902#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11903#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11904#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11905#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11906#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11907#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11908#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11909 do { \
11910 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11911 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11912 } while (0)
11913#define IEM_MC_STORE_MEM_INDEF_D80_BY_REF(a_pd80Dst) \
11914 do { \
11915 (a_pd80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11916 (a_pd80Dst)->au16[4] = UINT16_C(0xffff); \
11917 } while (0)
11918
11919#ifndef IEM_WITH_SETJMP
11920# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11922# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11923 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11924#else
11925# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11926 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11927# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11928 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11929#endif
11930
11931#ifndef IEM_WITH_SETJMP
11932# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11934# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11936#else
11937# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11938 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11939# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11940 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11941#endif
11942
11943
11944#define IEM_MC_PUSH_U16(a_u16Value) \
11945 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11946#define IEM_MC_PUSH_U32(a_u32Value) \
11947 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11948#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11949 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11950#define IEM_MC_PUSH_U64(a_u64Value) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11952
11953#define IEM_MC_POP_U16(a_pu16Value) \
11954 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11955#define IEM_MC_POP_U32(a_pu32Value) \
11956 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11957#define IEM_MC_POP_U64(a_pu64Value) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
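/*
 * Editor's note (illustrative sketch, not from the original source): the stack
 * push/pop wrappers are likewise used inside IEM_MC blocks.  This fragment is
 * modelled on the 16-bit register PUSH path; 'bRm' holding the ModR/M byte is
 * an assumption carried over from the surrounding decoder conventions.
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_PUSH_U16(u16Value);  // bails out of the function on failure (IEM_MC_RETURN_ON_FAILURE)
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */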
11959
11960/** Maps guest memory for direct or bounce buffered access.
11961 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11962 * @remarks May return.
11963 */
11964#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11965 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11966
11967/** Maps guest memory for direct or bounce buffered access.
11968 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11969 * @remarks May return.
11970 */
11971#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11972 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11973
11974/** Commits the memory and unmaps the guest memory.
11975 * @remarks May return.
11976 */
11977#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11978 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11979
11980/** Commits the memory and unmaps the guest memory unless the FPU status word
 11981 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
 11982 * would cause FLD not to store.
11983 *
11984 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11985 * store, while \#P will not.
11986 *
11987 * @remarks May in theory return - for now.
11988 */
11989#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11990 do { \
11991 if ( !(a_u16FSW & X86_FSW_ES) \
11992 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11993 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
11994 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11995 } while (0)
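/*
 * Editor's note (illustrative sketch, not from the original source): the
 * map/commit pair above is the usual way of implementing read-modify-write
 * memory operands.  The fragment below follows the byte-sized ADD r/m8,r8
 * memory path; the worker (iemAImpl_add_u8) and the exact statement ordering
 * should be treated as illustrative rather than authoritative.
 *
 * @code
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
 *      IEM_MC_ARG(uint8_t,         u8Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING();
 *      IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);  // a_iArg = 0
 *      IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u8, pu8Dst, u8Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);   // write back + unmap
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */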
11996
11997/** Calculates the effective address from R/M. */
11998#ifndef IEM_WITH_SETJMP
11999# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12000 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12001#else
12002# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12003 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12004#endif
12005
12006#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12007#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12008#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12009#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12010#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12011#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12012#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
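/*
 * Editor's note (illustrative sketch, not from the original source): the
 * IEM_MC_CALL_AIMPL_3/4 variants capture the worker's return value.  The DIV
 * workers, for instance, report whether a divide error should be raised.  A
 * register-form 32-bit DIV body patterned on that looks roughly like this,
 * with 'bRm' and the worker name assumed from the usual conventions:
 *
 * @code
 *      IEM_MC_BEGIN(4, 1);
 *      IEM_MC_ARG(uint32_t *, pu32AX,   0);
 *      IEM_MC_ARG(uint32_t *, pu32DX,   1);
 *      IEM_MC_ARG(uint32_t,   u32Value, 2);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  3);
 *      IEM_MC_LOCAL(int32_t,  rc);
 *
 *      IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
 *      IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_AIMPL_4(rc, iemAImpl_div_u32, pu32AX, pu32DX, u32Value, pEFlags);
 *      IEM_MC_IF_LOCAL_IS_Z(rc) {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ELSE() {
 *          IEM_MC_RAISE_DIVIDE_ERROR();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 * @endcode
 */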
12013
12014/**
12015 * Defers the rest of the instruction emulation to a C implementation routine
12016 * and returns, only taking the standard parameters.
12017 *
12018 * @param a_pfnCImpl The pointer to the C routine.
12019 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12020 */
12021#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12022
12023/**
12024 * Defers the rest of instruction emulation to a C implementation routine and
12025 * returns, taking one argument in addition to the standard ones.
12026 *
12027 * @param a_pfnCImpl The pointer to the C routine.
12028 * @param a0 The argument.
12029 */
12030#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12031
12032/**
12033 * Defers the rest of the instruction emulation to a C implementation routine
12034 * and returns, taking two arguments in addition to the standard ones.
12035 *
12036 * @param a_pfnCImpl The pointer to the C routine.
12037 * @param a0 The first extra argument.
12038 * @param a1 The second extra argument.
12039 */
12040#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12041
12042/**
12043 * Defers the rest of the instruction emulation to a C implementation routine
12044 * and returns, taking three arguments in addition to the standard ones.
12045 *
12046 * @param a_pfnCImpl The pointer to the C routine.
12047 * @param a0 The first extra argument.
12048 * @param a1 The second extra argument.
12049 * @param a2 The third extra argument.
12050 */
12051#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12052
12053/**
12054 * Defers the rest of the instruction emulation to a C implementation routine
12055 * and returns, taking four arguments in addition to the standard ones.
12056 *
12057 * @param a_pfnCImpl The pointer to the C routine.
12058 * @param a0 The first extra argument.
12059 * @param a1 The second extra argument.
12060 * @param a2 The third extra argument.
12061 * @param a3 The fourth extra argument.
12062 */
12063#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12064
12065/**
12066 * Defers the rest of the instruction emulation to a C implementation routine
12067 * and returns, taking five arguments in addition to the standard ones.
12068 *
12069 * @param a_pfnCImpl The pointer to the C routine.
12070 * @param a0 The first extra argument.
12071 * @param a1 The second extra argument.
12072 * @param a2 The third extra argument.
12073 * @param a3 The fourth extra argument.
12074 * @param a4 The fifth extra argument.
12075 */
12076#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12077
12078/**
12079 * Defers the entire instruction emulation to a C implementation routine and
12080 * returns, only taking the standard parameters.
12081 *
12082 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12083 *
12084 * @param a_pfnCImpl The pointer to the C routine.
12085 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12086 */
12087#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12088
12089/**
12090 * Defers the entire instruction emulation to a C implementation routine and
12091 * returns, taking one argument in addition to the standard ones.
12092 *
12093 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12094 *
12095 * @param a_pfnCImpl The pointer to the C routine.
12096 * @param a0 The argument.
12097 */
12098#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12099
12100/**
12101 * Defers the entire instruction emulation to a C implementation routine and
12102 * returns, taking two arguments in addition to the standard ones.
12103 *
12104 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12105 *
12106 * @param a_pfnCImpl The pointer to the C routine.
12107 * @param a0 The first extra argument.
12108 * @param a1 The second extra argument.
12109 */
12110#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12111
12112/**
12113 * Defers the entire instruction emulation to a C implementation routine and
12114 * returns, taking three arguments in addition to the standard ones.
12115 *
12116 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12117 *
12118 * @param a_pfnCImpl The pointer to the C routine.
12119 * @param a0 The first extra argument.
12120 * @param a1 The second extra argument.
12121 * @param a2 The third extra argument.
12122 */
12123#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
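/*
 * Editor's note (illustrative sketch, not from the original source): simple
 * instructions whose semantics live entirely in a C implementation routine
 * just defer to it.  The wrapper name below is made up; iemCImpl_hlt is the
 * existing C worker for HLT and is only used as a plausible target here.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 * @endcode
 */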
12124
12125/**
12126 * Calls a FPU assembly implementation taking one visible argument.
12127 *
12128 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12129 * @param a0 The first extra argument.
12130 */
12131#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12132 do { \
12133 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12134 } while (0)
12135
12136/**
12137 * Calls a FPU assembly implementation taking two visible arguments.
12138 *
12139 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12140 * @param a0 The first extra argument.
12141 * @param a1 The second extra argument.
12142 */
12143#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12144 do { \
12145 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12146 } while (0)
12147
12148/**
12149 * Calls a FPU assembly implementation taking three visible arguments.
12150 *
12151 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12152 * @param a0 The first extra argument.
12153 * @param a1 The second extra argument.
12154 * @param a2 The third extra argument.
12155 */
12156#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12157 do { \
12158 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12159 } while (0)
12160
12161#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12162 do { \
12163 (a_FpuData).FSW = (a_FSW); \
12164 (a_FpuData).r80Result = *(a_pr80Value); \
12165 } while (0)
12166
12167/** Pushes FPU result onto the stack. */
12168#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12169 iemFpuPushResult(pVCpu, &a_FpuData)
12170/** Pushes FPU result onto the stack and sets the FPUDP. */
12171#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12172 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12173
12174/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12175#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12176 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12177
12178/** Stores FPU result in a stack register. */
12179#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12180 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12181/** Stores FPU result in a stack register and pops the stack. */
12182#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12183 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12184/** Stores FPU result in a stack register and sets the FPUDP. */
12185#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12186 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12187/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12188 * stack. */
12189#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12190 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12191
12192/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12193#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12194 iemFpuUpdateOpcodeAndIp(pVCpu)
12195/** Free a stack register (for FFREE and FFREEP). */
12196#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12197 iemFpuStackFree(pVCpu, a_iStReg)
12198/** Increment the FPU stack pointer. */
12199#define IEM_MC_FPU_STACK_INC_TOP() \
12200 iemFpuStackIncTop(pVCpu)
12201/** Decrement the FPU stack pointer. */
12202#define IEM_MC_FPU_STACK_DEC_TOP() \
12203 iemFpuStackDecTop(pVCpu)
12204
12205/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12206#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12207 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12208/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12209#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12210 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12211/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12212#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12213 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12214/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12215#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12216 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12217/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12218 * stack. */
12219#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12220 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12221/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12222#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12223 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12224
12225/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12226#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12227 iemFpuStackUnderflow(pVCpu, a_iStDst)
12228/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12229 * stack. */
12230#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12231 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12232/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12233 * FPUDS. */
12234#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12235 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12236/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12237 * FPUDS. Pops stack. */
12238#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12239 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12240/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12241 * stack twice. */
12242#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12243 iemFpuStackUnderflowThenPopPop(pVCpu)
12244/** Raises a FPU stack underflow exception for an instruction pushing a result
12245 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12246#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12247 iemFpuStackPushUnderflow(pVCpu)
12248/** Raises a FPU stack underflow exception for an instruction pushing a result
12249 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12250#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12251 iemFpuStackPushUnderflowTwo(pVCpu)
12252
12253/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12254 * FPUIP, FPUCS and FOP. */
12255#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12256 iemFpuStackPushOverflow(pVCpu)
12257/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12258 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12259#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12260 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12261/** Prepares for using the FPU state.
12262 * Ensures that we can use the host FPU in the current context (RC+R0).
12263 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12264#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12265/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12266#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12267/** Actualizes the guest FPU state so it can be accessed and modified. */
12268#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12269
12270/** Prepares for using the SSE state.
12271 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12272 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12273#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12274/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12275#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12276/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12277#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12278
12279/** Prepares for using the AVX state.
12280 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12281 * Ensures the guest AVX state in the CPUMCTX is up to date.
12282 * @note This will include the AVX512 state too when support for it is added
12283 * due to the zero-extending feature of VEX instructions. */
12284#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12285/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12286#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12287/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12288#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12289
12290/**
12291 * Calls a MMX assembly implementation taking two visible arguments.
12292 *
12293 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12294 * @param a0 The first extra argument.
12295 * @param a1 The second extra argument.
12296 */
12297#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12298 do { \
12299 IEM_MC_PREPARE_FPU_USAGE(); \
12300 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12301 } while (0)
12302
12303/**
12304 * Calls a MMX assembly implementation taking three visible arguments.
12305 *
12306 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12307 * @param a0 The first extra argument.
12308 * @param a1 The second extra argument.
12309 * @param a2 The third extra argument.
12310 */
12311#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12312 do { \
12313 IEM_MC_PREPARE_FPU_USAGE(); \
12314 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12315 } while (0)
12316
12317
12318/**
12319 * Calls a SSE assembly implementation taking two visible arguments.
12320 *
12321 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12322 * @param a0 The first extra argument.
12323 * @param a1 The second extra argument.
12324 */
12325#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12326 do { \
12327 IEM_MC_PREPARE_SSE_USAGE(); \
12328 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12329 } while (0)
12330
12331/**
12332 * Calls a SSE assembly implementation taking three visible arguments.
12333 *
12334 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12335 * @param a0 The first extra argument.
12336 * @param a1 The second extra argument.
12337 * @param a2 The third extra argument.
12338 */
12339#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12340 do { \
12341 IEM_MC_PREPARE_SSE_USAGE(); \
12342 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12343 } while (0)
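/*
 * Editor's note (illustrative sketch, not from the original source): an SSE
 * register-to-register binary operation built from the macros above, shaped
 * like the PXOR xmm,xmm path.  The worker name and the exception/prepare
 * ordering follow the usual pattern but should be read as a sketch, not as
 * the definitive implementation.
 *
 * @code
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pxor_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */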
12344
12345
12346/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12347 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12348#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12349 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12350
12351/**
12352 * Calls an AVX assembly implementation taking two visible arguments.
12353 *
12354 * There is one implicit zero'th argument, a pointer to the extended state.
12355 *
12356 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12357 * @param a1 The first extra argument.
12358 * @param a2 The second extra argument.
12359 */
12360#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12361 do { \
12362 IEM_MC_PREPARE_AVX_USAGE(); \
12363 a_pfnAImpl(pXState, (a1), (a2)); \
12364 } while (0)
12365
12366/**
12367 * Calls an AVX assembly implementation taking three visible arguments.
12368 *
12369 * There is one implicit zero'th argument, a pointer to the extended state.
12370 *
12371 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12372 * @param a1 The first extra argument.
12373 * @param a2 The second extra argument.
12374 * @param a3 The third extra argument.
12375 */
12376#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12377 do { \
12378 IEM_MC_PREPARE_AVX_USAGE(); \
12379 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12380 } while (0)
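/*
 * Editor's note (illustrative sketch, not from the original source): the AVX
 * call macros take the extended state as an implicit zeroth argument, which
 * IEM_MC_IMPLICIT_AVX_AIMPL_ARGS declares.  The worker name below
 * (iemAImpl_myVexOp_u128) is hypothetical; everything else follows the same
 * shape as the SSE sketch further up.
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();               // implicit arg 0: pXState
 *      IEM_MC_ARG(PRTUINT128U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
 *      IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT();
 *      IEM_MC_PREPARE_AVX_USAGE();                     // also done inside IEM_MC_CALL_AVX_AIMPL_2
 *      IEM_MC_REF_XREG_U128(puDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(puSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_myVexOp_u128, puDst, puSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */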
12381
12382/** @note Not for IOPL or IF testing. */
12383#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12384/** @note Not for IOPL or IF testing. */
12385#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12386/** @note Not for IOPL or IF testing. */
12387#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12388/** @note Not for IOPL or IF testing. */
12389#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12390/** @note Not for IOPL or IF testing. */
12391#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12392 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12393 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12394/** @note Not for IOPL or IF testing. */
12395#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12396 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12397 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12398/** @note Not for IOPL or IF testing. */
12399#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12400 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12401 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12402 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12403/** @note Not for IOPL or IF testing. */
12404#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12405 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12406 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12407 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12408#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12409#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12410#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12411/** @note Not for IOPL or IF testing. */
12412#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12413 if ( pVCpu->cpum.GstCtx.cx != 0 \
12414 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12415/** @note Not for IOPL or IF testing. */
12416#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12417 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12418 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12419/** @note Not for IOPL or IF testing. */
12420#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12421 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12422 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12425 if ( pVCpu->cpum.GstCtx.cx != 0 \
12426 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12429 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12430 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12433 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12434 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12435#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12436#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12437
12438#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12439 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12440#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12441 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12442#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12443 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12444#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12445 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12446#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12447 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12448#define IEM_MC_IF_FCW_IM() \
12449 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12450
12451#define IEM_MC_ELSE() } else {
12452#define IEM_MC_ENDIF() } do {} while (0)
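/*
 * Editor's note (illustrative sketch, not from the original source): the FPU
 * conditionals above combine with the result and underflow helpers earlier in
 * this group.  An FADD ST(0),ST(i)-style body patterned on the usual
 * two-operand x87 arithmetic path; iemAImpl_fadd_r80_by_r80 is the existing
 * FADD worker, the remaining names are local choices.
 *
 * @code
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,         1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,         2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */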
12453
12454/** @} */
12455
12456
12457/** @name Opcode Debug Helpers.
12458 * @{
12459 */
12460#ifdef VBOX_WITH_STATISTICS
12461# ifdef IN_RING3
12462# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12463# else
12464# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12465# endif
12466#else
12467# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12468#endif
12469
12470#ifdef DEBUG
12471# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12472 do { \
12473 IEMOP_INC_STATS(a_Stats); \
12474 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12475 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12476 } while (0)
12477
12478# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12479 do { \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12481 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12482 (void)RT_CONCAT(OP_,a_Upper); \
12483 (void)(a_fDisHints); \
12484 (void)(a_fIemHints); \
12485 } while (0)
12486
12487# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12488 do { \
12489 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12490 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12491 (void)RT_CONCAT(OP_,a_Upper); \
12492 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12493 (void)(a_fDisHints); \
12494 (void)(a_fIemHints); \
12495 } while (0)
12496
12497# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12498 do { \
12499 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12500 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12501 (void)RT_CONCAT(OP_,a_Upper); \
12502 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12503 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12504 (void)(a_fDisHints); \
12505 (void)(a_fIemHints); \
12506 } while (0)
12507
12508# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12509 do { \
12510 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12511 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12512 (void)RT_CONCAT(OP_,a_Upper); \
12513 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12514 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12515 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12516 (void)(a_fDisHints); \
12517 (void)(a_fIemHints); \
12518 } while (0)
12519
12520# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12521 do { \
12522 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12523 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12524 (void)RT_CONCAT(OP_,a_Upper); \
12525 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12526 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12527 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12528 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12529 (void)(a_fDisHints); \
12530 (void)(a_fIemHints); \
12531 } while (0)
12532
12533#else
12534# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12535
12536# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12537 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12538# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12539 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12540# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12541 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12542# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12543 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12544# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12545 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12546
12547#endif
12548
12549#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12550 IEMOP_MNEMONIC0EX(a_Lower, \
12551 #a_Lower, \
12552 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12553#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12554 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12555 #a_Lower " " #a_Op1, \
12556 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12557#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12558 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12559 #a_Lower " " #a_Op1 "," #a_Op2, \
12560 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12561#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12562 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12563 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12564 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12565#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12566 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12567 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12568 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
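/*
 * Editor's note (illustrative sketch, not from the original source): a decoder
 * function tags itself with one of the IEMOP_MNEMONIC* macros right after the
 * opcode byte(s) have been read, which feeds both the statistics counters and
 * the Log4 decode logging.  ADD Gv,Ev is used because its form/operand tokens
 * (RM, Gv, Ev) and statistics entry exist; the wrapper name and the stubbed
 * body are made up.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_add_Gv_Ev)
 *      {
 *          IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          // ... pick the register or memory form and emit the IEM_MC block ...
 *          RT_NOREF(bRm);
 *          return VERR_IEM_INSTR_NOT_IMPLEMENTED;  // placeholder for the elided body
 *      }
 * @endcode
 */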
12569
12570/** @} */
12571
12572
12573/** @name Opcode Helpers.
12574 * @{
12575 */
12576
12577#ifdef IN_RING3
12578# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12579 do { \
12580 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12581 else \
12582 { \
12583 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12584 return IEMOP_RAISE_INVALID_OPCODE(); \
12585 } \
12586 } while (0)
12587#else
12588# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12589 do { \
12590 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12591 else return IEMOP_RAISE_INVALID_OPCODE(); \
12592 } while (0)
12593#endif
12594
12595/** The instruction requires a 186 or later. */
12596#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12597# define IEMOP_HLP_MIN_186() do { } while (0)
12598#else
12599# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12600#endif
12601
12602/** The instruction requires a 286 or later. */
12603#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12604# define IEMOP_HLP_MIN_286() do { } while (0)
12605#else
12606# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12607#endif
12608
12609/** The instruction requires a 386 or later. */
12610#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12611# define IEMOP_HLP_MIN_386() do { } while (0)
12612#else
12613# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12614#endif
12615
12616/** The instruction requires a 386 or later if the given expression is true. */
12617#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12618# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12619#else
12620# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12621#endif
12622
12623/** The instruction requires a 486 or later. */
12624#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12625# define IEMOP_HLP_MIN_486() do { } while (0)
12626#else
12627# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12628#endif
12629
12630/** The instruction requires a Pentium (586) or later. */
12631#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12632# define IEMOP_HLP_MIN_586() do { } while (0)
12633#else
12634# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12635#endif
12636
12637/** The instruction requires a PentiumPro (686) or later. */
12638#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12639# define IEMOP_HLP_MIN_686() do { } while (0)
12640#else
12641# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12642#endif
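/*
 * Editor's note (illustrative sketch, not from the original source): the
 * IEMOP_HLP_MIN_* helpers sit at the top of a decoder function so that opcodes
 * introduced by later CPU generations raise \#UD when a sufficiently old
 * target CPU is being emulated.  Wrapper name and body are hypothetical.
 *
 * @code
 *      FNIEMOP_DEF(iemOp_example_286_insn)
 *      {
 *          IEMOP_HLP_MIN_286();                  // #UD if the target CPU predates the 286
 *          // ... normal decoding would continue here ...
 *          return IEMOP_RAISE_INVALID_OPCODE();  // placeholder so the sketch is self-contained
 *      }
 * @endcode
 */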
12643
12644
12645/** The instruction raises an \#UD in real and V8086 mode. */
12646#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12647 do \
12648 { \
12649 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12650 else return IEMOP_RAISE_INVALID_OPCODE(); \
12651 } while (0)
12652
12653#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12654/** This instruction raises an \#UD in real and V8086 mode or when not using a
12655 * 64-bit code segment when in long mode (applicable to all VMX instructions
12656 * except VMCALL).
12657 */
12658#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12659 do \
12660 { \
12661 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12662 && ( !IEM_IS_LONG_MODE(pVCpu) \
12663 || IEM_IS_64BIT_CODE(pVCpu))) \
12664 { /* likely */ } \
12665 else \
12666 { \
12667 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12668 { \
12669 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12670 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12671 return IEMOP_RAISE_INVALID_OPCODE(); \
12672 } \
12673 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12674 { \
12675 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12676 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12677 return IEMOP_RAISE_INVALID_OPCODE(); \
12678 } \
12679 } \
12680 } while (0)
12681
12682/** The instruction can only be executed in VMX operation (VMX root mode and
12683 * non-root mode).
12684 *
12685 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12686 */
12687# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12688 do \
12689 { \
12690 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12691 else \
12692 { \
12693 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12694 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12695 return IEMOP_RAISE_INVALID_OPCODE(); \
12696 } \
12697 } while (0)
12698#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12699
12700/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12701 * 64-bit mode. */
12702#define IEMOP_HLP_NO_64BIT() \
12703 do \
12704 { \
12705 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12706 return IEMOP_RAISE_INVALID_OPCODE(); \
12707 } while (0)
12708
12709/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12710 * 64-bit mode. */
12711#define IEMOP_HLP_ONLY_64BIT() \
12712 do \
12713 { \
12714 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12715 return IEMOP_RAISE_INVALID_OPCODE(); \
12716 } while (0)
12717
12718/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12719#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12720 do \
12721 { \
12722 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12723 iemRecalEffOpSize64Default(pVCpu); \
12724 } while (0)
12725
12726/** The instruction has 64-bit operand size if 64-bit mode. */
12727#define IEMOP_HLP_64BIT_OP_SIZE() \
12728 do \
12729 { \
12730 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12731 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12732 } while (0)
12733
12734/** Only a REX prefix immediately preceding the first opcode byte takes
 12735 * effect. This macro helps ensure this as well as log bad guest code. */
12736#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12737 do \
12738 { \
12739 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12740 { \
12741 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12742 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12743 pVCpu->iem.s.uRexB = 0; \
12744 pVCpu->iem.s.uRexIndex = 0; \
12745 pVCpu->iem.s.uRexReg = 0; \
12746 iemRecalEffOpSize(pVCpu); \
12747 } \
12748 } while (0)
12749
12750/**
12751 * Done decoding.
12752 */
12753#define IEMOP_HLP_DONE_DECODING() \
12754 do \
12755 { \
12756 /*nothing for now, maybe later... */ \
12757 } while (0)
12758
12759/**
12760 * Done decoding, raise \#UD exception if lock prefix present.
12761 */
12762#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12763 do \
12764 { \
12765 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12766 { /* likely */ } \
12767 else \
12768 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12769 } while (0)
12770
12771
12772/**
12773 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12774 * repnz or size prefixes are present, or if in real or v8086 mode.
12775 */
12776#define IEMOP_HLP_DONE_VEX_DECODING() \
12777 do \
12778 { \
12779 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12780 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12781 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12782 { /* likely */ } \
12783 else \
12784 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12785 } while (0)
12786
12787/**
12788 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
 12789 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L != 0.
12790 */
12791#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12792 do \
12793 { \
12794 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12795 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12796 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12797 && pVCpu->iem.s.uVexLength == 0)) \
12798 { /* likely */ } \
12799 else \
12800 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12801 } while (0)
12802
12803
12804/**
12805 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12806 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12807 * register 0, or if in real or v8086 mode.
12808 */
12809#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12810 do \
12811 { \
12812 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12813 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12814 && !pVCpu->iem.s.uVex3rdReg \
12815 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12816 { /* likely */ } \
12817 else \
12818 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12819 } while (0)
12820
12821/**
12822 * Done decoding VEX, no V, L=0.
12823 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12824 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12825 */
12826#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12827 do \
12828 { \
12829 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12830 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12831 && pVCpu->iem.s.uVexLength == 0 \
12832 && pVCpu->iem.s.uVex3rdReg == 0 \
12833 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12834 { /* likely */ } \
12835 else \
12836 return IEMOP_RAISE_INVALID_OPCODE(); \
12837 } while (0)
12838
12839#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12840 do \
12841 { \
12842 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12843 { /* likely */ } \
12844 else \
12845 { \
12846 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12847 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12848 } \
12849 } while (0)
12850#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12851 do \
12852 { \
12853 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12854 { /* likely */ } \
12855 else \
12856 { \
12857 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12858 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12859 } \
12860 } while (0)
12861
12862/**
12863 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12864 * are present.
12865 */
12866#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12867 do \
12868 { \
12869 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12870 { /* likely */ } \
12871 else \
12872 return IEMOP_RAISE_INVALID_OPCODE(); \
12873 } while (0)
12874
12875/**
12876 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12877 * prefixes are present.
12878 */
12879#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12880 do \
12881 { \
12882 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12883 { /* likely */ } \
12884 else \
12885 return IEMOP_RAISE_INVALID_OPCODE(); \
12886 } while (0)
12887
12888
12889/**
12890 * Calculates the effective address of a ModR/M memory operand.
12891 *
12892 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12893 *
12894 * @return Strict VBox status code.
12895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12896 * @param bRm The ModRM byte.
12897 * @param cbImm The size of any immediate following the
12898 * effective address opcode bytes. Important for
12899 * RIP relative addressing.
12900 * @param pGCPtrEff Where to return the effective address.
12901 */
12902IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12903{
12904 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12905# define SET_SS_DEF() \
12906 do \
12907 { \
12908 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12909 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12910 } while (0)
12911
12912 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12913 {
12914/** @todo Check the effective address size crap! */
12915 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12916 {
12917 uint16_t u16EffAddr;
12918
12919 /* Handle the disp16 form with no registers first. */
12920 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12921 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12922 else
12923 {
12924                /* Get the displacement. */
12925 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12926 {
12927 case 0: u16EffAddr = 0; break;
12928 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12929 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12930 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12931 }
12932
12933 /* Add the base and index registers to the disp. */
12934 switch (bRm & X86_MODRM_RM_MASK)
12935 {
12936 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12937 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12938 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12939 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12940 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12941 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12942 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12943 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12944 }
12945 }
12946
12947 *pGCPtrEff = u16EffAddr;
12948 }
12949 else
12950 {
12951 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12952 uint32_t u32EffAddr;
12953
12954 /* Handle the disp32 form with no registers first. */
12955 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12956 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12957 else
12958 {
12959 /* Get the register (or SIB) value. */
12960 switch ((bRm & X86_MODRM_RM_MASK))
12961 {
12962 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12963 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12964 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12965 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12966 case 4: /* SIB */
12967 {
12968 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12969
12970 /* Get the index and scale it. */
12971 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12972 {
12973 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12974 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12975 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12976 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12977 case 4: u32EffAddr = 0; /*none */ break;
12978 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12979 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12980 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12981 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12982 }
12983 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12984
12985 /* add base */
12986 switch (bSib & X86_SIB_BASE_MASK)
12987 {
12988 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12989 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12990 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12991 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12992 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12993 case 5:
12994 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12995 {
12996 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12997 SET_SS_DEF();
12998 }
12999 else
13000 {
13001 uint32_t u32Disp;
13002 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13003 u32EffAddr += u32Disp;
13004 }
13005 break;
13006 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13007 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13008 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13009 }
13010 break;
13011 }
13012 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13013 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13014 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13015 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13016 }
13017
13018 /* Get and add the displacement. */
13019 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13020 {
13021 case 0:
13022 break;
13023 case 1:
13024 {
13025 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13026 u32EffAddr += i8Disp;
13027 break;
13028 }
13029 case 2:
13030 {
13031 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13032 u32EffAddr += u32Disp;
13033 break;
13034 }
13035 default:
13036 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13037 }
13038
13039 }
13040 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13041 *pGCPtrEff = u32EffAddr;
13042 else
13043 {
13044 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13045 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13046 }
13047 }
13048 }
13049 else
13050 {
13051 uint64_t u64EffAddr;
13052
13053 /* Handle the rip+disp32 form with no registers first. */
13054 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13055 {
13056 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13057 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13058 }
13059 else
13060 {
13061 /* Get the register (or SIB) value. */
13062 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13063 {
13064 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13065 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13066 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13067 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13068 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13069 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13070 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13071 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13072 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13073 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13074 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13075 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13076 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13077 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13078 /* SIB */
13079 case 4:
13080 case 12:
13081 {
13082 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13083
13084 /* Get the index and scale it. */
13085 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13086 {
13087 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13088 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13089 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13090 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13091 case 4: u64EffAddr = 0; /*none */ break;
13092 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13093 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13094 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13095 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13096 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13097 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13098 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13099 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13100 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13101 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13102 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13103 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13104 }
13105 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13106
13107 /* add base */
13108 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13109 {
13110 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13111 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13112 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13113 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13114 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13115 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13116 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13117 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13118 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13119 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13120 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13121 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13122 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13123 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13124 /* complicated encodings */
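/* Base 5 and 13 (rBP/r13): with mod=0 there is no base register, only a disp32; otherwise the base is rBP (with SS as the default segment) or r13 when REX.B is set. */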
13125 case 5:
13126 case 13:
13127 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13128 {
13129 if (!pVCpu->iem.s.uRexB)
13130 {
13131 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13132 SET_SS_DEF();
13133 }
13134 else
13135 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13136 }
13137 else
13138 {
13139 uint32_t u32Disp;
13140 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13141 u64EffAddr += (int32_t)u32Disp;
13142 }
13143 break;
13144 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13145 }
13146 break;
13147 }
13148 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13149 }
13150
13151 /* Get and add the displacement. */
13152 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13153 {
13154 case 0:
13155 break;
13156 case 1:
13157 {
13158 int8_t i8Disp;
13159 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13160 u64EffAddr += i8Disp;
13161 break;
13162 }
13163 case 2:
13164 {
13165 uint32_t u32Disp;
13166 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13167 u64EffAddr += (int32_t)u32Disp;
13168 break;
13169 }
13170 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13171 }
13172
13173 }
13174
13175 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13176 *pGCPtrEff = u64EffAddr;
13177 else
13178 {
13179 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13180 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13181 }
13182 }
13183
13184 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13185 return VINF_SUCCESS;
13186}
13187
13188
13189/**
13190 * Calculates the effective address of a ModR/M memory operand.
13191 *
13192 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13193 *
13194 * @return Strict VBox status code.
13195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13196 * @param bRm The ModRM byte.
13197 * @param cbImm The size of any immediate following the
13198 * effective address opcode bytes. Important for
13199 * RIP relative addressing.
13200 * @param pGCPtrEff Where to return the effective address.
13201 * @param offRsp RSP displacement.
13202 */
13203IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13204{
13205 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13206# define SET_SS_DEF() \
13207 do \
13208 { \
13209 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13210 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13211 } while (0)
13212
13213 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13214 {
13215/** @todo Check the effective address size crap! */
13216 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13217 {
13218 uint16_t u16EffAddr;
13219
13220 /* Handle the disp16 form with no registers first. */
13221 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13222 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13223 else
13224 {
13225 /* Get the displacement. */
13226 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13227 {
13228 case 0: u16EffAddr = 0; break;
13229 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13230 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13231 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13232 }
13233
13234 /* Add the base and index registers to the disp. */
13235 switch (bRm & X86_MODRM_RM_MASK)
13236 {
13237 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13238 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13239 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13240 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13241 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13242 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13243 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13244 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13245 }
13246 }
13247
13248 *pGCPtrEff = u16EffAddr;
13249 }
13250 else
13251 {
13252 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13253 uint32_t u32EffAddr;
13254
13255 /* Handle the disp32 form with no registers first. */
13256 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13257 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13258 else
13259 {
13260 /* Get the register (or SIB) value. */
13261 switch ((bRm & X86_MODRM_RM_MASK))
13262 {
13263 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13264 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13265 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13266 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13267 case 4: /* SIB */
13268 {
13269 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13270
13271 /* Get the index and scale it. */
13272 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13273 {
13274 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13275 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13276 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13277 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13278 case 4: u32EffAddr = 0; /*none */ break;
13279 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13280 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13281 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13282 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13283 }
13284 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13285
13286 /* add base */
13287 switch (bSib & X86_SIB_BASE_MASK)
13288 {
13289 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13290 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13291 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13292 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13293 case 4:
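/* This is what the Ex variant adds: the caller supplied offRsp is applied when the base is the stack pointer. */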
13294 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13295 SET_SS_DEF();
13296 break;
13297 case 5:
13298 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13299 {
13300 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13301 SET_SS_DEF();
13302 }
13303 else
13304 {
13305 uint32_t u32Disp;
13306 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13307 u32EffAddr += u32Disp;
13308 }
13309 break;
13310 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13311 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13312 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13313 }
13314 break;
13315 }
13316 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13317 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13318 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13319 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13320 }
13321
13322 /* Get and add the displacement. */
13323 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13324 {
13325 case 0:
13326 break;
13327 case 1:
13328 {
13329 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13330 u32EffAddr += i8Disp;
13331 break;
13332 }
13333 case 2:
13334 {
13335 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13336 u32EffAddr += u32Disp;
13337 break;
13338 }
13339 default:
13340 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13341 }
13342
13343 }
13344 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13345 *pGCPtrEff = u32EffAddr;
13346 else
13347 {
13348 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13349 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13350 }
13351 }
13352 }
13353 else
13354 {
13355 uint64_t u64EffAddr;
13356
13357 /* Handle the rip+disp32 form with no registers first. */
13358 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13359 {
13360 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13361 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13362 }
13363 else
13364 {
13365 /* Get the register (or SIB) value. */
13366 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13367 {
13368 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13369 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13370 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13371 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13372 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13373 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13374 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13375 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13376 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13377 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13378 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13379 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13380 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13381 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13382 /* SIB */
13383 case 4:
13384 case 12:
13385 {
13386 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13387
13388 /* Get the index and scale it. */
13389 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13390 {
13391 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13392 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13393 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13394 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13395 case 4: u64EffAddr = 0; /*none */ break;
13396 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13397 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13398 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13399 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13400 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13401 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13402 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13403 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13404 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13405 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13406 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13407 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13408 }
13409 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13410
13411 /* add base */
13412 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13413 {
13414 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13415 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13416 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13417 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13418 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13419 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13420 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13421 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13422 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13423 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13424 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13425 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13426 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13427 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13428 /* complicated encodings */
13429 case 5:
13430 case 13:
13431 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13432 {
13433 if (!pVCpu->iem.s.uRexB)
13434 {
13435 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13436 SET_SS_DEF();
13437 }
13438 else
13439 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13440 }
13441 else
13442 {
13443 uint32_t u32Disp;
13444 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13445 u64EffAddr += (int32_t)u32Disp;
13446 }
13447 break;
13448 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13449 }
13450 break;
13451 }
13452 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13453 }
13454
13455 /* Get and add the displacement. */
13456 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13457 {
13458 case 0:
13459 break;
13460 case 1:
13461 {
13462 int8_t i8Disp;
13463 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13464 u64EffAddr += i8Disp;
13465 break;
13466 }
13467 case 2:
13468 {
13469 uint32_t u32Disp;
13470 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13471 u64EffAddr += (int32_t)u32Disp;
13472 break;
13473 }
13474 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13475 }
13476
13477 }
13478
13479 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13480 *pGCPtrEff = u64EffAddr;
13481 else
13482 {
13483 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13484 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13485 }
13486 }
13487
13488 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13489 return VINF_SUCCESS;
13490}
13491
13492
13493#ifdef IEM_WITH_SETJMP
13494/**
13495 * Calculates the effective address of a ModR/M memory operand.
13496 *
13497 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13498 *
13499 * May longjmp on internal error.
13500 *
13501 * @return The effective address.
13502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13503 * @param bRm The ModRM byte.
13504 * @param cbImm The size of any immediate following the
13505 * effective address opcode bytes. Important for
13506 * RIP relative addressing.
13507 */
13508IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13509{
13510 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13511# define SET_SS_DEF() \
13512 do \
13513 { \
13514 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13515 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13516 } while (0)
13517
13518 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13519 {
13520/** @todo Check the effective address size crap! */
13521 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13522 {
13523 uint16_t u16EffAddr;
13524
13525 /* Handle the disp16 form with no registers first. */
13526 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13527 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13528 else
13529 {
13530 /* Get the displacement. */
13531 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13532 {
13533 case 0: u16EffAddr = 0; break;
13534 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13535 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13536 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13537 }
13538
13539 /* Add the base and index registers to the disp. */
13540 switch (bRm & X86_MODRM_RM_MASK)
13541 {
13542 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13543 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13544 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13545 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13546 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13547 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13548 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13549 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13550 }
13551 }
13552
13553 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13554 return u16EffAddr;
13555 }
13556
13557 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13558 uint32_t u32EffAddr;
13559
13560 /* Handle the disp32 form with no registers first. */
13561 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13562 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13563 else
13564 {
13565 /* Get the register (or SIB) value. */
13566 switch ((bRm & X86_MODRM_RM_MASK))
13567 {
13568 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13569 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13570 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13571 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13572 case 4: /* SIB */
13573 {
13574 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13575
13576 /* Get the index and scale it. */
13577 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13578 {
13579 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13580 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13581 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13582 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13583 case 4: u32EffAddr = 0; /*none */ break;
13584 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13585 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13586 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13587 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13588 }
13589 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13590
13591 /* add base */
13592 switch (bSib & X86_SIB_BASE_MASK)
13593 {
13594 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13595 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13596 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13597 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13598 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13599 case 5:
13600 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13601 {
13602 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13603 SET_SS_DEF();
13604 }
13605 else
13606 {
13607 uint32_t u32Disp;
13608 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13609 u32EffAddr += u32Disp;
13610 }
13611 break;
13612 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13613 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13614 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13615 }
13616 break;
13617 }
13618 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13619 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13620 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13621 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13622 }
13623
13624 /* Get and add the displacement. */
13625 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13626 {
13627 case 0:
13628 break;
13629 case 1:
13630 {
13631 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13632 u32EffAddr += i8Disp;
13633 break;
13634 }
13635 case 2:
13636 {
13637 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13638 u32EffAddr += u32Disp;
13639 break;
13640 }
13641 default:
13642 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13643 }
13644 }
13645
13646 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13647 {
13648 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13649 return u32EffAddr;
13650 }
13651 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13652 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13653 return u32EffAddr & UINT16_MAX;
13654 }
13655
13656 uint64_t u64EffAddr;
13657
13658 /* Handle the rip+disp32 form with no registers first. */
13659 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13660 {
13661 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13662 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13663 }
13664 else
13665 {
13666 /* Get the register (or SIB) value. */
13667 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13668 {
13669 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13670 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13671 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13672 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13673 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13674 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13675 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13676 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13677 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13678 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13679 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13680 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13681 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13682 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13683 /* SIB */
13684 case 4:
13685 case 12:
13686 {
13687 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13688
13689 /* Get the index and scale it. */
13690 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13691 {
13692 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13693 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13694 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13695 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13696 case 4: u64EffAddr = 0; /*none */ break;
13697 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13698 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13699 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13700 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13701 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13702 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13703 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13704 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13705 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13706 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13707 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13708 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13709 }
13710 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13711
13712 /* add base */
13713 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13714 {
13715 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13716 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13717 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13718 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13719 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13720 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13721 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13722 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13723 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13724 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13725 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13726 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13727 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13728 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13729 /* complicated encodings */
13730 case 5:
13731 case 13:
13732 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13733 {
13734 if (!pVCpu->iem.s.uRexB)
13735 {
13736 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13737 SET_SS_DEF();
13738 }
13739 else
13740 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13741 }
13742 else
13743 {
13744 uint32_t u32Disp;
13745 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13746 u64EffAddr += (int32_t)u32Disp;
13747 }
13748 break;
13749 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13750 }
13751 break;
13752 }
13753 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13754 }
13755
13756 /* Get and add the displacement. */
13757 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13758 {
13759 case 0:
13760 break;
13761 case 1:
13762 {
13763 int8_t i8Disp;
13764 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13765 u64EffAddr += i8Disp;
13766 break;
13767 }
13768 case 2:
13769 {
13770 uint32_t u32Disp;
13771 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13772 u64EffAddr += (int32_t)u32Disp;
13773 break;
13774 }
13775 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13776 }
13777
13778 }
13779
13780 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13781 {
13782 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13783 return u64EffAddr;
13784 }
13785 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13786 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13787 return u64EffAddr & UINT32_MAX;
13788}
13789#endif /* IEM_WITH_SETJMP */
13790
13791/** @} */
13792
13793
13794
13795/*
13796 * Include the instructions
13797 */
13798#include "IEMAllInstructions.cpp.h"
13799
13800
13801
13802#ifdef LOG_ENABLED
13803/**
13804 * Logs the current instruction.
13805 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13806 * @param fSameCtx Set if we have the same context information as the VMM,
13807 * clear if we may have already executed an instruction in
13808 * our debug context. When clear, we assume IEMCPU holds
13809 * valid CPU mode info.
13810 *
13811 * The @a fSameCtx parameter is now misleading and obsolete.
13812 * @param pszFunction The IEM function doing the execution.
13813 */
13814IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13815{
13816# ifdef IN_RING3
13817 if (LogIs2Enabled())
13818 {
13819 char szInstr[256];
13820 uint32_t cbInstr = 0;
13821 if (fSameCtx)
13822 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13823 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13824 szInstr, sizeof(szInstr), &cbInstr);
13825 else
13826 {
13827 uint32_t fFlags = 0;
13828 switch (pVCpu->iem.s.enmCpuMode)
13829 {
13830 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13831 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13832 case IEMMODE_16BIT:
13833 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13834 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13835 else
13836 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13837 break;
13838 }
13839 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13840 szInstr, sizeof(szInstr), &cbInstr);
13841 }
13842
13843 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
13844 Log2(("**** %s\n"
13845 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13846 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13847 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13848 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13849 " %s\n"
13850 , pszFunction,
13851 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13852 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13853 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13854 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13855 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13856 szInstr));
13857
13858 if (LogIs3Enabled())
13859 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13860 }
13861 else
13862# endif
13863 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13864 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13865 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13866}
13867#endif /* LOG_ENABLED */
13868
13869
13870#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13871/**
13872 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13873 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13874 *
13875 * @returns Modified rcStrict.
13876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13877 * @param rcStrict The instruction execution status.
13878 */
13879static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13880{
13881 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13882 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13883 {
13884 /* VMX preemption timer takes priority over NMI-window exits. */
13885 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13886 {
13887 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13888 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13889 }
13890 /*
13891 * Check remaining intercepts.
13892 *
13893 * NMI-window and Interrupt-window VM-exits.
13894 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13895 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13896 *
13897 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13898 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13899 */
13900 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13901 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13902 && !TRPMHasTrap(pVCpu))
13903 {
13904 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13905 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13906 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13907 {
13908 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13909 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13910 }
13911 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13912 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13913 {
13914 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13915 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13916 }
13917 }
13918 }
13919 /* TPR-below threshold/APIC write has the highest priority. */
13920 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13921 {
13922 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13923 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13924 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13925 }
13926 /* MTF takes priority over VMX-preemption timer. */
13927 else
13928 {
13929 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13930 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13931 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13932 }
13933 return rcStrict;
13934}
13935#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13936
13937
13938/**
13939 * Makes status code adjustments (pass up from I/O and access handlers)
13940 * as well as maintaining statistics.
13941 *
13942 * @returns Strict VBox status code to pass up.
13943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13944 * @param rcStrict The status from executing an instruction.
13945 */
13946DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13947{
13948 if (rcStrict != VINF_SUCCESS)
13949 {
13950 if (RT_SUCCESS(rcStrict))
13951 {
13952 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13953 || rcStrict == VINF_IOM_R3_IOPORT_READ
13954 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13955 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13956 || rcStrict == VINF_IOM_R3_MMIO_READ
13957 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13958 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13959 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13960 || rcStrict == VINF_CPUM_R3_MSR_READ
13961 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13962 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13963 || rcStrict == VINF_EM_RAW_TO_R3
13964 || rcStrict == VINF_EM_TRIPLE_FAULT
13965 || rcStrict == VINF_GIM_R3_HYPERCALL
13966 /* raw-mode / virt handlers only: */
13967 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13968 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13969 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13970 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13971 || rcStrict == VINF_SELM_SYNC_GDT
13972 || rcStrict == VINF_CSAM_PENDING_ACTION
13973 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13974 /* nested hw.virt codes: */
13975 || rcStrict == VINF_VMX_VMEXIT
13976 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13977 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13978 || rcStrict == VINF_SVM_VMEXIT
13979 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13980/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13981 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13982#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13983 if ( rcStrict == VINF_VMX_VMEXIT
13984 && rcPassUp == VINF_SUCCESS)
13985 rcStrict = VINF_SUCCESS;
13986 else
13987#endif
13988#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13989 if ( rcStrict == VINF_SVM_VMEXIT
13990 && rcPassUp == VINF_SUCCESS)
13991 rcStrict = VINF_SUCCESS;
13992 else
13993#endif
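/* Merge in the pass-up status (if any): it is taken unless it is an EM scheduling status that does not rank above the current one (lower VINF_EM values rank higher). */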
13994 if (rcPassUp == VINF_SUCCESS)
13995 pVCpu->iem.s.cRetInfStatuses++;
13996 else if ( rcPassUp < VINF_EM_FIRST
13997 || rcPassUp > VINF_EM_LAST
13998 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13999 {
14000 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14001 pVCpu->iem.s.cRetPassUpStatus++;
14002 rcStrict = rcPassUp;
14003 }
14004 else
14005 {
14006 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14007 pVCpu->iem.s.cRetInfStatuses++;
14008 }
14009 }
14010 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14011 pVCpu->iem.s.cRetAspectNotImplemented++;
14012 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14013 pVCpu->iem.s.cRetInstrNotImplemented++;
14014 else
14015 pVCpu->iem.s.cRetErrStatuses++;
14016 }
14017 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14018 {
14019 pVCpu->iem.s.cRetPassUpStatus++;
14020 rcStrict = pVCpu->iem.s.rcPassUp;
14021 }
14022
14023 return rcStrict;
14024}
14025
14026
14027/**
14028 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14029 * IEMExecOneWithPrefetchedByPC.
14030 *
14031 * Similar code is found in IEMExecLots.
14032 *
14033 * @return Strict VBox status code.
14034 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14035 * @param fExecuteInhibit If set, execute the instruction following CLI,
14036 * POP SS and MOV SS,GR.
14037 * @param pszFunction The calling function name.
14038 */
14039DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
14040{
14041 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14042 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14043 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14044 RT_NOREF_PV(pszFunction);
14045
14046#ifdef IEM_WITH_SETJMP
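/* With IEM_WITH_SETJMP, fatal statuses from the opcode fetchers and memory helpers arrive via longjmp rather than being returned up the call chain; the setjmp below catches them. */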
14047 VBOXSTRICTRC rcStrict;
14048 jmp_buf JmpBuf;
14049 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14050 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14051 if ((rcStrict = setjmp(JmpBuf)) == 0)
14052 {
14053 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14054 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14055 }
14056 else
14057 pVCpu->iem.s.cLongJumps++;
14058 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14059#else
14060 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14061 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14062#endif
14063 if (rcStrict == VINF_SUCCESS)
14064 pVCpu->iem.s.cInstructions++;
14065 if (pVCpu->iem.s.cActiveMappings > 0)
14066 {
14067 Assert(rcStrict != VINF_SUCCESS);
14068 iemMemRollback(pVCpu);
14069 }
14070 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14071 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14072 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14073
14074//#ifdef DEBUG
14075// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14076//#endif
14077
14078#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14079 /*
14080 * Perform any VMX nested-guest instruction boundary actions.
14081 *
14082 * If any of these causes a VM-exit, we must skip executing the next
14083 * instruction (would run into stale page tables). A VM-exit makes sure
14084 * there is no interrupt-inhibition, so that should ensure we don't go
14085 * to try execute the next instruction. Clearing fExecuteInhibit is
14086 * problematic because of the setjmp/longjmp clobbering above.
14087 */
14088 if ( rcStrict == VINF_SUCCESS
14089 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14090 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14091 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14092#endif
14093
14094 /* Execute the next instruction as well if a cli, pop ss or
14095 mov ss, Gr has just completed successfully. */
14096 if ( fExecuteInhibit
14097 && rcStrict == VINF_SUCCESS
14098 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14099 && EMIsInhibitInterruptsActive(pVCpu))
14100 {
14101 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14102 if (rcStrict == VINF_SUCCESS)
14103 {
14104#ifdef LOG_ENABLED
14105 iemLogCurInstr(pVCpu, false, pszFunction);
14106#endif
14107#ifdef IEM_WITH_SETJMP
14108 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14109 if ((rcStrict = setjmp(JmpBuf)) == 0)
14110 {
14111 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14112 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14113 }
14114 else
14115 pVCpu->iem.s.cLongJumps++;
14116 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14117#else
14118 IEM_OPCODE_GET_NEXT_U8(&b);
14119 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14120#endif
14121 if (rcStrict == VINF_SUCCESS)
14122 pVCpu->iem.s.cInstructions++;
14123 if (pVCpu->iem.s.cActiveMappings > 0)
14124 {
14125 Assert(rcStrict != VINF_SUCCESS);
14126 iemMemRollback(pVCpu);
14127 }
14128 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14129 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14130 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14131 }
14132 else if (pVCpu->iem.s.cActiveMappings > 0)
14133 iemMemRollback(pVCpu);
14134 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14135 }
14136
14137 /*
14138 * Return value fiddling, statistics and sanity assertions.
14139 */
14140 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14141
14142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14144 return rcStrict;
14145}
14146
14147
14148/**
14149 * Execute one instruction.
14150 *
14151 * @return Strict VBox status code.
14152 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14153 */
14154VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14155{
14156 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14157#ifdef LOG_ENABLED
14158 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14159#endif
14160
14161 /*
14162 * Do the decoding and emulation.
14163 */
14164 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14165 if (rcStrict == VINF_SUCCESS)
14166 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14167 else if (pVCpu->iem.s.cActiveMappings > 0)
14168 iemMemRollback(pVCpu);
14169
14170 if (rcStrict != VINF_SUCCESS)
14171 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14172 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14173 return rcStrict;
14174}
14175
14176
14177VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14178{
14179 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14180
14181 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14182 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14183 if (rcStrict == VINF_SUCCESS)
14184 {
14185 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14186 if (pcbWritten)
14187 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14188 }
14189 else if (pVCpu->iem.s.cActiveMappings > 0)
14190 iemMemRollback(pVCpu);
14191
14192 return rcStrict;
14193}
14194
14195
14196VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14197 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14198{
14199 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14200
14201 VBOXSTRICTRC rcStrict;
14202 if ( cbOpcodeBytes
14203 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14204 {
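/* The caller has already fetched the opcode bytes at the current RIP, so feed them straight to the decoder instead of prefetching them from guest memory. */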
14205 iemInitDecoder(pVCpu, false, false);
14206#ifdef IEM_WITH_CODE_TLB
14207 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14208 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14209 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14210 pVCpu->iem.s.offCurInstrStart = 0;
14211 pVCpu->iem.s.offInstrNextByte = 0;
14212#else
14213 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14214 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14215#endif
14216 rcStrict = VINF_SUCCESS;
14217 }
14218 else
14219 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14220 if (rcStrict == VINF_SUCCESS)
14221 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14222 else if (pVCpu->iem.s.cActiveMappings > 0)
14223 iemMemRollback(pVCpu);
14224
14225 return rcStrict;
14226}
14227
14228
14229VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14230{
14231 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14232
14233 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14234 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14235 if (rcStrict == VINF_SUCCESS)
14236 {
14237 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14238 if (pcbWritten)
14239 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14240 }
14241 else if (pVCpu->iem.s.cActiveMappings > 0)
14242 iemMemRollback(pVCpu);
14243
14244 return rcStrict;
14245}
14246
14247
14248VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14249 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14250{
14251 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14252
14253 VBOXSTRICTRC rcStrict;
14254 if ( cbOpcodeBytes
14255 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14256 {
14257 iemInitDecoder(pVCpu, true, false);
14258#ifdef IEM_WITH_CODE_TLB
14259 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14260 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14261 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14262 pVCpu->iem.s.offCurInstrStart = 0;
14263 pVCpu->iem.s.offInstrNextByte = 0;
14264#else
14265 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14266 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14267#endif
14268 rcStrict = VINF_SUCCESS;
14269 }
14270 else
14271 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14272 if (rcStrict == VINF_SUCCESS)
14273 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14274 else if (pVCpu->iem.s.cActiveMappings > 0)
14275 iemMemRollback(pVCpu);
14276
14277 return rcStrict;
14278}
14279
14280
14281/**
14282 * For debugging DISGetParamSize, may come in handy.
14283 *
14284 * @returns Strict VBox status code.
14285 * @param pVCpu The cross context virtual CPU structure of the
14286 * calling EMT.
14287 * @param pCtxCore The context core structure.
14288 * @param OpcodeBytesPC The PC of the opcode bytes.
14289 * @param pvOpcodeBytes Prefetched opcode bytes.
14290 * @param cbOpcodeBytes Number of prefetched bytes.
14291 * @param pcbWritten Where to return the number of bytes written.
14292 * Optional.
14293 */
14294VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14295 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14296 uint32_t *pcbWritten)
14297{
14298 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14299
14300 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14301 VBOXSTRICTRC rcStrict;
14302 if ( cbOpcodeBytes
14303 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14304 {
14305 iemInitDecoder(pVCpu, true, false);
14306#ifdef IEM_WITH_CODE_TLB
14307 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14308 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14309 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14310 pVCpu->iem.s.offCurInstrStart = 0;
14311 pVCpu->iem.s.offInstrNextByte = 0;
14312#else
14313 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14314 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14315#endif
14316 rcStrict = VINF_SUCCESS;
14317 }
14318 else
14319 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14320 if (rcStrict == VINF_SUCCESS)
14321 {
14322 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14323 if (pcbWritten)
14324 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14325 }
14326 else if (pVCpu->iem.s.cActiveMappings > 0)
14327 iemMemRollback(pVCpu);
14328
14329 return rcStrict;
14330}
14331
14332
14333/**
14334 * For handling split cacheline lock operations when the host has split-lock
14335 * detection enabled.
14336 *
14337 * This will cause the interpreter to disregard the lock prefix and implicit
14338 * locking (xchg).
14339 *
14340 * @returns Strict VBox status code.
14341 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14342 */
14343VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14344{
14345 /*
14346 * Do the decoding and emulation.
14347 */
14348 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14349 if (rcStrict == VINF_SUCCESS)
14350 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14351 else if (pVCpu->iem.s.cActiveMappings > 0)
14352 iemMemRollback(pVCpu);
14353
14354 if (rcStrict != VINF_SUCCESS)
14355 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14356 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14357 return rcStrict;
14358}
14359
14360
14361VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14362{
14363 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14364 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
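/* (cPollRate is used as a simple mask in the run loop below, hence the power-of-two-minus-one requirement.) */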
14365
14366 /*
14367 * See if there is an interrupt pending in TRPM, inject it if we can.
14368 */
14369 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14370#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14371 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14372 if (fIntrEnabled)
14373 {
14374 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14375 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14376 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14377 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14378 else
14379 {
14380 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14381 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14382 }
14383 }
14384#else
14385 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14386#endif
14387
14388 /** @todo What if we are injecting an exception and not an interrupt? Is that
14389 * possible here? For now we assert it is indeed only an interrupt. */
14390 if ( fIntrEnabled
14391 && TRPMHasTrap(pVCpu)
14392 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14393 {
14394 uint8_t u8TrapNo;
14395 TRPMEVENT enmType;
14396 uint32_t uErrCode;
14397 RTGCPTR uCr2;
14398 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14399 AssertRC(rc2);
14400 Assert(enmType == TRPM_HARDWARE_INT);
14401 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14402 TRPMResetTrap(pVCpu);
14403#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14404 /* Injecting an event may cause a VM-exit. */
14405 if ( rcStrict != VINF_SUCCESS
14406 && rcStrict != VINF_IEM_RAISED_XCPT)
14407 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14408#else
14409 NOREF(rcStrict);
14410#endif
14411 }
14412
14413 /*
14414 * Initial decoder init w/ prefetch, then setup setjmp.
14415 */
14416 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14417 if (rcStrict == VINF_SUCCESS)
14418 {
14419#ifdef IEM_WITH_SETJMP
14420 jmp_buf JmpBuf;
14421 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14422 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14423 pVCpu->iem.s.cActiveMappings = 0;
14424 if ((rcStrict = setjmp(JmpBuf)) == 0)
14425#endif
14426 {
14427 /*
14428 * The run loop. We limit ourselves to 4096 instructions right now.
14429 */
14430 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14431 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14432 for (;;)
14433 {
14434 /*
14435 * Log the state.
14436 */
14437#ifdef LOG_ENABLED
14438 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14439#endif
14440
14441 /*
14442 * Do the decoding and emulation.
14443 */
14444 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14445 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14446 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14447 {
14448 Assert(pVCpu->iem.s.cActiveMappings == 0);
14449 pVCpu->iem.s.cInstructions++;
14450 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14451 {
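/* Check the remaining force flags: the masked-out ones are dealt with elsewhere, and pending APIC/PIC interrupts can be ignored while IF is clear; anything else ends the inner loop. */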
14452 uint64_t fCpu = pVCpu->fLocalForcedActions
14453 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14454 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14455 | VMCPU_FF_TLB_FLUSH
14456 | VMCPU_FF_INHIBIT_INTERRUPTS
14457 | VMCPU_FF_BLOCK_NMIS
14458 | VMCPU_FF_UNHALT ));
14459
14460 if (RT_LIKELY( ( !fCpu
14461 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14462 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14463 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14464 {
14465 if (cMaxInstructionsGccStupidity-- > 0)
14466 {
14467 /* Poll timers every now and then according to the caller's specs. */
14468 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14469 || !TMTimerPollBool(pVM, pVCpu))
14470 {
14471 Assert(pVCpu->iem.s.cActiveMappings == 0);
14472 iemReInitDecoder(pVCpu);
14473 continue;
14474 }
14475 }
14476 }
14477 }
14478 Assert(pVCpu->iem.s.cActiveMappings == 0);
14479 }
14480 else if (pVCpu->iem.s.cActiveMappings > 0)
14481 iemMemRollback(pVCpu);
14482 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14483 break;
14484 }
14485 }
14486#ifdef IEM_WITH_SETJMP
14487 else
14488 {
14489 if (pVCpu->iem.s.cActiveMappings > 0)
14490 iemMemRollback(pVCpu);
14491# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14492 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14493# endif
14494 pVCpu->iem.s.cLongJumps++;
14495 }
14496 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14497#endif
14498
14499 /*
14500 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14501 */
14502 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14503 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14504 }
14505 else
14506 {
14507 if (pVCpu->iem.s.cActiveMappings > 0)
14508 iemMemRollback(pVCpu);
14509
14510#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14511 /*
14512 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14513 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14514 */
14515 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14516#endif
14517 }
14518
14519 /*
14520 * Maybe re-enter raw-mode and log.
14521 */
14522 if (rcStrict != VINF_SUCCESS)
14523 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14524 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14525 if (pcInstructions)
14526 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14527 return rcStrict;
14528}
14529
14530
14531/**
14532 * Interface used by EMExecuteExec, does exit statistics and limits.
14533 *
14534 * @returns Strict VBox status code.
14535 * @param pVCpu The cross context virtual CPU structure.
14536 * @param fWillExit To be defined.
14537 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14538 * @param cMaxInstructions Maximum number of instructions to execute.
14539 * @param cMaxInstructionsWithoutExits
14540 * The max number of instructions without exits.
14541 * @param pStats Where to return statistics.
14542 */
14543VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14544 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14545{
14546 NOREF(fWillExit); /** @todo define flexible exit crits */
14547
14548 /*
14549 * Initialize return stats.
14550 */
14551 pStats->cInstructions = 0;
14552 pStats->cExits = 0;
14553 pStats->cMaxExitDistance = 0;
14554 pStats->cReserved = 0;
14555
14556 /*
14557 * Initial decoder init w/ prefetch, then setup setjmp.
14558 */
14559 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14560 if (rcStrict == VINF_SUCCESS)
14561 {
14562#ifdef IEM_WITH_SETJMP
14563 jmp_buf JmpBuf;
14564 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14565 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14566 pVCpu->iem.s.cActiveMappings = 0;
14567 if ((rcStrict = setjmp(JmpBuf)) == 0)
14568#endif
14569 {
14570#ifdef IN_RING0
14571 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14572#endif
14573 uint32_t cInstructionSinceLastExit = 0;
14574
14575 /*
14576 * The run loop. We limit ourselves to 4096 instructions right now.
14577 */
14578 PVM pVM = pVCpu->CTX_SUFF(pVM);
14579 for (;;)
14580 {
14581 /*
14582 * Log the state.
14583 */
14584#ifdef LOG_ENABLED
14585 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14586#endif
14587
14588 /*
14589 * Do the decoding and emulation.
14590 */
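/* Remember the potential-exit count so we can tell below whether this instruction caused an exit. */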
14591 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14592
14593 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14594 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14595
14596 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14597 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14598 {
14599 pStats->cExits += 1;
14600 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14601 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14602 cInstructionSinceLastExit = 0;
14603 }
14604
14605 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14606 {
14607 Assert(pVCpu->iem.s.cActiveMappings == 0);
14608 pVCpu->iem.s.cInstructions++;
14609 pStats->cInstructions++;
14610 cInstructionSinceLastExit++;
14611 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14612 {
14613 uint64_t fCpu = pVCpu->fLocalForcedActions
14614 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14615 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14616 | VMCPU_FF_TLB_FLUSH
14617 | VMCPU_FF_INHIBIT_INTERRUPTS
14618 | VMCPU_FF_BLOCK_NMIS
14619 | VMCPU_FF_UNHALT ));
14620
14621 if (RT_LIKELY( ( ( !fCpu
14622 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14623 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14624 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14625 || pStats->cInstructions < cMinInstructions))
14626 {
14627 if (pStats->cInstructions < cMaxInstructions)
14628 {
14629 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14630 {
14631#ifdef IN_RING0
14632 if ( !fCheckPreemptionPending
14633 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14634#endif
14635 {
14636 Assert(pVCpu->iem.s.cActiveMappings == 0);
14637 iemReInitDecoder(pVCpu);
14638 continue;
14639 }
14640#ifdef IN_RING0
14641 rcStrict = VINF_EM_RAW_INTERRUPT;
14642 break;
14643#endif
14644 }
14645 }
14646 }
14647 Assert(!(fCpu & VMCPU_FF_IEM));
14648 }
14649 Assert(pVCpu->iem.s.cActiveMappings == 0);
14650 }
14651 else if (pVCpu->iem.s.cActiveMappings > 0)
14652 iemMemRollback(pVCpu);
14653 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14654 break;
14655 }
14656 }
14657#ifdef IEM_WITH_SETJMP
14658 else
14659 {
14660 if (pVCpu->iem.s.cActiveMappings > 0)
14661 iemMemRollback(pVCpu);
14662 pVCpu->iem.s.cLongJumps++;
14663 }
14664 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14665#endif
14666
14667 /*
14668 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14669 */
14670 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14671 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14672 }
14673 else
14674 {
14675 if (pVCpu->iem.s.cActiveMappings > 0)
14676 iemMemRollback(pVCpu);
14677
14678#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14679 /*
14680 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14681 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14682 */
14683 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14684#endif
14685 }
14686
14687 /*
14688 * Maybe re-enter raw-mode and log.
14689 */
14690 if (rcStrict != VINF_SUCCESS)
14691 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14692 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14693 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14694 return rcStrict;
14695}
14696
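/* Usage sketch (hypothetical caller, not part of this file): an EM-side loop would
 * typically pass a stack-allocated IEMEXECFOREXITSTATS and burst limits of its own
 * choosing, e.g.:
 *
 *     IEMEXECFOREXITSTATS Stats;
 *     VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 1, 4096, 512, &Stats);
 *
 * The limit values above are illustrative only; fWillExit is currently unused
 * (see the todo at the top of the function). */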
14697
14698/**
14699 * Injects a trap, fault, abort, software interrupt or external interrupt.
14700 *
14701 * The parameter list matches TRPMQueryTrapAll pretty closely.
14702 *
14703 * @returns Strict VBox status code.
14704 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14705 * @param u8TrapNo The trap number.
14706 * @param enmType What type is it (trap/fault/abort), software
14707 * interrupt or hardware interrupt.
14708 * @param uErrCode The error code if applicable.
14709 * @param uCr2 The CR2 value if applicable.
14710 * @param cbInstr The instruction length (only relevant for
14711 * software interrupts).
14712 */
14713VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14714 uint8_t cbInstr)
14715{
14716 iemInitDecoder(pVCpu, false, false);
14717#ifdef DBGFTRACE_ENABLED
14718 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14719 u8TrapNo, enmType, uErrCode, uCr2);
14720#endif
14721
14722 uint32_t fFlags;
14723 switch (enmType)
14724 {
14725 case TRPM_HARDWARE_INT:
14726 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14727 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14728 uErrCode = uCr2 = 0;
14729 break;
14730
14731 case TRPM_SOFTWARE_INT:
14732 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14733 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14734 uErrCode = uCr2 = 0;
14735 break;
14736
14737 case TRPM_TRAP:
14738 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14739 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14740 if (u8TrapNo == X86_XCPT_PF)
14741 fFlags |= IEM_XCPT_FLAGS_CR2;
14742 switch (u8TrapNo)
14743 {
14744 case X86_XCPT_DF:
14745 case X86_XCPT_TS:
14746 case X86_XCPT_NP:
14747 case X86_XCPT_SS:
14748 case X86_XCPT_PF:
14749 case X86_XCPT_AC:
14750 case X86_XCPT_GP:
14751 fFlags |= IEM_XCPT_FLAGS_ERR;
14752 break;
14753 }
14754 break;
14755
14756 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14757 }
14758
14759 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14760
14761 if (pVCpu->iem.s.cActiveMappings > 0)
14762 iemMemRollback(pVCpu);
14763
14764 return rcStrict;
14765}
14766
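/* Example (hypothetical caller): injecting a page fault with an error code and
 * fault address; cbInstr is only consumed for software interrupts:
 *
 *     rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0);
 */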
14767
14768/**
14769 * Injects the active TRPM event.
14770 *
14771 * @returns Strict VBox status code.
14772 * @param pVCpu The cross context virtual CPU structure.
14773 */
14774VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14775{
14776#ifndef IEM_IMPLEMENTS_TASKSWITCH
14777 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14778#else
14779 uint8_t u8TrapNo;
14780 TRPMEVENT enmType;
14781 uint32_t uErrCode;
14782 RTGCUINTPTR uCr2;
14783 uint8_t cbInstr;
14784 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14785 if (RT_FAILURE(rc))
14786 return rc;
14787
14788 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14789 * ICEBP \#DB injection as a special case. */
14790 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14791#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14792 if (rcStrict == VINF_SVM_VMEXIT)
14793 rcStrict = VINF_SUCCESS;
14794#endif
14795#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14796 if (rcStrict == VINF_VMX_VMEXIT)
14797 rcStrict = VINF_SUCCESS;
14798#endif
14799 /** @todo Are there any other codes that imply the event was successfully
14800 * delivered to the guest? See @bugref{6607}. */
14801 if ( rcStrict == VINF_SUCCESS
14802 || rcStrict == VINF_IEM_RAISED_XCPT)
14803 TRPMResetTrap(pVCpu);
14804
14805 return rcStrict;
14806#endif
14807}
14808
14809
14810VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14811{
14812 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14813 return VERR_NOT_IMPLEMENTED;
14814}
14815
14816
14817VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14818{
14819 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14820 return VERR_NOT_IMPLEMENTED;
14821}
14822
14823
14824#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14825/**
14826 * Executes an IRET instruction with the default operand size.
14827 *
14828 * This is for PATM.
14829 *
14830 * @returns VBox status code.
14831 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14832 * @param pCtxCore The register frame.
14833 */
14834VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14835{
14836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14837
14838 iemCtxCoreToCtx(pCtx, pCtxCore);
14839 iemInitDecoder(pVCpu);
14840 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14841 if (rcStrict == VINF_SUCCESS)
14842 iemCtxToCtxCore(pCtxCore, pCtx);
14843 else
14844 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14845 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14846 return rcStrict;
14847}
14848#endif
14849
14850
14851/**
14852 * Macro used by the IEMExec* method to check the given instruction length.
14853 *
14854 * Will return on failure!
14855 *
14856 * @param a_cbInstr The given instruction length.
14857 * @param a_cbMin The minimum length.
14858 */
14859#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14860 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14861 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
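/* Note: the unsigned subtraction above folds the two-sided range check
 *       a_cbMin <= a_cbInstr <= 15 into a single compare; a value below a_cbMin
 *       wraps around to a huge unsigned number and fails the test as well. */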
14862
14863
14864/**
14865 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14866 *
14867 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14868 *
14869 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14871 * @param rcStrict The status code to fiddle.
14872 */
14873DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14874{
14875 iemUninitExec(pVCpu);
14876 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14877}
14878
14879
14880/**
14881 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14882 *
14883 * This API ASSUMES that the caller has already verified that the guest code is
14884 * allowed to access the I/O port. (The I/O port is in the DX register in the
14885 * guest state.)
14886 *
14887 * @returns Strict VBox status code.
14888 * @param pVCpu The cross context virtual CPU structure.
14889 * @param cbValue The size of the I/O port access (1, 2, or 4).
14890 * @param enmAddrMode The addressing mode.
14891 * @param fRepPrefix Indicates whether a repeat prefix is used
14892 * (doesn't matter which for this instruction).
14893 * @param cbInstr The instruction length in bytes.
14894 * @param iEffSeg The effective segment address.
14895 * @param fIoChecked Whether the access to the I/O port has been
14896 * checked or not. It's typically checked in the
14897 * HM scenario.
14898 */
14899VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14900 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14901{
14902 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14903 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14904
14905 /*
14906 * State init.
14907 */
14908 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14909
14910 /*
14911 * Switch orgy for getting to the right handler.
14912 */
14913 VBOXSTRICTRC rcStrict;
14914 if (fRepPrefix)
14915 {
14916 switch (enmAddrMode)
14917 {
14918 case IEMMODE_16BIT:
14919 switch (cbValue)
14920 {
14921 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14922 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14923 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14924 default:
14925 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14926 }
14927 break;
14928
14929 case IEMMODE_32BIT:
14930 switch (cbValue)
14931 {
14932 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14933 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14934 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14935 default:
14936 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14937 }
14938 break;
14939
14940 case IEMMODE_64BIT:
14941 switch (cbValue)
14942 {
14943 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14944 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14945 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14946 default:
14947 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14948 }
14949 break;
14950
14951 default:
14952 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14953 }
14954 }
14955 else
14956 {
14957 switch (enmAddrMode)
14958 {
14959 case IEMMODE_16BIT:
14960 switch (cbValue)
14961 {
14962 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14963 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14964 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14965 default:
14966 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14967 }
14968 break;
14969
14970 case IEMMODE_32BIT:
14971 switch (cbValue)
14972 {
14973 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14974 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14975 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14976 default:
14977 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14978 }
14979 break;
14980
14981 case IEMMODE_64BIT:
14982 switch (cbValue)
14983 {
14984 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14985 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14986 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14987 default:
14988 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14989 }
14990 break;
14991
14992 default:
14993 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14994 }
14995 }
14996
14997 if (pVCpu->iem.s.cActiveMappings)
14998 iemMemRollback(pVCpu);
14999
15000 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15001}
15002
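/* Example (hypothetical HM caller): a "rep outsb" with 32-bit addressing and DS as
 * the effective segment, with the I/O port access already checked, would be
 * forwarded as:
 *
 *     rcStrict = IEMExecStringIoWrite(pVCpu, 1, IEMMODE_32BIT, true, cbInstr, X86_SREG_DS, true);
 */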
15003
15004/**
15005 * Interface for HM and EM for executing string I/O IN (read) instructions.
15006 *
15007 * This API ASSUMES that the caller has already verified that the guest code is
15008 * allowed to access the I/O port. (The I/O port is in the DX register in the
15009 * guest state.)
15010 *
15011 * @returns Strict VBox status code.
15012 * @param pVCpu The cross context virtual CPU structure.
15013 * @param cbValue The size of the I/O port access (1, 2, or 4).
15014 * @param enmAddrMode The addressing mode.
15015 * @param fRepPrefix Indicates whether a repeat prefix is used
15016 * (doesn't matter which for this instruction).
15017 * @param cbInstr The instruction length in bytes.
15018 * @param fIoChecked Whether the access to the I/O port has been
15019 * checked or not. It's typically checked in the
15020 * HM scenario.
15021 */
15022VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15023 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15024{
15025 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15026
15027 /*
15028 * State init.
15029 */
15030 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15031
15032 /*
15033 * Switch orgy for getting to the right handler.
15034 */
15035 VBOXSTRICTRC rcStrict;
15036 if (fRepPrefix)
15037 {
15038 switch (enmAddrMode)
15039 {
15040 case IEMMODE_16BIT:
15041 switch (cbValue)
15042 {
15043 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15044 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15045 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15046 default:
15047 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15048 }
15049 break;
15050
15051 case IEMMODE_32BIT:
15052 switch (cbValue)
15053 {
15054 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15055 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15056 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15057 default:
15058 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15059 }
15060 break;
15061
15062 case IEMMODE_64BIT:
15063 switch (cbValue)
15064 {
15065 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15066 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15067 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15068 default:
15069 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15070 }
15071 break;
15072
15073 default:
15074 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15075 }
15076 }
15077 else
15078 {
15079 switch (enmAddrMode)
15080 {
15081 case IEMMODE_16BIT:
15082 switch (cbValue)
15083 {
15084 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15085 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15086 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15087 default:
15088 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15089 }
15090 break;
15091
15092 case IEMMODE_32BIT:
15093 switch (cbValue)
15094 {
15095 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15096 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15097 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15098 default:
15099 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15100 }
15101 break;
15102
15103 case IEMMODE_64BIT:
15104 switch (cbValue)
15105 {
15106 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15107 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15108 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15109 default:
15110 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15111 }
15112 break;
15113
15114 default:
15115 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15116 }
15117 }
15118
15119 if ( pVCpu->iem.s.cActiveMappings == 0
15120 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
15121 { /* likely */ }
15122 else
15123 {
15124 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
15125 iemMemRollback(pVCpu);
15126 }
15127 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15128}
15129
15130
15131/**
15132 * Interface for rawmode to execute an OUT instruction.
15133 *
15134 * @returns Strict VBox status code.
15135 * @param pVCpu The cross context virtual CPU structure.
15136 * @param cbInstr The instruction length in bytes.
15137 * @param u16Port The port to write to.
15138 * @param fImm Whether the port is specified using an immediate operand or
15139 * using the implicit DX register.
15140 * @param cbReg The register size.
15141 *
15142 * @remarks In ring-0 not all of the state needs to be synced in.
15143 */
15144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15145{
15146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15147 Assert(cbReg <= 4 && cbReg != 3);
15148
15149 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15150 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15151 Assert(!pVCpu->iem.s.cActiveMappings);
15152 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15153}
15154
15155
15156/**
15157 * Interface for rawmode to execute an IN instruction.
15158 *
15159 * @returns Strict VBox status code.
15160 * @param pVCpu The cross context virtual CPU structure.
15161 * @param cbInstr The instruction length in bytes.
15162 * @param u16Port The port to read.
15163 * @param fImm Whether the port is specified using an immediate operand or
15164 * using the implicit DX.
15165 * @param cbReg The register size.
15166 */
15167VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15168{
15169 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15170 Assert(cbReg <= 4 && cbReg != 3);
15171
15172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15174 Assert(!pVCpu->iem.s.cActiveMappings);
15175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15176}
15177
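/* Example (hypothetical caller): a one-byte IN from the port held in DX would be
 * forwarded as
 *     rcStrict = IEMExecDecodedIn(pVCpu, cbInstr, u16Port, false, 1);
 * where u16Port has been fetched from the guest DX register by the caller. */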
15178
15179/**
15180 * Interface for HM and EM to write to a CRx register.
15181 *
15182 * @returns Strict VBox status code.
15183 * @param pVCpu The cross context virtual CPU structure.
15184 * @param cbInstr The instruction length in bytes.
15185 * @param iCrReg The control register number (destination).
15186 * @param iGReg The general purpose register number (source).
15187 *
15188 * @remarks In ring-0 not all of the state needs to be synced in.
15189 */
15190VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15191{
15192 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15193 Assert(iCrReg < 16);
15194 Assert(iGReg < 16);
15195
15196 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15197 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15198 Assert(!pVCpu->iem.s.cActiveMappings);
15199 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15200}
15201
15202
15203/**
15204 * Interface for HM and EM to read from a CRx register.
15205 *
15206 * @returns Strict VBox status code.
15207 * @param pVCpu The cross context virtual CPU structure.
15208 * @param cbInstr The instruction length in bytes.
15209 * @param iGReg The general purpose register number (destination).
15210 * @param iCrReg The control register number (source).
15211 *
15212 * @remarks In ring-0 not all of the state needs to be synced in.
15213 */
15214VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15215{
15216 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15217 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15218 | CPUMCTX_EXTRN_APIC_TPR);
15219 Assert(iCrReg < 16);
15220 Assert(iGReg < 16);
15221
15222 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15223 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15224 Assert(!pVCpu->iem.s.cActiveMappings);
15225 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15226}
15227
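/* Examples (hypothetical caller): "mov cr3, rax" and "mov rbx, cr0" would be
 * forwarded as
 *     rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, X86_GREG_xAX);
 *     rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, X86_GREG_xBX, 0);
 * where 3 and 0 are the control register indices and X86_GREG_xAX / X86_GREG_xBX
 * the general purpose register indices. */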
15228
15229/**
15230 * Interface for HM and EM to clear the CR0[TS] bit.
15231 *
15232 * @returns Strict VBox status code.
15233 * @param pVCpu The cross context virtual CPU structure.
15234 * @param cbInstr The instruction length in bytes.
15235 *
15236 * @remarks In ring-0 not all of the state needs to be synced in.
15237 */
15238VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15239{
15240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15241
15242 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15244 Assert(!pVCpu->iem.s.cActiveMappings);
15245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15246}
15247
15248
15249/**
15250 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15251 *
15252 * @returns Strict VBox status code.
15253 * @param pVCpu The cross context virtual CPU structure.
15254 * @param cbInstr The instruction length in bytes.
15255 * @param uValue The value to load into CR0.
15256 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15257 * memory operand. Otherwise pass NIL_RTGCPTR.
15258 *
15259 * @remarks In ring-0 not all of the state needs to be synced in.
15260 */
15261VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15262{
15263 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15264
15265 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15266 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15267 Assert(!pVCpu->iem.s.cActiveMappings);
15268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15269}
15270
15271
15272/**
15273 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15274 *
15275 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15276 *
15277 * @returns Strict VBox status code.
15278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15279 * @param cbInstr The instruction length in bytes.
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 * @thread EMT(pVCpu)
15282 */
15283VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15284{
15285 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15286
15287 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15288 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15289 Assert(!pVCpu->iem.s.cActiveMappings);
15290 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15291}
15292
15293
15294/**
15295 * Interface for HM and EM to emulate the WBINVD instruction.
15296 *
15297 * @returns Strict VBox status code.
15298 * @param pVCpu The cross context virtual CPU structure.
15299 * @param cbInstr The instruction length in bytes.
15300 *
15301 * @remarks In ring-0 not all of the state needs to be synced in.
15302 */
15303VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15304{
15305 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15306
15307 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15309 Assert(!pVCpu->iem.s.cActiveMappings);
15310 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15311}
15312
15313
15314/**
15315 * Interface for HM and EM to emulate the INVD instruction.
15316 *
15317 * @returns Strict VBox status code.
15318 * @param pVCpu The cross context virtual CPU structure.
15319 * @param cbInstr The instruction length in bytes.
15320 *
15321 * @remarks In ring-0 not all of the state needs to be synced in.
15322 */
15323VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15324{
15325 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15326
15327 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15328 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15329 Assert(!pVCpu->iem.s.cActiveMappings);
15330 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15331}
15332
15333
15334/**
15335 * Interface for HM and EM to emulate the INVLPG instruction.
15336 *
15337 * @returns Strict VBox status code.
15338 * @retval VINF_PGM_SYNC_CR3
15339 *
15340 * @param pVCpu The cross context virtual CPU structure.
15341 * @param cbInstr The instruction length in bytes.
15342 * @param GCPtrPage The effective address of the page to invalidate.
15343 *
15344 * @remarks In ring-0 not all of the state needs to be synced in.
15345 */
15346VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15347{
15348 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the INVPCID instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_PGM_SYNC_CR3
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 * @param iEffSeg The effective segment register.
15366 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15367 * @param uType The invalidation type.
15368 *
15369 * @remarks In ring-0 not all of the state needs to be synced in.
15370 */
15371VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15372 uint64_t uType)
15373{
15374 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15375
15376 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15377 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15378 Assert(!pVCpu->iem.s.cActiveMappings);
15379 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15380}
15381
15382
15383/**
15384 * Interface for HM and EM to emulate the CPUID instruction.
15385 *
15386 * @returns Strict VBox status code.
15387 *
15388 * @param pVCpu The cross context virtual CPU structure.
15389 * @param cbInstr The instruction length in bytes.
15390 *
15391 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15392 */
15393VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15394{
15395 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15397
15398 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15399 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15400 Assert(!pVCpu->iem.s.cActiveMappings);
15401 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15402}
15403
15404
15405/**
15406 * Interface for HM and EM to emulate the RDPMC instruction.
15407 *
15408 * @returns Strict VBox status code.
15409 *
15410 * @param pVCpu The cross context virtual CPU structure.
15411 * @param cbInstr The instruction length in bytes.
15412 *
15413 * @remarks Not all of the state needs to be synced in.
15414 */
15415VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15416{
15417 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15418 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15419
15420 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15421 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15422 Assert(!pVCpu->iem.s.cActiveMappings);
15423 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15424}
15425
15426
15427/**
15428 * Interface for HM and EM to emulate the RDTSC instruction.
15429 *
15430 * @returns Strict VBox status code.
15431 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15432 *
15433 * @param pVCpu The cross context virtual CPU structure.
15434 * @param cbInstr The instruction length in bytes.
15435 *
15436 * @remarks Not all of the state needs to be synced in.
15437 */
15438VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15439{
15440 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15441 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15442
15443 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15444 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15445 Assert(!pVCpu->iem.s.cActiveMappings);
15446 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15447}
15448
15449
15450/**
15451 * Interface for HM and EM to emulate the RDTSCP instruction.
15452 *
15453 * @returns Strict VBox status code.
15454 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15455 *
15456 * @param pVCpu The cross context virtual CPU structure.
15457 * @param cbInstr The instruction length in bytes.
15458 *
15459 * @remarks Not all of the state needs to be synced in. Recommended
15460 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15461 */
15462VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15463{
15464 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15465 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15466
15467 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15468 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15469 Assert(!pVCpu->iem.s.cActiveMappings);
15470 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15471}
15472
15473
15474/**
15475 * Interface for HM and EM to emulate the RDMSR instruction.
15476 *
15477 * @returns Strict VBox status code.
15478 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15479 *
15480 * @param pVCpu The cross context virtual CPU structure.
15481 * @param cbInstr The instruction length in bytes.
15482 *
15483 * @remarks Not all of the state needs to be synced in. Requires RCX and
15484 * (currently) all MSRs.
15485 */
15486VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15487{
15488 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15489 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15490
15491 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15492 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15493 Assert(!pVCpu->iem.s.cActiveMappings);
15494 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15495}
15496
15497
15498/**
15499 * Interface for HM and EM to emulate the WRMSR instruction.
15500 *
15501 * @returns Strict VBox status code.
15502 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15503 *
15504 * @param pVCpu The cross context virtual CPU structure.
15505 * @param cbInstr The instruction length in bytes.
15506 *
15507 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15508 * and (currently) all MSRs.
15509 */
15510VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15511{
15512 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15513 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15514 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15515
15516 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15517 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15518 Assert(!pVCpu->iem.s.cActiveMappings);
15519 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15520}
15521
15522
15523/**
15524 * Interface for HM and EM to emulate the MONITOR instruction.
15525 *
15526 * @returns Strict VBox status code.
15527 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15528 *
15529 * @param pVCpu The cross context virtual CPU structure.
15530 * @param cbInstr The instruction length in bytes.
15531 *
15532 * @remarks Not all of the state needs to be synced in.
15533 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15534 * are used.
15535 */
15536VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15537{
15538 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15539 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15540
15541 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15542 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15543 Assert(!pVCpu->iem.s.cActiveMappings);
15544 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15545}
15546
15547
15548/**
15549 * Interface for HM and EM to emulate the MWAIT instruction.
15550 *
15551 * @returns Strict VBox status code.
15552 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15553 *
15554 * @param pVCpu The cross context virtual CPU structure.
15555 * @param cbInstr The instruction length in bytes.
15556 *
15557 * @remarks Not all of the state needs to be synced in.
15558 */
15559VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15560{
15561 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15562 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15563
15564 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15565 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15566 Assert(!pVCpu->iem.s.cActiveMappings);
15567 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15568}
15569
15570
15571/**
15572 * Interface for HM and EM to emulate the HLT instruction.
15573 *
15574 * @returns Strict VBox status code.
15575 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15576 *
15577 * @param pVCpu The cross context virtual CPU structure.
15578 * @param cbInstr The instruction length in bytes.
15579 *
15580 * @remarks Not all of the state needs to be synced in.
15581 */
15582VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15583{
15584 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15585
15586 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15587 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15588 Assert(!pVCpu->iem.s.cActiveMappings);
15589 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15590}
15591
15592
15593/**
15594 * Checks if IEM is in the process of delivering an event (interrupt or
15595 * exception).
15596 *
15597 * @returns true if we're in the process of raising an interrupt or exception,
15598 * false otherwise.
15599 * @param pVCpu The cross context virtual CPU structure.
15600 * @param puVector Where to store the vector associated with the
15601 * currently delivered event, optional.
15602 * @param pfFlags Where to store the event delivery flags (see
15603 * IEM_XCPT_FLAGS_XXX), optional.
15604 * @param puErr Where to store the error code associated with the
15605 * event, optional.
15606 * @param puCr2 Where to store the CR2 associated with the event,
15607 * optional.
15608 * @remarks The caller should check the flags to determine if the error code and
15609 * CR2 are valid for the event.
15610 */
15611VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15612{
15613 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15614 if (fRaisingXcpt)
15615 {
15616 if (puVector)
15617 *puVector = pVCpu->iem.s.uCurXcpt;
15618 if (pfFlags)
15619 *pfFlags = pVCpu->iem.s.fCurXcpt;
15620 if (puErr)
15621 *puErr = pVCpu->iem.s.uCurXcptErr;
15622 if (puCr2)
15623 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15624 }
15625 return fRaisingXcpt;
15626}
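
/* Usage sketch (hypothetical caller): the out parameters are only meaningful when
 * true is returned, and the error code / CR2 only when the corresponding
 * IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 bits are set in the returned flags:
 *
 *     uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *         LogFlow(("delivering vector %#x flags=%#x\n", uVector, fFlags));
 */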
15627
15628#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15629
15630/**
15631 * Interface for HM and EM to emulate the CLGI instruction.
15632 *
15633 * @returns Strict VBox status code.
15634 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15635 * @param cbInstr The instruction length in bytes.
15636 * @thread EMT(pVCpu)
15637 */
15638VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15639{
15640 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15641
15642 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15643 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15644 Assert(!pVCpu->iem.s.cActiveMappings);
15645 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15646}
15647
15648
15649/**
15650 * Interface for HM and EM to emulate the STGI instruction.
15651 *
15652 * @returns Strict VBox status code.
15653 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15654 * @param cbInstr The instruction length in bytes.
15655 * @thread EMT(pVCpu)
15656 */
15657VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15658{
15659 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15660
15661 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15662 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15663 Assert(!pVCpu->iem.s.cActiveMappings);
15664 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15665}
15666
15667
15668/**
15669 * Interface for HM and EM to emulate the VMLOAD instruction.
15670 *
15671 * @returns Strict VBox status code.
15672 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15673 * @param cbInstr The instruction length in bytes.
15674 * @thread EMT(pVCpu)
15675 */
15676VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15677{
15678 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15679
15680 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15681 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15682 Assert(!pVCpu->iem.s.cActiveMappings);
15683 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15684}
15685
15686
15687/**
15688 * Interface for HM and EM to emulate the VMSAVE instruction.
15689 *
15690 * @returns Strict VBox status code.
15691 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15692 * @param cbInstr The instruction length in bytes.
15693 * @thread EMT(pVCpu)
15694 */
15695VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15696{
15697 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15698
15699 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15700 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15701 Assert(!pVCpu->iem.s.cActiveMappings);
15702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15703}
15704
15705
15706/**
15707 * Interface for HM and EM to emulate the INVLPGA instruction.
15708 *
15709 * @returns Strict VBox status code.
15710 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15711 * @param cbInstr The instruction length in bytes.
15712 * @thread EMT(pVCpu)
15713 */
15714VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15715{
15716 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15717
15718 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15719 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15720 Assert(!pVCpu->iem.s.cActiveMappings);
15721 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15722}
15723
15724
15725/**
15726 * Interface for HM and EM to emulate the VMRUN instruction.
15727 *
15728 * @returns Strict VBox status code.
15729 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15730 * @param cbInstr The instruction length in bytes.
15731 * @thread EMT(pVCpu)
15732 */
15733VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15734{
15735 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15736 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15737
15738 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15739 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15740 Assert(!pVCpu->iem.s.cActiveMappings);
15741 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15742}
15743
15744
15745/**
15746 * Interface for HM and EM to emulate \#VMEXIT.
15747 *
15748 * @returns Strict VBox status code.
15749 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15750 * @param uExitCode The exit code.
15751 * @param uExitInfo1 The exit info. 1 field.
15752 * @param uExitInfo2 The exit info. 2 field.
15753 * @thread EMT(pVCpu)
15754 */
15755VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15756{
15757 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15758 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15759 if (pVCpu->iem.s.cActiveMappings)
15760 iemMemRollback(pVCpu);
15761 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15762}
15763
15764#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15765
15766#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15767
15768/**
15769 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15770 *
15771 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15772 * are performed. Bounds checks are done in strict builds only.
15773 *
15774 * @param pVmcs Pointer to the virtual VMCS.
15775 * @param u64VmcsField The VMCS field.
15776 * @param pu64Dst Where to store the VMCS value.
15777 *
15778 * @remarks May be called with interrupts disabled.
15779 * @todo This should probably be moved to CPUM someday.
15780 */
15781VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15782{
15783 AssertPtr(pVmcs);
15784 AssertPtr(pu64Dst);
15785 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15786}
15787
15788
15789/**
15790 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15791 *
15792 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15793 * are performed. Bounds checks are done in strict builds only.
15794 *
15795 * @param pVmcs Pointer to the virtual VMCS.
15796 * @param u64VmcsField The VMCS field.
15797 * @param u64Val The value to write.
15798 *
15799 * @remarks May be called with interrupts disabled.
15800 * @todo This should probably be moved to CPUM someday.
15801 */
15802VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15803{
15804 AssertPtr(pVmcs);
15805 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15806}
15807
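/* Read-modify-write sketch (hypothetical caller; remember that no VMREAD/VMWRITE
 * instruction checks are performed by these helpers):
 *
 *     uint64_t u64Val;
 *     IEMReadVmxVmcsField(pVmcs, u64VmcsField, &u64Val);
 *     IEMWriteVmxVmcsField(pVmcs, u64VmcsField, u64Val | fBitsToSet);
 *
 * fBitsToSet is illustrative only. */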
15808
15809/**
15810 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15811 *
15812 * @returns Strict VBox status code.
15813 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15814 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15815 * the x2APIC device.
15816 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15817 *
15818 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15819 * @param idMsr The MSR being read or written.
15820 * @param pu64Value Pointer to the value being written or where to store the
15821 * value being read.
15822 * @param fWrite Whether this is an MSR write or read access.
15823 * @thread EMT(pVCpu)
15824 */
15825VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15826{
15827 Assert(pu64Value);
15828
15829 VBOXSTRICTRC rcStrict;
15830 if (fWrite)
15831 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15832 else
15833 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15834 Assert(!pVCpu->iem.s.cActiveMappings);
15835 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15836
15837}
15838
15839
15840/**
15841 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15842 *
15843 * @returns Strict VBox status code.
15844 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15845 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15846 *
15847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15848 * @param pExitInfo Pointer to the VM-exit information.
15849 * @param pExitEventInfo Pointer to the VM-exit event information.
15850 * @thread EMT(pVCpu)
15851 */
15852VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15853{
15854 Assert(pExitInfo);
15855 Assert(pExitEventInfo);
15856 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15857 Assert(!pVCpu->iem.s.cActiveMappings);
15858 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15859
15860}
15861
15862
15863/**
15864 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15865 * VM-exit.
15866 *
15867 * @returns Strict VBox status code.
15868 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15869 * @thread EMT(pVCpu)
15870 */
15871VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15872{
15873 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15874 Assert(!pVCpu->iem.s.cActiveMappings);
15875 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15876}
15877
15878
15879/**
15880 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15881 *
15882 * @returns Strict VBox status code.
15883 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15884 * @thread EMT(pVCpu)
15885 */
15886VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15887{
15888 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15889 Assert(!pVCpu->iem.s.cActiveMappings);
15890 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15891}
15892
15893
15894/**
15895 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15896 *
15897 * @returns Strict VBox status code.
15898 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15899 * @param uVector The external interrupt vector (pass 0 if the external
15900 * interrupt is still pending).
15901 * @param fIntPending Whether the external interrupt is pending or
15902 * acknowledged in the interrupt controller.
15903 * @thread EMT(pVCpu)
15904 */
15905VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15906{
15907 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15908 Assert(!pVCpu->iem.s.cActiveMappings);
15909 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15910}
15911
15912
15913/**
15914 * Interface for HM and EM to emulate VM-exit due to exceptions.
15915 *
15916 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15917 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15918 *
15919 * @returns Strict VBox status code.
15920 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15921 * @param pExitInfo Pointer to the VM-exit information.
15922 * @param pExitEventInfo Pointer to the VM-exit event information.
15923 * @thread EMT(pVCpu)
15924 */
15925VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15926{
15927 Assert(pExitInfo);
15928 Assert(pExitEventInfo);
15929 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15930 Assert(!pVCpu->iem.s.cActiveMappings);
15931 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15932}
15933
15934
15935/**
15936 * Interface for HM and EM to emulate VM-exit due to NMIs.
15937 *
15938 * @returns Strict VBox status code.
15939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15940 * @thread EMT(pVCpu)
15941 */
15942VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15943{
15944 VMXVEXITINFO ExitInfo;
15945 RT_ZERO(ExitInfo);
15946 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15947
15948 VMXVEXITEVENTINFO ExitEventInfo;
15949 RT_ZERO(ExitEventInfo);
15950 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15951 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15952 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15953
15954 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15955 Assert(!pVCpu->iem.s.cActiveMappings);
15956 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15957}
15958
15959
15960/**
15961 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15962 *
15963 * @returns Strict VBox status code.
15964 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15965 * @thread EMT(pVCpu)
15966 */
15967VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15968{
15969 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15970 Assert(!pVCpu->iem.s.cActiveMappings);
15971 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15972}
15973
15974
15975/**
15976 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15977 *
15978 * @returns Strict VBox status code.
15979 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15980 * @param uVector The SIPI vector.
15981 * @thread EMT(pVCpu)
15982 */
15983VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15984{
15985 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15986 Assert(!pVCpu->iem.s.cActiveMappings);
15987 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15988}
15989
15990
15991/**
15992 * Interface for HM and EM to emulate a VM-exit.
15993 *
15994 * If a specialized version of a VM-exit handler exists, that must be used instead.
15995 *
15996 * @returns Strict VBox status code.
15997 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15998 * @param uExitReason The VM-exit reason.
15999 * @param u64ExitQual The Exit qualification.
16000 * @thread EMT(pVCpu)
16001 */
16002VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
16003{
16004 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
16005 Assert(!pVCpu->iem.s.cActiveMappings);
16006 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16007}
16008
16009
16010/**
16011 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16012 *
16013 * This is meant to be used for those instructions for which VMX provides
16014 * additional decoding information beyond just the instruction length.
16015 *
16016 * @returns Strict VBox status code.
16017 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16018 * @param pExitInfo Pointer to the VM-exit information.
16019 * @thread EMT(pVCpu)
16020 */
16021VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16022{
16023 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
16024 Assert(!pVCpu->iem.s.cActiveMappings);
16025 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16026}
16027
16028
16029/**
16030 * Interface for HM and EM to emulate a VM-exit due to an instruction.
16031 *
16032 * This is meant to be used for those instructions for which VMX provides only
16033 * the instruction length.
16034 *
16035 * @returns Strict VBox status code.
16036 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16037 * @param uExitReason The VM-exit reason.
16038 * @param cbInstr The instruction length in bytes.
16039 * @thread EMT(pVCpu)
16040 */
16041VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
16042{
16043 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
16044 Assert(!pVCpu->iem.s.cActiveMappings);
16045 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16046}
16047
16048
16049/**
16050 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
16051 * Virtualized-EOI, TPR-below threshold).
16052 *
16053 * @returns Strict VBox status code.
16054 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16055 * @param pExitInfo Pointer to the VM-exit information.
16056 * @thread EMT(pVCpu)
16057 */
16058VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16059{
16060 Assert(pExitInfo);
16061 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
16062 Assert(!pVCpu->iem.s.cActiveMappings);
16063 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16064}
16065
16066
16067/**
16068 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16069 *
16070 * @returns Strict VBox status code.
16071 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16072 * @param pExitInfo Pointer to the VM-exit information.
16073 * @param pExitEventInfo Pointer to the VM-exit event information.
16074 * @thread EMT(pVCpu)
16075 */
16076VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16077{
16078 Assert(pExitInfo);
16079 Assert(pExitEventInfo);
16080 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16081 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16082 Assert(!pVCpu->iem.s.cActiveMappings);
16083 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16084}
16085
16086
16087/**
16088 * Interface for HM and EM to emulate the VMREAD instruction.
16089 *
16090 * @returns Strict VBox status code.
16091 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16092 * @param pExitInfo Pointer to the VM-exit information.
16093 * @thread EMT(pVCpu)
16094 */
16095VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16096{
16097 Assert(pExitInfo);
16098 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16099 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16100
16101 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16102
16103 VBOXSTRICTRC rcStrict;
16104 uint8_t const cbInstr = pExitInfo->cbInstr;
16105 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16106 uint64_t const u64FieldEnc = fIs64BitMode
16107 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16108 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16109 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16110 {
16111 if (fIs64BitMode)
16112 {
16113 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16114 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16115 }
16116 else
16117 {
16118 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16119 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16120 }
16121 }
16122 else
16123 {
16124 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16125 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16126 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16127 }
16128 Assert(!pVCpu->iem.s.cActiveMappings);
16129 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16130}
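/*
 * Editorial note: the wrapper above distinguishes two VMREAD destination forms via
 * InstrInfo.VmreadVmwrite.fIsRegOperand. A register destination is written through a
 * 64-bit or 32-bit GPR reference depending on the CPU mode, while a memory
 * destination is handed to iemVmxVmreadMem() as a segment + effective-address pair
 * taken from the exit information. The VMCS field encoding always comes from iReg2.
 */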
16131
16132
16133/**
16134 * Interface for HM and EM to emulate the VMWRITE instruction.
16135 *
16136 * @returns Strict VBox status code.
16137 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16138 * @param pExitInfo Pointer to the VM-exit information.
16139 * @thread EMT(pVCpu)
16140 */
16141VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16142{
16143 Assert(pExitInfo);
16144 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16145 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16146
16147 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16148
16149 uint64_t u64Val;
16150 uint8_t iEffSeg;
16151 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16152 {
16153 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16154 iEffSeg = UINT8_MAX;
16155 }
16156 else
16157 {
16158 u64Val = pExitInfo->GCPtrEffAddr;
16159 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16160 }
16161 uint8_t const cbInstr = pExitInfo->cbInstr;
16162 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16163 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16164 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16165 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16166 Assert(!pVCpu->iem.s.cActiveMappings);
16167 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16168}
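/*
 * Editorial note: for VMWRITE the register form fetches the value to write directly
 * from iReg1 and sets iEffSeg to UINT8_MAX, apparently as a "no segment" marker
 * since no memory access is needed; the memory form instead passes the guest
 * effective address and segment so iemVmxVmwrite() can read the value itself.
 */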
16169
16170
16171/**
16172 * Interface for HM and EM to emulate the VMPTRLD instruction.
16173 *
16174 * @returns Strict VBox status code.
16175 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16176 * @param pExitInfo Pointer to the VM-exit information.
16177 * @thread EMT(pVCpu)
16178 */
16179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16180{
16181 Assert(pExitInfo);
16182 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16183 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16184
16185 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16186
16187 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16188 uint8_t const cbInstr = pExitInfo->cbInstr;
16189 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16190 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16191 Assert(!pVCpu->iem.s.cActiveMappings);
16192 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16193}
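/*
 * Illustrative sketch (editorial addition): the minimum VMXVEXITINFO fields this
 * wrapper consumes when HM forwards a decoded VMPTRLD. The values shown are
 * hypothetical; a real caller fills them from the VM-exit instruction information
 * and exit qualification.
 *
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.cbInstr                    = 4;            // hypothetical length
 *     ExitInfo.GCPtrEffAddr               = GCPtrVmcsPtr;  // hypothetical guest address
 *     ExitInfo.InstrInfo.VmxXsave.iSegReg = X86_SREG_DS;
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
 */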
16194
16195
16196/**
16197 * Interface for HM and EM to emulate the VMPTRST instruction.
16198 *
16199 * @returns Strict VBox status code.
16200 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16201 * @param pExitInfo Pointer to the VM-exit information.
16202 * @thread EMT(pVCpu)
16203 */
16204VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16205{
16206 Assert(pExitInfo);
16207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16209
16210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16211
16212 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16213 uint8_t const cbInstr = pExitInfo->cbInstr;
16214 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16215 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16216 Assert(!pVCpu->iem.s.cActiveMappings);
16217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16218}
16219
16220
16221/**
16222 * Interface for HM and EM to emulate the VMCLEAR instruction.
16223 *
16224 * @returns Strict VBox status code.
16225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16226 * @param pExitInfo Pointer to the VM-exit information.
16227 * @thread EMT(pVCpu)
16228 */
16229VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16230{
16231 Assert(pExitInfo);
16232 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16233 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16234
16235 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16236
16237 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16238 uint8_t const cbInstr = pExitInfo->cbInstr;
16239 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16240 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16241 Assert(!pVCpu->iem.s.cActiveMappings);
16242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16243}
16244
16245
16246/**
16247 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16248 *
16249 * @returns Strict VBox status code.
16250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16251 * @param cbInstr The instruction length in bytes.
16252 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16253 * VMXINSTRID_VMRESUME).
16254 * @thread EMT(pVCpu)
16255 */
16256VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16257{
16258 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16259 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16260
16261 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16262 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16263 Assert(!pVCpu->iem.s.cActiveMappings);
16264 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16265}
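/*
 * Illustrative sketch (editorial addition): unlike the memory-operand wrappers,
 * VMLAUNCH/VMRESUME need no exit information, only the instruction length and the
 * instruction ID. The 3-byte length is an assumption for this example (matching the
 * 0f 01 c2 / 0f 01 c3 encodings).
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3, VMXINSTRID_VMRESUME);
 */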
16266
16267
16268/**
16269 * Interface for HM and EM to emulate the VMXON instruction.
16270 *
16271 * @returns Strict VBox status code.
16272 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16273 * @param pExitInfo Pointer to the VM-exit information.
16274 * @thread EMT(pVCpu)
16275 */
16276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16277{
16278 Assert(pExitInfo);
16279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16280 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16281
16282 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16283
16284 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16285 uint8_t const cbInstr = pExitInfo->cbInstr;
16286 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16287 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16288 Assert(!pVCpu->iem.s.cActiveMappings);
16289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16290}
16291
16292
16293/**
16294 * Interface for HM and EM to emulate the VMXOFF instruction.
16295 *
16296 * @returns Strict VBox status code.
16297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16298 * @param cbInstr The instruction length in bytes.
16299 * @thread EMT(pVCpu)
16300 */
16301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16302{
16303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16304 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16305
16306 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16308 Assert(!pVCpu->iem.s.cActiveMappings);
16309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16310}
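/*
 * Editorial note: VMXOFF takes no operands, so this wrapper can go straight to the
 * instruction's C implementation via IEM_CIMPL_CALL_0 and only needs the
 * no-memory-operand context mask (IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK),
 * in contrast to the memory-operand wrappers above.
 */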
16311
16312
16313/**
16314 * Interface for HM and EM to emulate the INVVPID instruction.
16315 *
16316 * @returns Strict VBox status code.
16317 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16318 * @param pExitInfo Pointer to the VM-exit information.
16319 * @thread EMT(pVCpu)
16320 */
16321VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16322{
16323 Assert(pExitInfo);
16324 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16325 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16326
16327 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16328
16329 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16330 uint8_t const cbInstr = pExitInfo->cbInstr;
16331 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16332 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16333 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16334 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16335 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16336 Assert(!pVCpu->iem.s.cActiveMappings);
16337 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16338}
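/*
 * Editorial note: for INVVPID the descriptor is always a memory operand, so its
 * address comes from GCPtrEffAddr with the segment from InstrInfo.Inv.iSegReg, while
 * the invalidation type is read from the register named by iReg2 (64-bit in long
 * mode, zero-extended 32-bit otherwise). INVEPT below follows the same pattern.
 */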
16339
16340
16341# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
16342/**
16343 * Interface for HM and EM to emulate the INVEPT instruction.
16344 *
16345 * @returns Strict VBox status code.
16346 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16347 * @param pExitInfo Pointer to the VM-exit information.
16348 * @thread EMT(pVCpu)
16349 */
16350VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvept(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16351{
16352 Assert(pExitInfo);
16353 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16354 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16355
16356 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16357
16358 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16359 uint8_t const cbInstr = pExitInfo->cbInstr;
16360 RTGCPTR const GCPtrInveptDesc = pExitInfo->GCPtrEffAddr;
16361 uint64_t const u64InveptType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16362 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16363 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16364 VBOXSTRICTRC rcStrict = iemVmxInvept(pVCpu, cbInstr, iEffSeg, GCPtrInveptDesc, u64InveptType, pExitInfo);
16365 Assert(!pVCpu->iem.s.cActiveMappings);
16366 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16367}
16368
16369
16370/**
16371 * Interface for HM and EM to emulate a VM-exit due to an EPT violation.
16372 *
16373 * @returns Strict VBox status code.
16374 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16375 * @param pExitInfo Pointer to the VM-exit information.
16376 * @param pExitEventInfo Pointer to the VM-exit event information.
16377 * @thread EMT(pVCpu)
16378 */
16379VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptViolation(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo,
16380 PCVMXVEXITEVENTINFO pExitEventInfo)
16381{
16382 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16383
16384 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16385 VBOXSTRICTRC rcStrict = iemVmxVmexitEptViolationWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16386 Assert(!pVCpu->iem.s.cActiveMappings);
16387 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16388}
16389
16390
16391/**
16392 * Interface for HM and EM to emulate a VM-exit due to an EPT misconfiguration.
16393 *
16394 * @returns Strict VBox status code.
16395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16396 * @param GCPhysAddr The nested-guest physical address causing the EPT
16397 * misconfiguration.
16398 * @param pExitEventInfo Pointer to the VM-exit event information.
16399 * @thread EMT(pVCpu)
16400 */
16401VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitEptMisconfig(PVMCPUCC pVCpu, RTGCPHYS GCPhysAddr, PCVMXVEXITEVENTINFO pExitEventInfo)
16402{
16403 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16404
16405 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16406 VBOXSTRICTRC rcStrict = iemVmxVmexitEptMisconfigWithInfo(pVCpu, GCPhysAddr, pExitEventInfo);
16407 Assert(!pVCpu->iem.s.cActiveMappings);
16408 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16409}
16410
16411# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
16412
16413
16414/**
16415 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16416 *
16417 * @remarks The @a uUser argument is currently unused.
16418 */
16419PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16420 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16421 PGMACCESSORIGIN enmOrigin, uint64_t uUser)
16422{
16423 RT_NOREF3(pvPhys, enmOrigin, uUser);
16424
16425 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16426 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16427 {
16428 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16429 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16430
16431 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_DATA_W : IEM_ACCESS_DATA_R;
16432 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16433 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16434 if (RT_FAILURE(rcStrict))
16435 return rcStrict;
16436
16437 /* Any access to this APIC-access page has been handled; the caller should not carry out the access. */
16438 return VINF_SUCCESS;
16439 }
16440
16441 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
16442 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16443 if (RT_FAILURE(rc))
16444 return rc;
16445
16446 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16447 return VINF_PGM_HANDLER_DO_DEFAULT;
16448}
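/*
 * Editorial note: the handler above covers two cases. While the guest is in VMX
 * non-root mode, the access is virtualized by iemVmxVirtApicAccessMem() and
 * VINF_SUCCESS tells PGM the access has been dealt with. Once the guest has left
 * non-root mode the handler is no longer wanted, so it deregisters itself and
 * returns VINF_PGM_HANDLER_DO_DEFAULT so PGM performs the access as ordinary memory.
 */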
16449
16450
16451# ifndef IN_RING3
16452/**
16453 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
16454 * \#PF access handler callback for guest VMX APIC-access page.}
16455 */
16456DECLCALLBACK(VBOXSTRICTRC) iemVmxApicAccessPagePfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
16457 RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
16458
16459{
16460 RT_NOREF4(pVM, pRegFrame, pvFault, uUser);
16461
16462 /** @todo We lack information such as the current instruction length, IDT
16463 * vectoring info, etc. These need to be queried from HMR0. */
16464 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
16465 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16466 {
16467 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16468 Assert(CPUMGetGuestVmxApicAccessPageAddrEx(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16469
16470 uint32_t fAccess;
16471 if (uErr & X86_TRAP_PF_ID)
16472 fAccess = IEM_ACCESS_INSTRUCTION;
16473 else if (uErr & X86_TRAP_PF_RW)
16474 fAccess = IEM_ACCESS_DATA_W;
16475 else
16476 fAccess = IEM_ACCESS_DATA_R;
16477
16478 uint16_t const offAccess = GCPhysFault & GUEST_PAGE_OFFSET_MASK;
16479 bool const fIntercept = iemVmxVirtApicIsMemAccessIntercepted(pVCpu, offAccess, 0 /* cbAccess */, fAccess);
16480 if (fIntercept)
16481 {
16482 /** @todo Once HMR0 interface for querying VMXTRANSIENT info is available, use
16483 * iemVmxVmexitApicAccessWithInfo instead. This is R0-only code anyway. */
16484 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccess(pVCpu, offAccess, fAccess);
16485 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16486 }
16487
16488 /* The access isn't intercepted, which means it needs to be virtualized. */
16489 return VINF_EM_RAW_EMULATE_INSTR;
16490 }
16491
16492 LogFunc(("Accessed outside VMX non-root mode, deregistering page handler for %#RGp\n", GCPhysAccessBase));
16493 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16494 if (RT_FAILURE(rc))
16495 return rc;
16496
16497 return VINF_SUCCESS;
16498}
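/*
 * Editorial note: the ring-0/raw-mode \#PF variant derives the access type from the
 * page-fault error-code bits (instruction fetch, write or read), then asks
 * iemVmxVirtApicIsMemAccessIntercepted() whether the access must cause an
 * APIC-access VM-exit; if not, it returns VINF_EM_RAW_EMULATE_INSTR so the access is
 * emulated, and thereby virtualized, elsewhere.
 */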
16499# endif /* !IN_RING3 */
16500#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16501
16502
16503#ifdef IN_RING3
16504
16505/**
16506 * Handles the unlikely and probably fatal merge cases.
16507 *
16508 * @returns Merged status code.
16509 * @param rcStrict Current EM status code.
16510 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16511 * with @a rcStrict.
16512 * @param iMemMap The memory mapping index. For error reporting only.
16513 * @param pVCpu The cross context virtual CPU structure of the calling
16514 * thread, for error reporting only.
16515 */
16516DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16517 unsigned iMemMap, PVMCPUCC pVCpu)
16518{
16519 if (RT_FAILURE_NP(rcStrict))
16520 return rcStrict;
16521
16522 if (RT_FAILURE_NP(rcStrictCommit))
16523 return rcStrictCommit;
16524
16525 if (rcStrict == rcStrictCommit)
16526 return rcStrictCommit;
16527
16528 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16529 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16530 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16531 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16532 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16533 return VERR_IOM_FF_STATUS_IPE;
16534}
16535
16536
16537/**
16538 * Helper for IEMR3ProcessForceFlag.
16539 *
16540 * @returns Merged status code.
16541 * @param rcStrict Current EM status code.
16542 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16543 * with @a rcStrict.
16544 * @param iMemMap The memory mapping index. For error reporting only.
16545 * @param pVCpu The cross context virtual CPU structure of the calling
16546 * thread, for error reporting only.
16547 */
16548DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16549{
16550 /* Simple. */
16551 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16552 return rcStrictCommit;
16553
16554 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16555 return rcStrict;
16556
16557 /* EM scheduling status codes. */
16558 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16559 && rcStrict <= VINF_EM_LAST))
16560 {
16561 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16562 && rcStrictCommit <= VINF_EM_LAST))
16563 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16564 }
16565
16566 /* Unlikely */
16567 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16568}
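/*
 * Editorial note: a quick worked reading of the merge rules above. If the current
 * status is plain VINF_SUCCESS (or VINF_EM_RAW_TO_R3), the commit status wins
 * outright. If both are EM scheduling codes, the numerically smaller one is kept,
 * i.e. the one the code treats as the more urgent scheduling request. Anything else
 * falls through to iemR3MergeStatusSlow() and is treated as a likely bug.
 */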
16569
16570
16571/**
16572 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16573 *
16574 * @returns Merge between @a rcStrict and what the commit operation returned.
16575 * @param pVM The cross context VM structure.
16576 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16577 * @param rcStrict The status code returned by ring-0 or raw-mode.
16578 */
16579VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16580{
16581 /*
16582 * Reset the pending commit.
16583 */
16584 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16585 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16586 ("%#x %#x %#x\n",
16587 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16588 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16589
16590 /*
16591 * Commit the pending bounce buffers (usually just one).
16592 */
16593 unsigned cBufs = 0;
16594 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16595 while (iMemMap-- > 0)
16596 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16597 {
16598 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16599 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16600 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16601
16602 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16603 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16604 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16605
16606 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16607 {
16608 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16609 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16610 pbBuf,
16611 cbFirst,
16612 PGMACCESSORIGIN_IEM);
16613 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16614 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16615 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16616 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16617 }
16618
16619 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16620 {
16621 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16622 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16623 pbBuf + cbFirst,
16624 cbSecond,
16625 PGMACCESSORIGIN_IEM);
16626 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16627 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16628 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16629 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16630 }
16631 cBufs++;
16632 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16633 }
16634
16635 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16636 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16637 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16638 pVCpu->iem.s.cActiveMappings = 0;
16639 return rcStrict;
16640}
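/*
 * Illustrative sketch (editorial addition): how ring-3 force-flag processing might
 * invoke this function. The surrounding variables are hypothetical; only
 * VMCPU_FF_IEM and IEMR3ProcessForceFlag() come from this file.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */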
16641
16642#endif /* IN_RING3 */
16643