VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@80348

Last change on this file since 80348 was 80333, checked in by vboxsync, 5 years ago

VMM: Eliminating the VBOX_BUGREF_9217_PART_I preprocessor macro. bugref:9217

1/* $Id: IEMAll.cpp 80333 2019-08-16 20:28:38Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with the speed goal, as the disassembler chews on things a bit too
37 * much and leaves us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
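/*
 * Illustrative sketch (not part of the original source): a caller such as EM
 * typically hands a single instruction to IEM roughly like this. Only
 * IEMExecOne() is taken from the public IEM API; the surrounding scheduling
 * logic is an assumption.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         // Instruction interpreted successfully; resume hardware-assisted
 *         // execution or interpret the next instruction.
 *     }
 *     else
 *     {
 *         // Informational statuses (e.g. VINF_EM_RAW_TO_R3) are passed up to
 *         // EM for rescheduling; errors abort the current execution loop.
 *     }
 */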
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
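/*
 * Minimal usage sketch (illustrative only): a decoder function is declared
 * with FNIEMOP_DEF and dispatched through FNIEMOP_CALL. The function name
 * iemOp_Example and its body are hypothetical; g_apfnOneByteMap is the
 * one-byte opcode dispatch table declared further down.
 *
 *     FNIEMOP_DEF(iemOp_Example)
 *     {
 *         // ...decode any operands, perform the operation, advance RIP...
 *         return VINF_SUCCESS;
 *     }
 *
 *     // Dispatching on the first opcode byte b:
 *     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
 */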
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
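/*
 * Sketch of the two status handling styles this selects between (illustrative
 * only; the *Jmp suffix follows the naming convention used by the setjmp
 * variants elsewhere in this file, the exact helper shown is an assumption):
 *
 *     // Without IEM_WITH_SETJMP: every helper returns a strict status code
 *     // which the caller must check and propagate.
 *     uint32_t     u32Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // With IEM_WITH_SETJMP: failures longjmp back to a setjmp frame set up
 *     // by the caller, so the success path needs no status checking.
 *     uint32_t u32Value2 = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 */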
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
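/*
 * Typical use (illustrative only; the variables are placeholders): the first
 * macro supplies the default case of a switch over an enum whose values are
 * otherwise all handled explicitly.
 *
 *     switch (enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbValue = 2; break;
 *         case IEMMODE_32BIT: cbValue = 4; break;
 *         case IEMMODE_64BIT: cbValue = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */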
241
242/**
243 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored if not 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
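/* Worked example (illustrative): a decoded VVVV value of 12 selects register
 * 12 in 64-bit mode, but is masked to 12 & 7 = 4 outside of it, since only
 * the first eight registers are addressable there. */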
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for a triple fault.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
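/*
 * Usage sketch (illustrative only): an instruction implementation typically
 * combines the mode check with the exit helper along these lines. The
 * surrounding handler is hypothetical; VMX_EXIT_CPUID is a real exit reason
 * for an instruction that exits unconditionally in VMX non-root operation.
 *
 *     if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
 *         IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_CPUID, cbInstr);
 *     // ...otherwise emulate the instruction normally...
 */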
459
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept and updates
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
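/*
 * Usage sketch (illustrative only): instruction handlers normally run the
 * intercept check before emulating anything. The surrounding handler is
 * hypothetical; the SVM_CTRL_INTERCEPT_RDTSC / SVM_EXIT_RDTSC pair is taken
 * from the SVM headers.
 *
 *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC,
 *                                   0, 0); // exit info 1 and 2 are unused here
 *     // ...no intercept set, continue emulating RDTSC...
 */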
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
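/*
 * Illustrative sketch (not part of the original source): the 0x80..0x83
 * opcode group decoders pick the implementation by the ModR/M reg field and
 * then dispatch on the effective operand size, roughly:
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *     // e.g. pImpl->pfnNormalU32 for a 32-bit register/memory operand, or
 *     // pImpl->pfnLockedU32 when a LOCK prefix is present (NULL for CMP).
 */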
736
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
903/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
965IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
969IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
970IEM_STATIC uint16_t iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
971IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
972
973#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
974IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
975IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
978IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
981#endif
982
983#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
984IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
985IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
986#endif
987
988
989/**
990 * Sets the pass up status.
991 *
992 * @returns VINF_SUCCESS.
993 * @param pVCpu The cross context virtual CPU structure of the
994 * calling thread.
995 * @param rcPassUp The pass up status. Must be informational.
996 * VINF_SUCCESS is not allowed.
997 */
998IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
999{
1000 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1001
1002 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1003 if (rcOldPassUp == VINF_SUCCESS)
1004 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1005 /* If both are EM scheduling codes, use EM priority rules. */
1006 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1007 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1008 {
1009 if (rcPassUp < rcOldPassUp)
1010 {
1011 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1012 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1013 }
1014 else
1015 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1016 }
1017 /* Override EM scheduling with specific status code. */
1018 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1019 {
1020 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1021 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1022 }
1023 /* Don't override specific status code, first come first served. */
1024 else
1025 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1026 return VINF_SUCCESS;
1027}
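/*
 * Illustrative note: a typical caller merges an informational status from a
 * physical access into the pass-up slot and carries on, as done later in this
 * file, e.g.:
 *
 *     else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */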
1028
1029
1030/**
1031 * Calculates the CPU mode.
1032 *
1033 * This is mainly for updating IEMCPU::enmCpuMode.
1034 *
1035 * @returns CPU mode.
1036 * @param pVCpu The cross context virtual CPU structure of the
1037 * calling thread.
1038 */
1039DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1040{
1041 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1042 return IEMMODE_64BIT;
1043 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1044 return IEMMODE_32BIT;
1045 return IEMMODE_16BIT;
1046}
1047
1048
1049/**
1050 * Initializes the execution state.
1051 *
1052 * @param pVCpu The cross context virtual CPU structure of the
1053 * calling thread.
1054 * @param fBypassHandlers Whether to bypass access handlers.
1055 *
1056 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1057 * side-effects in strict builds.
1058 */
1059DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1060{
1061 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1062 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1063 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1071
1072 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1073 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1074#ifdef VBOX_STRICT
1075 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1076 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1080 pVCpu->iem.s.uRexReg = 127;
1081 pVCpu->iem.s.uRexB = 127;
1082 pVCpu->iem.s.offModRm = 127;
1083 pVCpu->iem.s.uRexIndex = 127;
1084 pVCpu->iem.s.iEffSeg = 127;
1085 pVCpu->iem.s.idxPrefix = 127;
1086 pVCpu->iem.s.uVex3rdReg = 127;
1087 pVCpu->iem.s.uVexLength = 127;
1088 pVCpu->iem.s.fEvexStuff = 127;
1089 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1090# ifdef IEM_WITH_CODE_TLB
1091 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1092 pVCpu->iem.s.pbInstrBuf = NULL;
1093 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1094 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1095 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1096 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1097# else
1098 pVCpu->iem.s.offOpcode = 127;
1099 pVCpu->iem.s.cbOpcode = 127;
1100# endif
1101#endif
1102
1103 pVCpu->iem.s.cActiveMappings = 0;
1104 pVCpu->iem.s.iNextMapping = 0;
1105 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1106 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1107#if 0
1108#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1109 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1110 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1111 {
1112 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1113 Assert(pVmcs);
1114 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1115 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1116 {
1117 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1118 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1119 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1120 AssertRC(rc);
1121 }
1122 }
1123#endif
1124#endif
1125}
1126
1127#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1128/**
1129 * Performs a minimal reinitialization of the execution state.
1130 *
1131 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1132 * 'world-switch' type operations on the CPU. Currently only nested
1133 * hardware-virtualization uses it.
1134 *
1135 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1136 */
1137IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1138{
1139 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1140 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1141
1142 pVCpu->iem.s.uCpl = uCpl;
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = enmMode;
1155 }
1156 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1157#ifndef IEM_WITH_CODE_TLB
1158 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1159 pVCpu->iem.s.offOpcode = 0;
1160 pVCpu->iem.s.cbOpcode = 0;
1161#endif
1162 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1163}
1164#endif
1165
1166/**
1167 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1168 *
1169 * @param pVCpu The cross context virtual CPU structure of the
1170 * calling thread.
1171 */
1172DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1173{
1174 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1175#ifdef VBOX_STRICT
1176# ifdef IEM_WITH_CODE_TLB
1177 NOREF(pVCpu);
1178# else
1179 pVCpu->iem.s.cbOpcode = 0;
1180# endif
1181#else
1182 NOREF(pVCpu);
1183#endif
1184}
1185
1186
1187/**
1188 * Initializes the decoder state.
1189 *
1190 * iemReInitDecoder is mostly a copy of this function.
1191 *
1192 * @param pVCpu The cross context virtual CPU structure of the
1193 * calling thread.
1194 * @param fBypassHandlers Whether to bypass access handlers.
1195 */
1196DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers)
1197{
1198 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1199 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1208
1209 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1210 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1211 pVCpu->iem.s.enmCpuMode = enmMode;
1212 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1213 pVCpu->iem.s.enmEffAddrMode = enmMode;
1214 if (enmMode != IEMMODE_64BIT)
1215 {
1216 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1217 pVCpu->iem.s.enmEffOpSize = enmMode;
1218 }
1219 else
1220 {
1221 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1222 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1223 }
1224 pVCpu->iem.s.fPrefixes = 0;
1225 pVCpu->iem.s.uRexReg = 0;
1226 pVCpu->iem.s.uRexB = 0;
1227 pVCpu->iem.s.uRexIndex = 0;
1228 pVCpu->iem.s.idxPrefix = 0;
1229 pVCpu->iem.s.uVex3rdReg = 0;
1230 pVCpu->iem.s.uVexLength = 0;
1231 pVCpu->iem.s.fEvexStuff = 0;
1232 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1233#ifdef IEM_WITH_CODE_TLB
1234 pVCpu->iem.s.pbInstrBuf = NULL;
1235 pVCpu->iem.s.offInstrNextByte = 0;
1236 pVCpu->iem.s.offCurInstrStart = 0;
1237# ifdef VBOX_STRICT
1238 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1239 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1240 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1241# endif
1242#else
1243 pVCpu->iem.s.offOpcode = 0;
1244 pVCpu->iem.s.cbOpcode = 0;
1245#endif
1246 pVCpu->iem.s.offModRm = 0;
1247 pVCpu->iem.s.cActiveMappings = 0;
1248 pVCpu->iem.s.iNextMapping = 0;
1249 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1250 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1251
1252#ifdef DBGFTRACE_ENABLED
1253 switch (enmMode)
1254 {
1255 case IEMMODE_64BIT:
1256 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1257 break;
1258 case IEMMODE_32BIT:
1259 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1260 break;
1261 case IEMMODE_16BIT:
1262 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1263 break;
1264 }
1265#endif
1266}
1267
1268
1269/**
1270 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1271 *
1272 * This is mostly a copy of iemInitDecoder.
1273 *
1274 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1275 */
1276DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1277{
1278 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1279 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1280 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1281 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1287
1288 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1289 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1290 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1291 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1292 pVCpu->iem.s.enmEffAddrMode = enmMode;
1293 if (enmMode != IEMMODE_64BIT)
1294 {
1295 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1296 pVCpu->iem.s.enmEffOpSize = enmMode;
1297 }
1298 else
1299 {
1300 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1301 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1302 }
1303 pVCpu->iem.s.fPrefixes = 0;
1304 pVCpu->iem.s.uRexReg = 0;
1305 pVCpu->iem.s.uRexB = 0;
1306 pVCpu->iem.s.uRexIndex = 0;
1307 pVCpu->iem.s.idxPrefix = 0;
1308 pVCpu->iem.s.uVex3rdReg = 0;
1309 pVCpu->iem.s.uVexLength = 0;
1310 pVCpu->iem.s.fEvexStuff = 0;
1311 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1312#ifdef IEM_WITH_CODE_TLB
1313 if (pVCpu->iem.s.pbInstrBuf)
1314 {
1315 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1316 - pVCpu->iem.s.uInstrBufPc;
1317 if (off < pVCpu->iem.s.cbInstrBufTotal)
1318 {
1319 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1320 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1321 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1322 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1323 else
1324 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1325 }
1326 else
1327 {
1328 pVCpu->iem.s.pbInstrBuf = NULL;
1329 pVCpu->iem.s.offInstrNextByte = 0;
1330 pVCpu->iem.s.offCurInstrStart = 0;
1331 pVCpu->iem.s.cbInstrBuf = 0;
1332 pVCpu->iem.s.cbInstrBufTotal = 0;
1333 }
1334 }
1335 else
1336 {
1337 pVCpu->iem.s.offInstrNextByte = 0;
1338 pVCpu->iem.s.offCurInstrStart = 0;
1339 pVCpu->iem.s.cbInstrBuf = 0;
1340 pVCpu->iem.s.cbInstrBufTotal = 0;
1341 }
1342#else
1343 pVCpu->iem.s.cbOpcode = 0;
1344 pVCpu->iem.s.offOpcode = 0;
1345#endif
1346 pVCpu->iem.s.offModRm = 0;
1347 Assert(pVCpu->iem.s.cActiveMappings == 0);
1348 pVCpu->iem.s.iNextMapping = 0;
1349 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1350 Assert(pVCpu->iem.s.fBypassHandlers == false);
1351
1352#ifdef DBGFTRACE_ENABLED
1353 switch (enmMode)
1354 {
1355 case IEMMODE_64BIT:
1356 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1357 break;
1358 case IEMMODE_32BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1360 break;
1361 case IEMMODE_16BIT:
1362 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1363 break;
1364 }
1365#endif
1366}
1367
1368
1369
1370/**
1371 * Prefetch opcodes the first time when starting executing.
1372 *
1373 * @returns Strict VBox status code.
1374 * @param pVCpu The cross context virtual CPU structure of the
1375 * calling thread.
1376 * @param fBypassHandlers Whether to bypass access handlers.
1377 */
1378IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers)
1379{
1380 iemInitDecoder(pVCpu, fBypassHandlers);
1381
1382#ifdef IEM_WITH_CODE_TLB
1383 /** @todo Do ITLB lookup here. */
1384
1385#else /* !IEM_WITH_CODE_TLB */
1386
1387 /*
1388 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1389 *
1390 * First translate CS:rIP to a physical address.
1391 */
1392 uint32_t cbToTryRead;
1393 RTGCPTR GCPtrPC;
1394 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1395 {
1396 cbToTryRead = PAGE_SIZE;
1397 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1398 if (IEM_IS_CANONICAL(GCPtrPC))
1399 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1400 else
1401 return iemRaiseGeneralProtectionFault0(pVCpu);
1402 }
1403 else
1404 {
1405 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1406 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1407 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1408 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1409 else
1410 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1411 if (cbToTryRead) { /* likely */ }
1412 else /* overflowed */
1413 {
1414 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1415 cbToTryRead = UINT32_MAX;
1416 }
1417 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1418 Assert(GCPtrPC <= UINT32_MAX);
1419 }
1420
1421 RTGCPHYS GCPhys;
1422 uint64_t fFlags;
1423 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1424 if (RT_SUCCESS(rc)) { /* probable */ }
1425 else
1426 {
1427 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1428 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1429 }
1430 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1431 else
1432 {
1433 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1434 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1435 }
1436 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1437 else
1438 {
1439 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1440 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1441 }
1442 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1443 /** @todo Check reserved bits and such stuff. PGM is better at doing
1444 * that, so do it when implementing the guest virtual address
1445 * TLB... */
1446
1447 /*
1448 * Read the bytes at this address.
1449 */
1450 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1451 if (cbToTryRead > cbLeftOnPage)
1452 cbToTryRead = cbLeftOnPage;
1453 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1454 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1455
1456 if (!pVCpu->iem.s.fBypassHandlers)
1457 {
1458 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1459 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1460 { /* likely */ }
1461 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1462 {
1463 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1464 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1465 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1466 }
1467 else
1468 {
1469 Log((RT_SUCCESS(rcStrict)
1470 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1471 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1472 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1473 return rcStrict;
1474 }
1475 }
1476 else
1477 {
1478 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1479 if (RT_SUCCESS(rc))
1480 { /* likely */ }
1481 else
1482 {
1483 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1484 GCPtrPC, GCPhys, cbToTryRead, rc));
1485 return rc;
1486 }
1487 }
1488 pVCpu->iem.s.cbOpcode = cbToTryRead;
1489#endif /* !IEM_WITH_CODE_TLB */
1490 return VINF_SUCCESS;
1491}
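
/* Worked example (illustrative, non-TLB build): with CS.base = 0 and
 * rIP = 0x1ff8, the initial prefetch above is capped at the page boundary:
 *
 *     cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);  // 0x1000 - 0xff8 = 8 bytes
 *
 * so at most 8 bytes land in abOpcode here; anything the decoder needs
 * beyond that is pulled in later by iemOpcodeFetchMoreBytes.
 */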
1492
1493
1494/**
1495 * Invalidates the IEM TLBs.
1496 *
1497 * This is called internally as well as by PGM when moving GC mappings.
1498 *
1500 * @param pVCpu The cross context virtual CPU structure of the calling
1501 * thread.
1502 * @param fVmm Set when PGM calls us with a remapping.
1503 */
1504VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1505{
1506#ifdef IEM_WITH_CODE_TLB
1507 pVCpu->iem.s.cbInstrBufTotal = 0;
1508 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1509 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1510 { /* very likely */ }
1511 else
1512 {
1513 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1515 while (i-- > 0)
1516 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1517 }
1518#endif
1519
1520#ifdef IEM_WITH_DATA_TLB
1521 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1522 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1523 { /* very likely */ }
1524 else
1525 {
1526 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1527 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1528 while (i-- > 0)
1529 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1530 }
1531#endif
1532 NOREF(pVCpu); NOREF(fVmm);
1533}
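
/* Illustrative sketch (not part of the build): why bumping uTlbRevision is
 * enough.  The lookup code (see iemOpcodeFetchBytesJmp) folds the current
 * revision into the tag it compares against, so entries stamped with an
 * older revision simply stop matching:
 *
 *     uint64_t const uTag  = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
 *     PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
 *     bool const     fHit  = pTlbe->uTag == uTag;  // false for anything tagged with an older revision
 *
 * Only on the (very rare) revision wrap-around above do the tags get cleared
 * explicitly.
 */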
1534
1535
1536/**
1537 * Invalidates a page in the TLBs.
1538 *
1539 * @param pVCpu The cross context virtual CPU structure of the calling
1540 * thread.
1541 * @param GCPtr The address of the page to invalidate
1542 */
1543VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1544{
1545#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1546 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1547 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1548 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1549 uintptr_t idx = (uint8_t)GCPtr;
1550
1551# ifdef IEM_WITH_CODE_TLB
1552 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1553 {
1554 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1555 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1556 pVCpu->iem.s.cbInstrBufTotal = 0;
1557 }
1558# endif
1559
1560# ifdef IEM_WITH_DATA_TLB
1561 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1562 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1563# endif
1564#else
1565 NOREF(pVCpu); NOREF(GCPtr);
1566#endif
1567}
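
/* Worked example (illustrative): the TLBs are direct mapped on linear
 * address bits 12..19, so a page invalidation only has to look at one slot
 * per TLB.  For GCPtr = 0xfffff80012345000:
 *
 *     GCPtr >>= X86_PAGE_SHIFT;          // 0xfffff80012345
 *     uintptr_t idx = (uint8_t)GCPtr;    // 0x45, one of the 256 entries
 *
 * The slot is only cleared when its tag still matches the current revision,
 * i.e. when it could actually produce a hit.
 */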
1568
1569
1570/**
1571 * Invalidates the host physical aspects of the IEM TLBs.
1572 *
1573 * This is called internally as well as by PGM when moving GC mappings.
1574 *
1575 * @param pVCpu The cross context virtual CPU structure of the calling
1576 * thread.
1577 */
1578VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1579{
1580#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1581 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1582
1583# ifdef IEM_WITH_CODE_TLB
1584 pVCpu->iem.s.cbInstrBufTotal = 0;
1585# endif
1586 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1587 if (uTlbPhysRev != 0)
1588 {
1589 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1590 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1591 }
1592 else
1593 {
1594 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1595 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1596
1597 unsigned i;
1598# ifdef IEM_WITH_CODE_TLB
1599 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1600 while (i-- > 0)
1601 {
1602 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1603 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1604 }
1605# endif
1606# ifdef IEM_WITH_DATA_TLB
1607 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1608 while (i-- > 0)
1609 {
1610 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1611 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1612 }
1613# endif
1614 }
1615#else
1616 NOREF(pVCpu);
1617#endif
1618}
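
/* Illustrative sketch (not part of the build): like the virtual revision,
 * the physical revision is checked lazily at lookup time, so bumping
 * uTlbPhysRev invalidates every cached pbMappingR3 / access-flag combination
 * without touching the arrays (mirrors the physical page lookup in
 * iemOpcodeFetchBytesJmp):
 *
 *     if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *         ...  // still valid: pbMappingR3 and the PG_NO_READ/PG_NO_WRITE bits can be trusted
 *     else
 *         ...  // stale: re-query PGM (PGMPhysIemGCPhys2PtrNoLock) and restamp the entry
 */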
1619
1620
1621/**
1622 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1623 *
1624 * This is called internally as well as by PGM when moving GC mappings.
1625 *
1626 * @param pVM The cross context VM structure.
1627 *
1628 * @remarks Caller holds the PGM lock.
1629 */
1630VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1631{
1632 RT_NOREF_PV(pVM);
1633}
1634
1635#ifdef IEM_WITH_CODE_TLB
1636
1637/**
1638 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1639 * failure and longjmp'ing out.
1640 *
1641 * We end up here for a number of reasons:
1642 * - pbInstrBuf isn't yet initialized.
1643 * - Advancing beyond the buffer boundary (e.g. cross page).
1644 * - Advancing beyond the CS segment limit.
1645 * - Fetching from non-mappable page (e.g. MMIO).
1646 *
1647 * @param pVCpu The cross context virtual CPU structure of the
1648 * calling thread.
1649 * @param pvDst Where to return the bytes.
1650 * @param cbDst Number of bytes to read.
1651 *
1652 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1653 */
1654IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1655{
1656#ifdef IN_RING3
1657 for (;;)
1658 {
1659 Assert(cbDst <= 8);
1660 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1661
1662 /*
1663 * We might have a partial buffer match, deal with that first to make the
1664 * rest simpler. This is the first part of the cross page/buffer case.
1665 */
1666 if (pVCpu->iem.s.pbInstrBuf != NULL)
1667 {
1668 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1669 {
1670 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1671 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1672 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1673
1674 cbDst -= cbCopy;
1675 pvDst = (uint8_t *)pvDst + cbCopy;
1676 offBuf += cbCopy;
1677 pVCpu->iem.s.offInstrNextByte = offBuf; /* advance past the bytes just copied from the old buffer */
1678 }
1679 }
1680
1681 /*
1682 * Check segment limit, figuring how much we're allowed to access at this point.
1683 *
1684 * We will fault immediately if RIP is past the segment limit / in non-canonical
1685 * territory. If we do continue, there are one or more bytes to read before we
1686 * end up in trouble and we need to do that first before faulting.
1687 */
1688 RTGCPTR GCPtrFirst;
1689 uint32_t cbMaxRead;
1690 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1691 {
1692 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1693 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1694 { /* likely */ }
1695 else
1696 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1697 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1698 }
1699 else
1700 {
1701 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1702 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1703 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1704 { /* likely */ }
1705 else
1706 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1707 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1708 if (cbMaxRead != 0)
1709 { /* likely */ }
1710 else
1711 {
1712 /* Overflowed because address is 0 and limit is max. */
1713 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1714 cbMaxRead = X86_PAGE_SIZE;
1715 }
1716 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1717 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1718 if (cbMaxRead2 < cbMaxRead)
1719 cbMaxRead = cbMaxRead2;
1720 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1721 }
1722
1723 /*
1724 * Get the TLB entry for this piece of code.
1725 */
1726 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1727 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1728 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1729 if (pTlbe->uTag == uTag)
1730 {
1731 /* likely when executing lots of code, otherwise unlikely */
1732# ifdef VBOX_WITH_STATISTICS
1733 pVCpu->iem.s.CodeTlb.cTlbHits++;
1734# endif
1735 }
1736 else
1737 {
1738 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1739 RTGCPHYS GCPhys;
1740 uint64_t fFlags;
1741 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1742 if (RT_FAILURE(rc))
1743 {
1744 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1745 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1746 }
1747
1748 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1749 pTlbe->uTag = uTag;
1750 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1751 pTlbe->GCPhys = GCPhys;
1752 pTlbe->pbMappingR3 = NULL;
1753 }
1754
1755 /*
1756 * Check TLB page table level access flags.
1757 */
1758 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1759 {
1760 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1761 {
1762 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1763 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1764 }
1765 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1766 {
1767 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1768 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1769 }
1770 }
1771
1772 /*
1773 * Look up the physical page info if necessary.
1774 */
1775 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1776 { /* not necessary */ }
1777 else
1778 {
1779 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1780 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1782 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1783 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1784 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1785 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1786 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1787 }
1788
1789# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1790 /*
1791 * Try to do a direct read using the pbMappingR3 pointer.
1792 */
1793 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1794 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1795 {
1796 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1797 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1798 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1799 {
1800 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1801 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1802 }
1803 else
1804 {
1805 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1806 Assert(cbInstr < cbMaxRead);
1807 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1808 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1809 }
1810 if (cbDst <= cbMaxRead)
1811 {
1812 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1813 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1814 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1815 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1816 return;
1817 }
1818 pVCpu->iem.s.pbInstrBuf = NULL;
1819
1820 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1821 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1822 }
1823 else
1824# endif
1825#if 0
1826 /*
1827 * If there is no special read handling, we can read a bit more and
1828 * put it in the prefetch buffer.
1829 */
1830 if ( cbDst < cbMaxRead
1831 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1832 {
1833 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1834 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1835 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1836 { /* likely */ }
1837 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1838 {
1839 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1840 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1841 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1842 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1843 }
1844 else
1845 {
1846 Log((RT_SUCCESS(rcStrict)
1847 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1848 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1849 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1850 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1851 }
1852 }
1853 /*
1854 * Special read handling, so only read exactly what's needed.
1855 * This is a highly unlikely scenario.
1856 */
1857 else
1858#endif
1859 {
1860 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1861 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1862 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1863 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1864 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1865 { /* likely */ }
1866 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1867 {
1868 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1869 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1870 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1871 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1872 }
1873 else
1874 {
1875 Log((RT_SUCCESS(rcStrict)
1876 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1877 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1878 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1879 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1880 }
1881 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1882 if (cbToRead == cbDst)
1883 return;
1884 }
1885
1886 /*
1887 * More to read, loop.
1888 */
1889 cbDst -= cbMaxRead;
1890 pvDst = (uint8_t *)pvDst + cbMaxRead;
1891 }
1892#else
1893 RT_NOREF(pvDst, cbDst);
1894 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1895#endif
1896}
1897
1898#else
1899
1900/**
1901 * Tries to fetch at least @a cbMin more opcode bytes, raising the
1902 * appropriate exception if it fails.
1903 *
1904 * @returns Strict VBox status code.
1905 * @param pVCpu The cross context virtual CPU structure of the
1906 * calling thread.
1907 * @param cbMin The minimum number of bytes relative to offOpcode
1908 * that must be read.
1909 */
1910IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1911{
1912 /*
1913 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1914 *
1915 * First translate CS:rIP to a physical address.
1916 */
1917 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1918 uint32_t cbToTryRead;
1919 RTGCPTR GCPtrNext;
1920 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1921 {
1922 cbToTryRead = PAGE_SIZE;
1923 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1924 if (!IEM_IS_CANONICAL(GCPtrNext))
1925 return iemRaiseGeneralProtectionFault0(pVCpu);
1926 }
1927 else
1928 {
1929 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1930 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1931 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1932 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1933 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1934 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1935 if (!cbToTryRead) /* overflowed */
1936 {
1937 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1938 cbToTryRead = UINT32_MAX;
1939 /** @todo check out wrapping around the code segment. */
1940 }
1941 if (cbToTryRead < cbMin - cbLeft)
1942 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1943 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1944 }
1945
1946 /* Only read up to the end of the page, and make sure we don't read more
1947 than the opcode buffer can hold. */
1948 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1949 if (cbToTryRead > cbLeftOnPage)
1950 cbToTryRead = cbLeftOnPage;
1951 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1952 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1953/** @todo r=bird: Convert assertion into undefined opcode exception? */
1954 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1955
1956 RTGCPHYS GCPhys;
1957 uint64_t fFlags;
1958 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1959 if (RT_FAILURE(rc))
1960 {
1961 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1962 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1963 }
1964 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1965 {
1966 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1967 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1968 }
1969 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1970 {
1971 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1972 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1973 }
1974 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1975 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1976 /** @todo Check reserved bits and such stuff. PGM is better at doing
1977 * that, so do it when implementing the guest virtual address
1978 * TLB... */
1979
1980 /*
1981 * Read the bytes at this address.
1982 *
1983 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1984 * and since PATM should only patch the start of an instruction there
1985 * should be no need to check again here.
1986 */
1987 if (!pVCpu->iem.s.fBypassHandlers)
1988 {
1989 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1990 cbToTryRead, PGMACCESSORIGIN_IEM);
1991 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1992 { /* likely */ }
1993 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1994 {
1995 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1996 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1997 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1998 }
1999 else
2000 {
2001 Log((RT_SUCCESS(rcStrict)
2002 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2003 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2004 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2005 return rcStrict;
2006 }
2007 }
2008 else
2009 {
2010 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2011 if (RT_SUCCESS(rc))
2012 { /* likely */ }
2013 else
2014 {
2015 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2016 return rc;
2017 }
2018 }
2019 pVCpu->iem.s.cbOpcode += cbToTryRead;
2020 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2021
2022 return VINF_SUCCESS;
2023}
2024
2025#endif /* !IEM_WITH_CODE_TLB */
2026#ifndef IEM_WITH_SETJMP
2027
2028/**
2029 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2030 *
2031 * @returns Strict VBox status code.
2032 * @param pVCpu The cross context virtual CPU structure of the
2033 * calling thread.
2034 * @param pb Where to return the opcode byte.
2035 */
2036DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2037{
2038 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2039 if (rcStrict == VINF_SUCCESS)
2040 {
2041 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2042 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2043 pVCpu->iem.s.offOpcode = offOpcode + 1;
2044 }
2045 else
2046 *pb = 0;
2047 return rcStrict;
2048}
2049
2050
2051/**
2052 * Fetches the next opcode byte.
2053 *
2054 * @returns Strict VBox status code.
2055 * @param pVCpu The cross context virtual CPU structure of the
2056 * calling thread.
2057 * @param pu8 Where to return the opcode byte.
2058 */
2059DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2060{
2061 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2062 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2063 {
2064 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2065 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2066 return VINF_SUCCESS;
2067 }
2068 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2069}
2070
2071#else /* IEM_WITH_SETJMP */
2072
2073/**
2074 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2075 *
2076 * @returns The opcode byte.
2077 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2078 */
2079DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2080{
2081# ifdef IEM_WITH_CODE_TLB
2082 uint8_t u8;
2083 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2084 return u8;
2085# else
2086 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2087 if (rcStrict == VINF_SUCCESS)
2088 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2089 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2090# endif
2091}
2092
2093
2094/**
2095 * Fetches the next opcode byte, longjmp on error.
2096 *
2097 * @returns The opcode byte.
2098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2099 */
2100DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2101{
2102# ifdef IEM_WITH_CODE_TLB
2103 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2104 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2105 if (RT_LIKELY( pbBuf != NULL
2106 && offBuf < pVCpu->iem.s.cbInstrBuf))
2107 {
2108 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2109 return pbBuf[offBuf];
2110 }
2111# else
2112 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2113 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2114 {
2115 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2116 return pVCpu->iem.s.abOpcode[offOpcode];
2117 }
2118# endif
2119 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2120}
2121
2122#endif /* IEM_WITH_SETJMP */
2123
2124/**
2125 * Fetches the next opcode byte, returns automatically on failure.
2126 *
2127 * @param a_pu8 Where to return the opcode byte.
2128 * @remark Implicitly references pVCpu.
2129 */
2130#ifndef IEM_WITH_SETJMP
2131# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2132 do \
2133 { \
2134 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2135 if (rcStrict2 == VINF_SUCCESS) \
2136 { /* likely */ } \
2137 else \
2138 return rcStrict2; \
2139 } while (0)
2140#else
2141# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2142#endif /* IEM_WITH_SETJMP */
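
/* Usage sketch (hypothetical caller, not part of the build): the macro
 * implicitly references the 'pVCpu' variable of the enclosing function and,
 * in the non-setjmp build, expands to a 'return rcStrict2' on failure, so
 * the caller must return a strict status code:
 *
 *     // IEM_STATIC VBOXSTRICTRC iemOpSomething(PVMCPUCC pVCpu)  -- hypothetical decoder
 *     uint8_t bImm;
 *     IEM_OPCODE_GET_NEXT_U8(&bImm);   // fetches, or returns/longjmps on failure
 */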
2143
2144
2145#ifndef IEM_WITH_SETJMP
2146/**
2147 * Fetches the next signed byte from the opcode stream.
2148 *
2149 * @returns Strict VBox status code.
2150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2151 * @param pi8 Where to return the signed byte.
2152 */
2153DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2154{
2155 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2156}
2157#endif /* !IEM_WITH_SETJMP */
2158
2159
2160/**
2161 * Fetches the next signed byte from the opcode stream, returning automatically
2162 * on failure.
2163 *
2164 * @param a_pi8 Where to return the signed byte.
2165 * @remark Implicitly references pVCpu.
2166 */
2167#ifndef IEM_WITH_SETJMP
2168# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2169 do \
2170 { \
2171 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2172 if (rcStrict2 != VINF_SUCCESS) \
2173 return rcStrict2; \
2174 } while (0)
2175#else /* IEM_WITH_SETJMP */
2176# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2177
2178#endif /* IEM_WITH_SETJMP */
2179
2180#ifndef IEM_WITH_SETJMP
2181
2182/**
2183 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2184 *
2185 * @returns Strict VBox status code.
2186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2187 * @param pu16 Where to return the opcode word.
2188 */
2189DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2190{
2191 uint8_t u8;
2192 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2193 if (rcStrict == VINF_SUCCESS)
2194 *pu16 = (int8_t)u8;
2195 return rcStrict;
2196}
2197
2198
2199/**
2200 * Fetches the next signed byte from the opcode stream, extending it to
2201 * unsigned 16-bit.
2202 *
2203 * @returns Strict VBox status code.
2204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2205 * @param pu16 Where to return the unsigned word.
2206 */
2207DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2208{
2209 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2210 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2211 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2212
2213 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2214 pVCpu->iem.s.offOpcode = offOpcode + 1;
2215 return VINF_SUCCESS;
2216}
2217
2218#endif /* !IEM_WITH_SETJMP */
2219
2220/**
2221 * Fetches the next signed byte from the opcode stream, sign-extending it to
2222 * a word and returning automatically on failure.
2223 *
2224 * @param a_pu16 Where to return the word.
2225 * @remark Implicitly references pVCpu.
2226 */
2227#ifndef IEM_WITH_SETJMP
2228# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2229 do \
2230 { \
2231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2232 if (rcStrict2 != VINF_SUCCESS) \
2233 return rcStrict2; \
2234 } while (0)
2235#else
2236# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2237#endif
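
/* Worked example (illustrative): the S8_SX_* fetchers sign-extend, so a
 * displacement byte of 0xF0 (-16) widens to 0xFFF0 / 0xFFFFFFF0 /
 * 0xFFFFFFFFFFFFFFF0 depending on the target width:
 *
 *     uint16_t u16Disp;
 *     IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Disp);   // opcode byte 0xF0 -> u16Disp == 0xFFF0
 */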
2238
2239#ifndef IEM_WITH_SETJMP
2240
2241/**
2242 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2243 *
2244 * @returns Strict VBox status code.
2245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2246 * @param pu32 Where to return the opcode dword.
2247 */
2248DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2249{
2250 uint8_t u8;
2251 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2252 if (rcStrict == VINF_SUCCESS)
2253 *pu32 = (int8_t)u8;
2254 return rcStrict;
2255}
2256
2257
2258/**
2259 * Fetches the next signed byte from the opcode stream, extending it to
2260 * unsigned 32-bit.
2261 *
2262 * @returns Strict VBox status code.
2263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2264 * @param pu32 Where to return the unsigned dword.
2265 */
2266DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2267{
2268 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2269 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2270 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2271
2272 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2273 pVCpu->iem.s.offOpcode = offOpcode + 1;
2274 return VINF_SUCCESS;
2275}
2276
2277#endif /* !IEM_WITH_SETJMP */
2278
2279/**
2280 * Fetches the next signed byte from the opcode stream, sign-extending it to
2281 * a double word and returning automatically on failure.
2282 *
2283 * @param a_pu32 Where to return the double word.
2284 * @remark Implicitly references pVCpu.
2285 */
2286#ifndef IEM_WITH_SETJMP
2287#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2288 do \
2289 { \
2290 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2291 if (rcStrict2 != VINF_SUCCESS) \
2292 return rcStrict2; \
2293 } while (0)
2294#else
2295# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2296#endif
2297
2298#ifndef IEM_WITH_SETJMP
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2302 *
2303 * @returns Strict VBox status code.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 * @param pu64 Where to return the opcode qword.
2306 */
2307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2308{
2309 uint8_t u8;
2310 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2311 if (rcStrict == VINF_SUCCESS)
2312 *pu64 = (int8_t)u8;
2313 return rcStrict;
2314}
2315
2316
2317/**
2318 * Fetches the next signed byte from the opcode stream, extending it to
2319 * unsigned 64-bit.
2320 *
2321 * @returns Strict VBox status code.
2322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2323 * @param pu64 Where to return the unsigned qword.
2324 */
2325DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2326{
2327 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2328 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2329 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2330
2331 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2332 pVCpu->iem.s.offOpcode = offOpcode + 1;
2333 return VINF_SUCCESS;
2334}
2335
2336#endif /* !IEM_WITH_SETJMP */
2337
2338
2339/**
2340 * Fetches the next signed byte from the opcode stream, sign-extending it to
2341 * a quad word and returning automatically on failure.
2342 *
2343 * @param a_pu64 Where to return the quad word.
2344 * @remark Implicitly references pVCpu.
2345 */
2346#ifndef IEM_WITH_SETJMP
2347# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2348 do \
2349 { \
2350 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2351 if (rcStrict2 != VINF_SUCCESS) \
2352 return rcStrict2; \
2353 } while (0)
2354#else
2355# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2356#endif
2357
2358
2359#ifndef IEM_WITH_SETJMP
2360/**
2361 * Fetches the next opcode byte, which is a ModR/M byte, noting its position.
2362 *
2363 * @returns Strict VBox status code.
2364 * @param pVCpu The cross context virtual CPU structure of the
2365 * calling thread.
2366 * @param pu8 Where to return the opcode byte.
2367 */
2368DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2369{
2370 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2371 pVCpu->iem.s.offModRm = offOpcode;
2372 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2373 {
2374 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2375 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2376 return VINF_SUCCESS;
2377 }
2378 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2379}
2380#else /* IEM_WITH_SETJMP */
2381/**
2382 * Fetches the next opcode byte, which is a ModR/M byte, noting its position; longjmp on error.
2383 *
2384 * @returns The opcode byte.
2385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2386 */
2387DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2388{
2389# ifdef IEM_WITH_CODE_TLB
2390 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2391 pVCpu->iem.s.offModRm = offBuf;
2392 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2393 if (RT_LIKELY( pbBuf != NULL
2394 && offBuf < pVCpu->iem.s.cbInstrBuf))
2395 {
2396 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2397 return pbBuf[offBuf];
2398 }
2399# else
2400 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2401 pVCpu->iem.s.offModRm = offOpcode;
2402 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2403 {
2404 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2405 return pVCpu->iem.s.abOpcode[offOpcode];
2406 }
2407# endif
2408 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2409}
2410#endif /* IEM_WITH_SETJMP */
2411
2412/**
2413 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2414 * on failure.
2415 *
2416 * Will note down the position of the ModR/M byte for VT-x exits.
2417 *
2418 * @param a_pbRm Where to return the RM opcode byte.
2419 * @remark Implicitly references pVCpu.
2420 */
2421#ifndef IEM_WITH_SETJMP
2422# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2423 do \
2424 { \
2425 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2426 if (rcStrict2 == VINF_SUCCESS) \
2427 { /* likely */ } \
2428 else \
2429 return rcStrict2; \
2430 } while (0)
2431#else
2432# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2433#endif /* IEM_WITH_SETJMP */
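
/* Usage sketch (hypothetical decoder fragment): besides fetching the byte,
 * this variant records where the ModR/M byte sits in the instruction stream
 * so VT-x instruction exit handling can locate it again:
 *
 *     uint8_t bRm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);    // also sets pVCpu->iem.s.offModRm
 */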
2434
2435
2436#ifndef IEM_WITH_SETJMP
2437
2438/**
2439 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2440 *
2441 * @returns Strict VBox status code.
2442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2443 * @param pu16 Where to return the opcode word.
2444 */
2445DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2446{
2447 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2448 if (rcStrict == VINF_SUCCESS)
2449 {
2450 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2451# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2452 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2453# else
2454 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2455# endif
2456 pVCpu->iem.s.offOpcode = offOpcode + 2;
2457 }
2458 else
2459 *pu16 = 0;
2460 return rcStrict;
2461}
2462
2463
2464/**
2465 * Fetches the next opcode word.
2466 *
2467 * @returns Strict VBox status code.
2468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2469 * @param pu16 Where to return the opcode word.
2470 */
2471DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2472{
2473 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2474 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2475 {
2476 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2477# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2478 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2479# else
2480 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2481# endif
2482 return VINF_SUCCESS;
2483 }
2484 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2485}
2486
2487#else /* IEM_WITH_SETJMP */
2488
2489/**
2490 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2491 *
2492 * @returns The opcode word.
2493 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2494 */
2495DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2496{
2497# ifdef IEM_WITH_CODE_TLB
2498 uint16_t u16;
2499 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2500 return u16;
2501# else
2502 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2503 if (rcStrict == VINF_SUCCESS)
2504 {
2505 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2506 pVCpu->iem.s.offOpcode += 2;
2507# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2508 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2509# else
2510 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2511# endif
2512 }
2513 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2514# endif
2515}
2516
2517
2518/**
2519 * Fetches the next opcode word, longjmp on error.
2520 *
2521 * @returns The opcode word.
2522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2523 */
2524DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2525{
2526# ifdef IEM_WITH_CODE_TLB
2527 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2528 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2529 if (RT_LIKELY( pbBuf != NULL
2530 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2531 {
2532 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2533# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2534 return *(uint16_t const *)&pbBuf[offBuf];
2535# else
2536 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2537# endif
2538 }
2539# else
2540 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2541 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2542 {
2543 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2544# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2545 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2546# else
2547 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2548# endif
2549 }
2550# endif
2551 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2552}
2553
2554#endif /* IEM_WITH_SETJMP */
2555
2556
2557/**
2558 * Fetches the next opcode word, returns automatically on failure.
2559 *
2560 * @param a_pu16 Where to return the opcode word.
2561 * @remark Implicitly references pVCpu.
2562 */
2563#ifndef IEM_WITH_SETJMP
2564# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2565 do \
2566 { \
2567 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2568 if (rcStrict2 != VINF_SUCCESS) \
2569 return rcStrict2; \
2570 } while (0)
2571#else
2572# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2573#endif
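
/* Worked example (illustrative): opcode words are composed little-endian
 * from the buffered bytes, matching what an unaligned 16-bit read would give:
 *
 *     abOpcode[offOpcode]     == 0x34
 *     abOpcode[offOpcode + 1] == 0x12
 *     RT_MAKE_U16(0x34, 0x12) == 0x1234   // same as *(uint16_t const *)&abOpcode[offOpcode]
 */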
2574
2575#ifndef IEM_WITH_SETJMP
2576
2577/**
2578 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pu32 Where to return the opcode double word.
2583 */
2584DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2585{
2586 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2587 if (rcStrict == VINF_SUCCESS)
2588 {
2589 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2590 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2591 pVCpu->iem.s.offOpcode = offOpcode + 2;
2592 }
2593 else
2594 *pu32 = 0;
2595 return rcStrict;
2596}
2597
2598
2599/**
2600 * Fetches the next opcode word, zero extending it to a double word.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param pu32 Where to return the opcode double word.
2605 */
2606DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2607{
2608 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2609 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2610 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2611
2612 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2613 pVCpu->iem.s.offOpcode = offOpcode + 2;
2614 return VINF_SUCCESS;
2615}
2616
2617#endif /* !IEM_WITH_SETJMP */
2618
2619
2620/**
2621 * Fetches the next opcode word and zero extends it to a double word, returns
2622 * automatically on failure.
2623 *
2624 * @param a_pu32 Where to return the opcode double word.
2625 * @remark Implicitly references pVCpu.
2626 */
2627#ifndef IEM_WITH_SETJMP
2628# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2629 do \
2630 { \
2631 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2632 if (rcStrict2 != VINF_SUCCESS) \
2633 return rcStrict2; \
2634 } while (0)
2635#else
2636# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2637#endif
2638
2639#ifndef IEM_WITH_SETJMP
2640
2641/**
2642 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2643 *
2644 * @returns Strict VBox status code.
2645 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2646 * @param pu64 Where to return the opcode quad word.
2647 */
2648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2649{
2650 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2651 if (rcStrict == VINF_SUCCESS)
2652 {
2653 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2654 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2655 pVCpu->iem.s.offOpcode = offOpcode + 2;
2656 }
2657 else
2658 *pu64 = 0;
2659 return rcStrict;
2660}
2661
2662
2663/**
2664 * Fetches the next opcode word, zero extending it to a quad word.
2665 *
2666 * @returns Strict VBox status code.
2667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2668 * @param pu64 Where to return the opcode quad word.
2669 */
2670DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2671{
2672 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2673 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2674 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2675
2676 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2677 pVCpu->iem.s.offOpcode = offOpcode + 2;
2678 return VINF_SUCCESS;
2679}
2680
2681#endif /* !IEM_WITH_SETJMP */
2682
2683/**
2684 * Fetches the next opcode word and zero extends it to a quad word, returns
2685 * automatically on failure.
2686 *
2687 * @param a_pu64 Where to return the opcode quad word.
2688 * @remark Implicitly references pVCpu.
2689 */
2690#ifndef IEM_WITH_SETJMP
2691# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2692 do \
2693 { \
2694 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2695 if (rcStrict2 != VINF_SUCCESS) \
2696 return rcStrict2; \
2697 } while (0)
2698#else
2699# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2700#endif
2701
2702
2703#ifndef IEM_WITH_SETJMP
2704/**
2705 * Fetches the next signed word from the opcode stream.
2706 *
2707 * @returns Strict VBox status code.
2708 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2709 * @param pi16 Where to return the signed word.
2710 */
2711DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2712{
2713 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2714}
2715#endif /* !IEM_WITH_SETJMP */
2716
2717
2718/**
2719 * Fetches the next signed word from the opcode stream, returning automatically
2720 * on failure.
2721 *
2722 * @param a_pi16 Where to return the signed word.
2723 * @remark Implicitly references pVCpu.
2724 */
2725#ifndef IEM_WITH_SETJMP
2726# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2727 do \
2728 { \
2729 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2730 if (rcStrict2 != VINF_SUCCESS) \
2731 return rcStrict2; \
2732 } while (0)
2733#else
2734# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2735#endif
2736
2737#ifndef IEM_WITH_SETJMP
2738
2739/**
2740 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2741 *
2742 * @returns Strict VBox status code.
2743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2744 * @param pu32 Where to return the opcode dword.
2745 */
2746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2747{
2748 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2749 if (rcStrict == VINF_SUCCESS)
2750 {
2751 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2752# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2753 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2754# else
2755 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2756 pVCpu->iem.s.abOpcode[offOpcode + 1],
2757 pVCpu->iem.s.abOpcode[offOpcode + 2],
2758 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2759# endif
2760 pVCpu->iem.s.offOpcode = offOpcode + 4;
2761 }
2762 else
2763 *pu32 = 0;
2764 return rcStrict;
2765}
2766
2767
2768/**
2769 * Fetches the next opcode dword.
2770 *
2771 * @returns Strict VBox status code.
2772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2773 * @param pu32 Where to return the opcode double word.
2774 */
2775DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2776{
2777 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2778 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2779 {
2780 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2781# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2782 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2783# else
2784 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2785 pVCpu->iem.s.abOpcode[offOpcode + 1],
2786 pVCpu->iem.s.abOpcode[offOpcode + 2],
2787 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2788# endif
2789 return VINF_SUCCESS;
2790 }
2791 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2792}
2793
2794#else /* IEM_WITH_SETJMP */
2795
2796/**
2797 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2798 *
2799 * @returns The opcode dword.
2800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2801 */
2802DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2803{
2804# ifdef IEM_WITH_CODE_TLB
2805 uint32_t u32;
2806 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2807 return u32;
2808# else
2809 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2810 if (rcStrict == VINF_SUCCESS)
2811 {
2812 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2813 pVCpu->iem.s.offOpcode = offOpcode + 4;
2814# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2815 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2816# else
2817 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2818 pVCpu->iem.s.abOpcode[offOpcode + 1],
2819 pVCpu->iem.s.abOpcode[offOpcode + 2],
2820 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2821# endif
2822 }
2823 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2824# endif
2825}
2826
2827
2828/**
2829 * Fetches the next opcode dword, longjmp on error.
2830 *
2831 * @returns The opcode dword.
2832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2833 */
2834DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2835{
2836# ifdef IEM_WITH_CODE_TLB
2837 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2838 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2839 if (RT_LIKELY( pbBuf != NULL
2840 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2841 {
2842 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2843# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2844 return *(uint32_t const *)&pbBuf[offBuf];
2845# else
2846 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2847 pbBuf[offBuf + 1],
2848 pbBuf[offBuf + 2],
2849 pbBuf[offBuf + 3]);
2850# endif
2851 }
2852# else
2853 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2854 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2855 {
2856 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2857# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2858 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2859# else
2860 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2861 pVCpu->iem.s.abOpcode[offOpcode + 1],
2862 pVCpu->iem.s.abOpcode[offOpcode + 2],
2863 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2864# endif
2865 }
2866# endif
2867 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2868}
2869
2870#endif /* IEM_WITH_SETJMP */
2871
2872
2873/**
2874 * Fetches the next opcode dword, returns automatically on failure.
2875 *
2876 * @param a_pu32 Where to return the opcode dword.
2877 * @remark Implicitly references pVCpu.
2878 */
2879#ifndef IEM_WITH_SETJMP
2880# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2881 do \
2882 { \
2883 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2884 if (rcStrict2 != VINF_SUCCESS) \
2885 return rcStrict2; \
2886 } while (0)
2887#else
2888# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2889#endif
2890
2891#ifndef IEM_WITH_SETJMP
2892
2893/**
2894 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2895 *
2896 * @returns Strict VBox status code.
2897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2898 * @param pu64 Where to return the opcode quad word.
2899 */
2900DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2901{
2902 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2903 if (rcStrict == VINF_SUCCESS)
2904 {
2905 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2906 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2907 pVCpu->iem.s.abOpcode[offOpcode + 1],
2908 pVCpu->iem.s.abOpcode[offOpcode + 2],
2909 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2910 pVCpu->iem.s.offOpcode = offOpcode + 4;
2911 }
2912 else
2913 *pu64 = 0;
2914 return rcStrict;
2915}
2916
2917
2918/**
2919 * Fetches the next opcode dword, zero extending it to a quad word.
2920 *
2921 * @returns Strict VBox status code.
2922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2923 * @param pu64 Where to return the opcode quad word.
2924 */
2925DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2926{
2927 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2928 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2929 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2930
2931 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2932 pVCpu->iem.s.abOpcode[offOpcode + 1],
2933 pVCpu->iem.s.abOpcode[offOpcode + 2],
2934 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2935 pVCpu->iem.s.offOpcode = offOpcode + 4;
2936 return VINF_SUCCESS;
2937}
2938
2939#endif /* !IEM_WITH_SETJMP */
2940
2941
2942/**
2943 * Fetches the next opcode dword and zero extends it to a quad word, returns
2944 * automatically on failure.
2945 *
2946 * @param a_pu64 Where to return the opcode quad word.
2947 * @remark Implicitly references pVCpu.
2948 */
2949#ifndef IEM_WITH_SETJMP
2950# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2951 do \
2952 { \
2953 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2954 if (rcStrict2 != VINF_SUCCESS) \
2955 return rcStrict2; \
2956 } while (0)
2957#else
2958# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2959#endif
2960
2961
2962#ifndef IEM_WITH_SETJMP
2963/**
2964 * Fetches the next signed double word from the opcode stream.
2965 *
2966 * @returns Strict VBox status code.
2967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2968 * @param pi32 Where to return the signed double word.
2969 */
2970DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2971{
2972 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2973}
2974#endif
2975
2976/**
2977 * Fetches the next signed double word from the opcode stream, returning
2978 * automatically on failure.
2979 *
2980 * @param a_pi32 Where to return the signed double word.
2981 * @remark Implicitly references pVCpu.
2982 */
2983#ifndef IEM_WITH_SETJMP
2984# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2985 do \
2986 { \
2987 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2988 if (rcStrict2 != VINF_SUCCESS) \
2989 return rcStrict2; \
2990 } while (0)
2991#else
2992# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2993#endif
2994
2995#ifndef IEM_WITH_SETJMP
2996
2997/**
2998 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2999 *
3000 * @returns Strict VBox status code.
3001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3002 * @param pu64 Where to return the opcode qword.
3003 */
3004DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3005{
3006 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3007 if (rcStrict == VINF_SUCCESS)
3008 {
3009 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3010 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3011 pVCpu->iem.s.abOpcode[offOpcode + 1],
3012 pVCpu->iem.s.abOpcode[offOpcode + 2],
3013 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3014 pVCpu->iem.s.offOpcode = offOpcode + 4;
3015 }
3016 else
3017 *pu64 = 0;
3018 return rcStrict;
3019}
3020
3021
3022/**
3023 * Fetches the next opcode dword, sign extending it into a quad word.
3024 *
3025 * @returns Strict VBox status code.
3026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3027 * @param pu64 Where to return the opcode quad word.
3028 */
3029DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3030{
3031 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3032 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3033 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3034
3035 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3036 pVCpu->iem.s.abOpcode[offOpcode + 1],
3037 pVCpu->iem.s.abOpcode[offOpcode + 2],
3038 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3039 *pu64 = i32;
3040 pVCpu->iem.s.offOpcode = offOpcode + 4;
3041 return VINF_SUCCESS;
3042}
3043
3044#endif /* !IEM_WITH_SETJMP */
3045
3046
3047/**
3048 * Fetches the next opcode double word and sign extends it to a quad word,
3049 * returns automatically on failure.
3050 *
3051 * @param a_pu64 Where to return the opcode quad word.
3052 * @remark Implicitly references pVCpu.
3053 */
3054#ifndef IEM_WITH_SETJMP
3055# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3056 do \
3057 { \
3058 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3059 if (rcStrict2 != VINF_SUCCESS) \
3060 return rcStrict2; \
3061 } while (0)
3062#else
3063# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3064#endif
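
/* Worked example (illustrative): a 32-bit displacement/immediate of
 * 0xFFFFFFFF (-1) is sign-extended to 0xFFFFFFFFFFFFFFFF, which is what
 * 64-bit effective address and immediate handling expects:
 *
 *     uint64_t u64;
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64);   // bytes FF FF FF FF -> u64 == UINT64_MAX
 */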
3065
3066#ifndef IEM_WITH_SETJMP
3067
3068/**
3069 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3070 *
3071 * @returns Strict VBox status code.
3072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3073 * @param pu64 Where to return the opcode qword.
3074 */
3075DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3076{
3077 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3078 if (rcStrict == VINF_SUCCESS)
3079 {
3080 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3081# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3082 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3083# else
3084 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3085 pVCpu->iem.s.abOpcode[offOpcode + 1],
3086 pVCpu->iem.s.abOpcode[offOpcode + 2],
3087 pVCpu->iem.s.abOpcode[offOpcode + 3],
3088 pVCpu->iem.s.abOpcode[offOpcode + 4],
3089 pVCpu->iem.s.abOpcode[offOpcode + 5],
3090 pVCpu->iem.s.abOpcode[offOpcode + 6],
3091 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3092# endif
3093 pVCpu->iem.s.offOpcode = offOpcode + 8;
3094 }
3095 else
3096 *pu64 = 0;
3097 return rcStrict;
3098}
3099
3100
3101/**
3102 * Fetches the next opcode qword.
3103 *
3104 * @returns Strict VBox status code.
3105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3106 * @param pu64 Where to return the opcode qword.
3107 */
3108DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3109{
3110 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3111 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3112 {
3113# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3114 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3115# else
3116 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3117 pVCpu->iem.s.abOpcode[offOpcode + 1],
3118 pVCpu->iem.s.abOpcode[offOpcode + 2],
3119 pVCpu->iem.s.abOpcode[offOpcode + 3],
3120 pVCpu->iem.s.abOpcode[offOpcode + 4],
3121 pVCpu->iem.s.abOpcode[offOpcode + 5],
3122 pVCpu->iem.s.abOpcode[offOpcode + 6],
3123 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3124# endif
3125 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3126 return VINF_SUCCESS;
3127 }
3128 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3129}
3130
3131#else /* IEM_WITH_SETJMP */
3132
3133/**
3134 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3135 *
3136 * @returns The opcode qword.
3137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3138 */
3139DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3140{
3141# ifdef IEM_WITH_CODE_TLB
3142 uint64_t u64;
3143 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3144 return u64;
3145# else
3146 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3147 if (rcStrict == VINF_SUCCESS)
3148 {
3149 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3150 pVCpu->iem.s.offOpcode = offOpcode + 8;
3151# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3152 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3153# else
3154 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3155 pVCpu->iem.s.abOpcode[offOpcode + 1],
3156 pVCpu->iem.s.abOpcode[offOpcode + 2],
3157 pVCpu->iem.s.abOpcode[offOpcode + 3],
3158 pVCpu->iem.s.abOpcode[offOpcode + 4],
3159 pVCpu->iem.s.abOpcode[offOpcode + 5],
3160 pVCpu->iem.s.abOpcode[offOpcode + 6],
3161 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3162# endif
3163 }
3164 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3165# endif
3166}
3167
3168
3169/**
3170 * Fetches the next opcode qword, longjmp on error.
3171 *
3172 * @returns The opcode qword.
3173 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3174 */
3175DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3176{
3177# ifdef IEM_WITH_CODE_TLB
3178 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3179 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3180 if (RT_LIKELY( pbBuf != NULL
3181 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3182 {
3183 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3184# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3185 return *(uint64_t const *)&pbBuf[offBuf];
3186# else
3187 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3188 pbBuf[offBuf + 1],
3189 pbBuf[offBuf + 2],
3190 pbBuf[offBuf + 3],
3191 pbBuf[offBuf + 4],
3192 pbBuf[offBuf + 5],
3193 pbBuf[offBuf + 6],
3194 pbBuf[offBuf + 7]);
3195# endif
3196 }
3197# else
3198 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3199 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3200 {
3201 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3202# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3203 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3204# else
3205 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3206 pVCpu->iem.s.abOpcode[offOpcode + 1],
3207 pVCpu->iem.s.abOpcode[offOpcode + 2],
3208 pVCpu->iem.s.abOpcode[offOpcode + 3],
3209 pVCpu->iem.s.abOpcode[offOpcode + 4],
3210 pVCpu->iem.s.abOpcode[offOpcode + 5],
3211 pVCpu->iem.s.abOpcode[offOpcode + 6],
3212 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3213# endif
3214 }
3215# endif
3216 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3217}
3218
3219#endif /* IEM_WITH_SETJMP */
3220
3221/**
3222 * Fetches the next opcode quad word, returns automatically on failure.
3223 *
3224 * @param a_pu64 Where to return the opcode quad word.
3225 * @remark Implicitly references pVCpu.
3226 */
3227#ifndef IEM_WITH_SETJMP
3228# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3229 do \
3230 { \
3231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3232 if (rcStrict2 != VINF_SUCCESS) \
3233 return rcStrict2; \
3234 } while (0)
3235#else
3236# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3237#endif
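
/* Illustrative sketch (hypothetical helper, not part of the original code): fetching a
 * full 64-bit immediate, e.g. for a MOV reg64, imm64 style encoding.  Disabled example.
 */
#if 0
static VBOXSTRICTRC iemExampleFetchImm64(PVMCPUCC pVCpu, uint64_t *pu64Imm)
{
    IEM_OPCODE_GET_NEXT_U64(pu64Imm); /* returns (or longjmps) if the eight bytes cannot be fetched */
    return VINF_SUCCESS;
}
#endif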
3238
3239
3240/** @name Misc Worker Functions.
3241 * @{
3242 */
3243
3244/**
3245 * Gets the exception class for the specified exception vector.
3246 *
3247 * @returns The class of the specified exception.
3248 * @param uVector The exception vector.
3249 */
3250IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3251{
3252 Assert(uVector <= X86_XCPT_LAST);
3253 switch (uVector)
3254 {
3255 case X86_XCPT_DE:
3256 case X86_XCPT_TS:
3257 case X86_XCPT_NP:
3258 case X86_XCPT_SS:
3259 case X86_XCPT_GP:
3260 case X86_XCPT_SX: /* AMD only */
3261 return IEMXCPTCLASS_CONTRIBUTORY;
3262
3263 case X86_XCPT_PF:
3264 case X86_XCPT_VE: /* Intel only */
3265 return IEMXCPTCLASS_PAGE_FAULT;
3266
3267 case X86_XCPT_DF:
3268 return IEMXCPTCLASS_DOUBLE_FAULT;
3269 }
3270 return IEMXCPTCLASS_BENIGN;
3271}
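
/* Illustrative sketch (not in the original file; the iemExample name is made up):
 * how the classifier above maps a few well known vectors.  Disabled example only.
 */
#if 0
static void iemExampleXcptClasses(void)
{
    IEMXCPTCLASS enmGp = iemGetXcptClass(X86_XCPT_GP); /* IEMXCPTCLASS_CONTRIBUTORY */
    IEMXCPTCLASS enmPf = iemGetXcptClass(X86_XCPT_PF); /* IEMXCPTCLASS_PAGE_FAULT */
    IEMXCPTCLASS enmUd = iemGetXcptClass(X86_XCPT_UD); /* IEMXCPTCLASS_BENIGN */
    RT_NOREF3(enmGp, enmPf, enmUd);
}
#endif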
3272
3273
3274/**
3275 * Evaluates how to handle an exception caused during delivery of another event
3276 * (exception / interrupt).
3277 *
3278 * @returns How to handle the recursive exception.
3279 * @param pVCpu The cross context virtual CPU structure of the
3280 * calling thread.
3281 * @param fPrevFlags The flags of the previous event.
3282 * @param uPrevVector The vector of the previous event.
3283 * @param fCurFlags The flags of the current exception.
3284 * @param uCurVector The vector of the current exception.
3285 * @param pfXcptRaiseInfo Where to store additional information about the
3286 * exception condition. Optional.
3287 */
3288VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3289 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3290{
3291 /*
3292 * Only CPU exceptions can be raised while delivering other events; software interrupt
3293 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3294 */
3295 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3296 Assert(pVCpu); RT_NOREF(pVCpu);
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3298
3299 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3300 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3301 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3302 {
3303 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3304 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3305 {
3306 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3307 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3308 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3309 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3310 {
3311 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3312 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3313 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3314 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3315 uCurVector, pVCpu->cpum.GstCtx.cr2));
3316 }
3317 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3318 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3319 {
3320 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3321 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3322 }
3323 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3324 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3326 {
3327 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3329 }
3330 }
3331 else
3332 {
3333 if (uPrevVector == X86_XCPT_NMI)
3334 {
3335 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3336 if (uCurVector == X86_XCPT_PF)
3337 {
3338 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3339 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3340 }
3341 }
3342 else if ( uPrevVector == X86_XCPT_AC
3343 && uCurVector == X86_XCPT_AC)
3344 {
3345 enmRaise = IEMXCPTRAISE_CPU_HANG;
3346 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3347 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3348 }
3349 }
3350 }
3351 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3352 {
3353 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3354 if (uCurVector == X86_XCPT_PF)
3355 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3356 }
3357 else
3358 {
3359 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3360 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3361 }
3362
3363 if (pfXcptRaiseInfo)
3364 *pfXcptRaiseInfo = fRaiseInfo;
3365 return enmRaise;
3366}
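
/* Illustrative sketch (not part of the original code): a #GP raised while delivering a
 * #PF is a contributory exception on top of a page fault and thus folds into a double
 * fault, with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT as the extra info.  Disabled
 * example only; the flag and vector constants are the real ones used above.
 */
#if 0
static void iemExampleRecursiveXcpt(PVMCPUCC pVCpu)
{
    IEMXCPTRAISEINFO fInfo;
    IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* event being delivered */
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* exception raised during delivery */
                                                     &fInfo);
    Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fInfo & IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
    RT_NOREF2(enmRaise, fInfo);
}
#endif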
3367
3368
3369/**
3370 * Enters the CPU shutdown state initiated by a triple fault or other
3371 * unrecoverable conditions.
3372 *
3373 * @returns Strict VBox status code.
3374 * @param pVCpu The cross context virtual CPU structure of the
3375 * calling thread.
3376 */
3377IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3378{
3379 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3380 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3381
3382 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3383 {
3384 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3385 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 RT_NOREF(pVCpu);
3389 return VINF_EM_TRIPLE_FAULT;
3390}
3391
3392
3393/**
3394 * Validates a new SS segment.
3395 *
3396 * @returns VBox strict status code.
3397 * @param pVCpu The cross context virtual CPU structure of the
3398 * calling thread.
3399 * @param NewSS The new SS selector.
3400 * @param uCpl The CPL to load the stack for.
3401 * @param pDesc Where to return the descriptor.
3402 */
3403IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3404{
3405 /* Null selectors are not allowed (we're not called for dispatching
3406 interrupts with SS=0 in long mode). */
3407 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3408 {
3409 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3410 return iemRaiseTaskSwitchFault0(pVCpu);
3411 }
3412
3413 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3414 if ((NewSS & X86_SEL_RPL) != uCpl)
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3417 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3418 }
3419
3420 /*
3421 * Read the descriptor.
3422 */
3423 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3424 if (rcStrict != VINF_SUCCESS)
3425 return rcStrict;
3426
3427 /*
3428 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3429 */
3430 if (!pDesc->Legacy.Gen.u1DescType)
3431 {
3432 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3433 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3434 }
3435
3436 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3437 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3443 {
3444 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3445 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3446 }
3447
3448 /* Is it there? */
3449 /** @todo testcase: Is this checked before the canonical / limit check below? */
3450 if (!pDesc->Legacy.Gen.u1Present)
3451 {
3452 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3453 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3454 }
3455
3456 return VINF_SUCCESS;
3457}
3458
3459
3460/**
3461 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3462 * not (kind of obsolete now).
3463 *
3464 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3465 */
3466#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3467
3468/**
3469 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 * @param a_fEfl The new EFLAGS.
3473 */
3474#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3475
3476/** @} */
3477
3478
3479/** @name Raising Exceptions.
3480 *
3481 * @{
3482 */
3483
3484
3485/**
3486 * Loads the specified stack far pointer from the TSS.
3487 *
3488 * @returns VBox strict status code.
3489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3490 * @param uCpl The CPL to load the stack for.
3491 * @param pSelSS Where to return the new stack segment.
3492 * @param puEsp Where to return the new stack pointer.
3493 */
3494IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3495{
3496 VBOXSTRICTRC rcStrict;
3497 Assert(uCpl < 4);
3498
3499 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3500 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3501 {
3502 /*
3503 * 16-bit TSS (X86TSS16).
3504 */
3505 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3506 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3507 {
3508 uint32_t off = uCpl * 4 + 2;
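 /* 16-bit TSS layout: the SS:SP pairs for CPL 0..2 start at offset 2 and take 4 bytes each (sp0 @ 2, ss0 @ 4, sp1 @ 6, ...), hence uCpl * 4 + 2. */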
3509 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3510 {
3511 /** @todo check actual access pattern here. */
3512 uint32_t u32Tmp = 0; /* gcc maybe... */
3513 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3514 if (rcStrict == VINF_SUCCESS)
3515 {
3516 *puEsp = RT_LOWORD(u32Tmp);
3517 *pSelSS = RT_HIWORD(u32Tmp);
3518 return VINF_SUCCESS;
3519 }
3520 }
3521 else
3522 {
3523 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3524 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3525 }
3526 break;
3527 }
3528
3529 /*
3530 * 32-bit TSS (X86TSS32).
3531 */
3532 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3533 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3534 {
3535 uint32_t off = uCpl * 8 + 4;
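 /* 32-bit TSS layout: the ESP/SS pairs for CPL 0..2 start at offset 4 and take 8 bytes each (esp0 @ 4, ss0 @ 8, esp1 @ 12, ...), hence uCpl * 8 + 4. */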
3536 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3537 {
3538 /** @todo check actual access pattern here. */
3539 uint64_t u64Tmp;
3540 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3541 if (rcStrict == VINF_SUCCESS)
3542 {
3543 *puEsp = u64Tmp & UINT32_MAX;
3544 *pSelSS = (RTSEL)(u64Tmp >> 32);
3545 return VINF_SUCCESS;
3546 }
3547 }
3548 else
3549 {
3550 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3551 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3552 }
3553 break;
3554 }
3555
3556 default:
3557 AssertFailed();
3558 rcStrict = VERR_IEM_IPE_4;
3559 break;
3560 }
3561
3562 *puEsp = 0; /* make gcc happy */
3563 *pSelSS = 0; /* make gcc happy */
3564 return rcStrict;
3565}
3566
3567
3568/**
3569 * Loads the specified stack pointer from the 64-bit TSS.
3570 *
3571 * @returns VBox strict status code.
3572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param uCpl The CPL to load the stack for.
3574 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3575 * @param puRsp Where to return the new stack pointer.
3576 */
3577IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3578{
3579 Assert(uCpl < 4);
3580 Assert(uIst < 8);
3581 *puRsp = 0; /* make gcc happy */
3582
3583 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3584 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3585
3586 uint32_t off;
3587 if (uIst)
3588 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3589 else
3590 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
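 /* rsp0..rsp2 and ist1..ist7 are stored as consecutive qwords in X86TSS64, so scaling the index by sizeof(uint64_t) lands on the right slot. */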
3591 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3592 {
3593 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3594 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596
3597 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3598}
3599
3600
3601/**
3602 * Adjust the CPU state according to the exception being raised.
3603 *
3604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3605 * @param u8Vector The exception that has been raised.
3606 */
3607DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3608{
3609 switch (u8Vector)
3610 {
3611 case X86_XCPT_DB:
3612 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3613 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3614 break;
3615 /** @todo Read the AMD and Intel exception reference... */
3616 }
3617}
3618
3619
3620/**
3621 * Implements exceptions and interrupts for real mode.
3622 *
3623 * @returns VBox strict status code.
3624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3625 * @param cbInstr The number of bytes to offset rIP by in the return
3626 * address.
3627 * @param u8Vector The interrupt / exception vector number.
3628 * @param fFlags The flags.
3629 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3630 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3631 */
3632IEM_STATIC VBOXSTRICTRC
3633iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3634 uint8_t cbInstr,
3635 uint8_t u8Vector,
3636 uint32_t fFlags,
3637 uint16_t uErr,
3638 uint64_t uCr2)
3639{
3640 NOREF(uErr); NOREF(uCr2);
3641 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3642
3643 /*
3644 * Read the IDT entry.
3645 */
3646 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3647 {
3648 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3649 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3650 }
3651 RTFAR16 Idte;
3652 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3653 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3654 {
3655 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3656 return rcStrict;
3657 }
3658
3659 /*
3660 * Push the stack frame.
3661 */
3662 uint16_t *pu16Frame;
3663 uint64_t uNewRsp;
3664 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3665 if (rcStrict != VINF_SUCCESS)
3666 return rcStrict;
3667
3668 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3669#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3670 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3671 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3672 fEfl |= UINT16_C(0xf000);
3673#endif
3674 pu16Frame[2] = (uint16_t)fEfl;
3675 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3676 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
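 /* The 6-byte frame mirrors the hardware push order: FLAGS ends up at the highest address, then CS, with the return IP at the new stack pointer. */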
3677 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3678 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3679 return rcStrict;
3680
3681 /*
3682 * Load the vector address into cs:ip and make exception specific state
3683 * adjustments.
3684 */
3685 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3686 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3687 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3688 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3689 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3690 pVCpu->cpum.GstCtx.rip = Idte.off;
3691 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3692 IEMMISC_SET_EFL(pVCpu, fEfl);
3693
3694 /** @todo do we actually do this in real mode? */
3695 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3696 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3697
3698 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3699}
3700
3701
3702/**
3703 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3704 *
3705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3706 * @param pSReg Pointer to the segment register.
3707 */
3708IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3709{
3710 pSReg->Sel = 0;
3711 pSReg->ValidSel = 0;
3712 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3713 {
3714 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3715 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3716 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3717 }
3718 else
3719 {
3720 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3721 /** @todo check this on AMD-V */
3722 pSReg->u64Base = 0;
3723 pSReg->u32Limit = 0;
3724 }
3725}
3726
3727
3728/**
3729 * Loads a segment selector during a task switch in V8086 mode.
3730 *
3731 * @param pSReg Pointer to the segment register.
3732 * @param uSel The selector value to load.
3733 */
3734IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3735{
3736 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3737 pSReg->Sel = uSel;
3738 pSReg->ValidSel = uSel;
3739 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3740 pSReg->u64Base = uSel << 4;
3741 pSReg->u32Limit = 0xffff;
3742 pSReg->Attr.u = 0xf3;
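 /* 0xf3 = present, DPL=3, read/write accessed data segment - the fixed attributes of a V8086 mode segment. */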
3743}
3744
3745
3746/**
3747 * Loads a NULL data selector into a selector register, both the hidden and
3748 * visible parts, in protected mode.
3749 *
3750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3751 * @param pSReg Pointer to the segment register.
3752 * @param uRpl The RPL.
3753 */
3754IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3755{
3756 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3757 * data selector in protected mode. */
3758 pSReg->Sel = uRpl;
3759 pSReg->ValidSel = uRpl;
3760 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3761 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3762 {
3763 /* VT-x (Intel 3960x) observed doing something like this. */
3764 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3765 pSReg->u32Limit = UINT32_MAX;
3766 pSReg->u64Base = 0;
3767 }
3768 else
3769 {
3770 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3771 pSReg->u32Limit = 0;
3772 pSReg->u64Base = 0;
3773 }
3774}
3775
3776
3777/**
3778 * Loads a segment selector during a task switch in protected mode.
3779 *
3780 * In this task switch scenario, we would throw \#TS exceptions rather than
3781 * \#GPs.
3782 *
3783 * @returns VBox strict status code.
3784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3785 * @param pSReg Pointer to the segment register.
3786 * @param uSel The new selector value.
3787 *
3788 * @remarks This does _not_ handle CS or SS.
3789 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3790 */
3791IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3792{
3793 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3794
3795 /* Null data selector. */
3796 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3797 {
3798 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3799 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3800 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3801 return VINF_SUCCESS;
3802 }
3803
3804 /* Fetch the descriptor. */
3805 IEMSELDESC Desc;
3806 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3807 if (rcStrict != VINF_SUCCESS)
3808 {
3809 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3810 VBOXSTRICTRC_VAL(rcStrict)));
3811 return rcStrict;
3812 }
3813
3814 /* Must be a data segment or readable code segment. */
3815 if ( !Desc.Legacy.Gen.u1DescType
3816 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3817 {
3818 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3819 Desc.Legacy.Gen.u4Type));
3820 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3821 }
3822
3823 /* Check privileges for data segments and non-conforming code segments. */
3824 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3825 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3826 {
3827 /* The RPL and the new CPL must be less than or equal to the DPL. */
3828 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3829 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3830 {
3831 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3832 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3833 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3834 }
3835 }
3836
3837 /* Is it there? */
3838 if (!Desc.Legacy.Gen.u1Present)
3839 {
3840 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3841 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3842 }
3843
3844 /* The base and limit. */
3845 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3846 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3847
3848 /*
3849 * Ok, everything checked out fine. Now set the accessed bit before
3850 * committing the result into the registers.
3851 */
3852 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3853 {
3854 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3855 if (rcStrict != VINF_SUCCESS)
3856 return rcStrict;
3857 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3858 }
3859
3860 /* Commit */
3861 pSReg->Sel = uSel;
3862 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3863 pSReg->u32Limit = cbLimit;
3864 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3865 pSReg->ValidSel = uSel;
3866 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3867 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3868 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3869
3870 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3871 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3872 return VINF_SUCCESS;
3873}
3874
3875
3876/**
3877 * Performs a task switch.
3878 *
3879 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3880 * caller is responsible for performing the necessary checks (like DPL, TSS
3881 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3882 * reference for JMP, CALL, IRET.
3883 *
3884 * If the task switch is due to a software interrupt or hardware exception,
3885 * the caller is responsible for validating the TSS selector and descriptor. See
3886 * Intel Instruction reference for INT n.
3887 *
3888 * @returns VBox strict status code.
3889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3890 * @param enmTaskSwitch The cause of the task switch.
3891 * @param uNextEip The EIP effective after the task switch.
3892 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3893 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3894 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3895 * @param SelTSS The TSS selector of the new task.
3896 * @param pNewDescTSS Pointer to the new TSS descriptor.
3897 */
3898IEM_STATIC VBOXSTRICTRC
3899iemTaskSwitch(PVMCPUCC pVCpu,
3900 IEMTASKSWITCH enmTaskSwitch,
3901 uint32_t uNextEip,
3902 uint32_t fFlags,
3903 uint16_t uErr,
3904 uint64_t uCr2,
3905 RTSEL SelTSS,
3906 PIEMSELDESC pNewDescTSS)
3907{
3908 Assert(!IEM_IS_REAL_MODE(pVCpu));
3909 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3910 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3911
3912 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3913 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3914 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3915 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3916 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3917
3918 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3919 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3920
3921 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3922 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3923
3924 /* Update CR2 in case it's a page-fault. */
3925 /** @todo This should probably be done much earlier in IEM/PGM. See
3926 * @bugref{5653#c49}. */
3927 if (fFlags & IEM_XCPT_FLAGS_CR2)
3928 pVCpu->cpum.GstCtx.cr2 = uCr2;
3929
3930 /*
3931 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3932 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3933 */
3934 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3935 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3936 if (uNewTSSLimit < uNewTSSLimitMin)
3937 {
3938 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3939 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3940 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3941 }
3942
3943 /*
3944 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3945 * The new TSS must have been read and validated (DPL, limits etc.) before a
3946 * task-switch VM-exit commences.
3947 *
3948 * See Intel spec. 25.4.2 "Treatment of Task Switches"
3949 */
3950 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3951 {
3952 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3953 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3954 }
3955
3956 /*
3957 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3958 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3959 */
3960 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3961 {
3962 uint32_t const uExitInfo1 = SelTSS;
3963 uint32_t uExitInfo2 = uErr;
3964 switch (enmTaskSwitch)
3965 {
3966 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3967 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3968 default: break;
3969 }
3970 if (fFlags & IEM_XCPT_FLAGS_ERR)
3971 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3972 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3973 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3974
3975 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3976 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3977 RT_NOREF2(uExitInfo1, uExitInfo2);
3978 }
3979
3980 /*
3981 * Check the current TSS limit. The last data written to the current TSS during the
3982 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3983 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3984 *
3985 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3986 * end up with smaller than "legal" TSS limits.
3987 */
3988 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3989 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3990 if (uCurTSSLimit < uCurTSSLimitMin)
3991 {
3992 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3993 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3994 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3995 }
3996
3997 /*
3998 * Verify that the new TSS can be accessed and map it. Map only the required contents
3999 * and not the entire TSS.
4000 */
4001 void *pvNewTSS;
4002 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4003 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4004 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4005 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4006 * not perform correct translation if this happens. See Intel spec. 7.2.1
4007 * "Task-State Segment" */
4008 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4009 if (rcStrict != VINF_SUCCESS)
4010 {
4011 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4012 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4013 return rcStrict;
4014 }
4015
4016 /*
4017 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4018 */
4019 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4020 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4021 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4022 {
4023 PX86DESC pDescCurTSS;
4024 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4025 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4026 if (rcStrict != VINF_SUCCESS)
4027 {
4028 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4029 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4030 return rcStrict;
4031 }
4032
4033 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
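 /* Clearing the busy bit (bit 1 of the type) turns 0xB (386 TSS busy) into 0x9 (available) and 0x3 (286 TSS busy) into 0x1. */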
4034 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4035 if (rcStrict != VINF_SUCCESS)
4036 {
4037 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4038 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4039 return rcStrict;
4040 }
4041
4042 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4043 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4044 {
4045 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4046 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4047 u32EFlags &= ~X86_EFL_NT;
4048 }
4049 }
4050
4051 /*
4052 * Save the CPU state into the current TSS.
4053 */
4054 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4055 if (GCPtrNewTSS == GCPtrCurTSS)
4056 {
4057 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4058 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4059 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4060 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4061 pVCpu->cpum.GstCtx.ldtr.Sel));
4062 }
4063 if (fIsNewTSS386)
4064 {
4065 /*
4066 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4067 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4068 */
4069 void *pvCurTSS32;
4070 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4071 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4072 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4073 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4074 if (rcStrict != VINF_SUCCESS)
4075 {
4076 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4077 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4078 return rcStrict;
4079 }
4080
4081 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4082 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4083 pCurTSS32->eip = uNextEip;
4084 pCurTSS32->eflags = u32EFlags;
4085 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4086 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4087 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4088 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4089 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4090 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4091 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4092 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4093 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4094 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4095 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4096 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4097 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4098 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4099
4100 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4101 if (rcStrict != VINF_SUCCESS)
4102 {
4103 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4104 VBOXSTRICTRC_VAL(rcStrict)));
4105 return rcStrict;
4106 }
4107 }
4108 else
4109 {
4110 /*
4111 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4112 */
4113 void *pvCurTSS16;
4114 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4115 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4116 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4117 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4118 if (rcStrict != VINF_SUCCESS)
4119 {
4120 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4121 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4122 return rcStrict;
4123 }
4124
4125 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4126 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4127 pCurTSS16->ip = uNextEip;
4128 pCurTSS16->flags = u32EFlags;
4129 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4130 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4131 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4132 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4133 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4134 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4135 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4136 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4137 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4138 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4139 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4140 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4141
4142 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4143 if (rcStrict != VINF_SUCCESS)
4144 {
4145 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4146 VBOXSTRICTRC_VAL(rcStrict)));
4147 return rcStrict;
4148 }
4149 }
4150
4151 /*
4152 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4153 */
4154 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4155 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4156 {
4157 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4158 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4159 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4160 }
4161
4162 /*
4163 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4164 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4165 */
4166 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4167 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4168 bool fNewDebugTrap;
4169 if (fIsNewTSS386)
4170 {
4171 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4172 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4173 uNewEip = pNewTSS32->eip;
4174 uNewEflags = pNewTSS32->eflags;
4175 uNewEax = pNewTSS32->eax;
4176 uNewEcx = pNewTSS32->ecx;
4177 uNewEdx = pNewTSS32->edx;
4178 uNewEbx = pNewTSS32->ebx;
4179 uNewEsp = pNewTSS32->esp;
4180 uNewEbp = pNewTSS32->ebp;
4181 uNewEsi = pNewTSS32->esi;
4182 uNewEdi = pNewTSS32->edi;
4183 uNewES = pNewTSS32->es;
4184 uNewCS = pNewTSS32->cs;
4185 uNewSS = pNewTSS32->ss;
4186 uNewDS = pNewTSS32->ds;
4187 uNewFS = pNewTSS32->fs;
4188 uNewGS = pNewTSS32->gs;
4189 uNewLdt = pNewTSS32->selLdt;
4190 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4191 }
4192 else
4193 {
4194 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4195 uNewCr3 = 0;
4196 uNewEip = pNewTSS16->ip;
4197 uNewEflags = pNewTSS16->flags;
4198 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4199 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4200 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4201 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4202 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4203 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4204 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4205 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4206 uNewES = pNewTSS16->es;
4207 uNewCS = pNewTSS16->cs;
4208 uNewSS = pNewTSS16->ss;
4209 uNewDS = pNewTSS16->ds;
4210 uNewFS = 0;
4211 uNewGS = 0;
4212 uNewLdt = pNewTSS16->selLdt;
4213 fNewDebugTrap = false;
4214 }
4215
4216 if (GCPtrNewTSS == GCPtrCurTSS)
4217 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4218 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4219
4220 /*
4221 * We're done accessing the new TSS.
4222 */
4223 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4224 if (rcStrict != VINF_SUCCESS)
4225 {
4226 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /*
4231 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4232 */
4233 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4234 {
4235 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4236 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4237 if (rcStrict != VINF_SUCCESS)
4238 {
4239 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4240 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4241 return rcStrict;
4242 }
4243
4244 /* Check that the descriptor indicates the new TSS is available (not busy). */
4245 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4246 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4247 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4248
4249 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4250 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4251 if (rcStrict != VINF_SUCCESS)
4252 {
4253 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4254 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4255 return rcStrict;
4256 }
4257 }
4258
4259 /*
4260 * From this point on, we're technically in the new task. We will defer exceptions
4261 * until the completion of the task switch but before executing any instructions in the new task.
4262 */
4263 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4264 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4265 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4266 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4267 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4268 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4269 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4270
4271 /* Set the busy bit in TR. */
4272 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4273 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4274 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4275 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4276 {
4277 uNewEflags |= X86_EFL_NT;
4278 }
4279
4280 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4281 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4282 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4283
4284 pVCpu->cpum.GstCtx.eip = uNewEip;
4285 pVCpu->cpum.GstCtx.eax = uNewEax;
4286 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4287 pVCpu->cpum.GstCtx.edx = uNewEdx;
4288 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4289 pVCpu->cpum.GstCtx.esp = uNewEsp;
4290 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4291 pVCpu->cpum.GstCtx.esi = uNewEsi;
4292 pVCpu->cpum.GstCtx.edi = uNewEdi;
4293
4294 uNewEflags &= X86_EFL_LIVE_MASK;
4295 uNewEflags |= X86_EFL_RA1_MASK;
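 /* Bit 1 of EFLAGS is reserved and always reads as one. */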
4296 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4297
4298 /*
4299 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4300 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4301 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4302 */
4303 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4304 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4305
4306 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4307 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4308
4309 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4310 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4311
4312 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4313 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4314
4315 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4316 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4317
4318 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4319 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4320 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4321
4322 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4323 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4324 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4325 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4326
4327 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4328 {
4329 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4330 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4331 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4332 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4333 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4334 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4335 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4336 }
4337
4338 /*
4339 * Switch CR3 for the new task.
4340 */
4341 if ( fIsNewTSS386
4342 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4343 {
4344 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4345 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4346 AssertRCSuccessReturn(rc, rc);
4347
4348 /* Inform PGM. */
4349 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4350 AssertRCReturn(rc, rc);
4351 /* ignore informational status codes */
4352
4353 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4354 }
4355
4356 /*
4357 * Switch LDTR for the new task.
4358 */
4359 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4360 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4361 else
4362 {
4363 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4364
4365 IEMSELDESC DescNewLdt;
4366 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4367 if (rcStrict != VINF_SUCCESS)
4368 {
4369 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4370 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4371 return rcStrict;
4372 }
4373 if ( !DescNewLdt.Legacy.Gen.u1Present
4374 || DescNewLdt.Legacy.Gen.u1DescType
4375 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4376 {
4377 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4378 uNewLdt, DescNewLdt.Legacy.u));
4379 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4380 }
4381
4382 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4383 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4384 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4385 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4386 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4387 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4388 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4389 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4390 }
4391
4392 IEMSELDESC DescSS;
4393 if (IEM_IS_V86_MODE(pVCpu))
4394 {
4395 pVCpu->iem.s.uCpl = 3;
4396 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4397 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4398 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4399 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4400 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4401 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4402
4403 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4404 DescSS.Legacy.u = 0;
4405 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4406 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4407 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4408 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4409 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4410 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4411 DescSS.Legacy.Gen.u2Dpl = 3;
4412 }
4413 else
4414 {
4415 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4416
4417 /*
4418 * Load the stack segment for the new task.
4419 */
4420 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4421 {
4422 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4423 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4424 }
4425
4426 /* Fetch the descriptor. */
4427 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4428 if (rcStrict != VINF_SUCCESS)
4429 {
4430 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4431 VBOXSTRICTRC_VAL(rcStrict)));
4432 return rcStrict;
4433 }
4434
4435 /* SS must be a data segment and writable. */
4436 if ( !DescSS.Legacy.Gen.u1DescType
4437 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4438 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4439 {
4440 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4441 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4442 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4443 }
4444
4445 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4446 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4447 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4448 {
4449 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4450 uNewCpl));
4451 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 /* Is it there? */
4455 if (!DescSS.Legacy.Gen.u1Present)
4456 {
4457 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4458 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4459 }
4460
4461 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4462 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4463
4464 /* Set the accessed bit before committing the result into SS. */
4465 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4466 {
4467 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4468 if (rcStrict != VINF_SUCCESS)
4469 return rcStrict;
4470 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4471 }
4472
4473 /* Commit SS. */
4474 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4475 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4476 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4477 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4478 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4479 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4480 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4481
4482 /* CPL has changed, update IEM before loading rest of segments. */
4483 pVCpu->iem.s.uCpl = uNewCpl;
4484
4485 /*
4486 * Load the data segments for the new task.
4487 */
4488 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4489 if (rcStrict != VINF_SUCCESS)
4490 return rcStrict;
4491 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4492 if (rcStrict != VINF_SUCCESS)
4493 return rcStrict;
4494 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4495 if (rcStrict != VINF_SUCCESS)
4496 return rcStrict;
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500
4501 /*
4502 * Load the code segment for the new task.
4503 */
4504 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4505 {
4506 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4507 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4508 }
4509
4510 /* Fetch the descriptor. */
4511 IEMSELDESC DescCS;
4512 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4513 if (rcStrict != VINF_SUCCESS)
4514 {
4515 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4516 return rcStrict;
4517 }
4518
4519 /* CS must be a code segment. */
4520 if ( !DescCS.Legacy.Gen.u1DescType
4521 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4522 {
4523 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4524 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4525 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4526 }
4527
4528 /* For conforming CS, DPL must be less than or equal to the RPL. */
4529 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4530 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4531 {
4532 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4533 DescCS.Legacy.Gen.u2Dpl));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* For non-conforming CS, DPL must match RPL. */
4538 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4539 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4540 {
4541 Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4542 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* Is it there? */
4547 if (!DescCS.Legacy.Gen.u1Present)
4548 {
4549 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4550 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4551 }
4552
4553 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4554 u64Base = X86DESC_BASE(&DescCS.Legacy);
4555
4556 /* Set the accessed bit before committing the result into CS. */
4557 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4558 {
4559 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4560 if (rcStrict != VINF_SUCCESS)
4561 return rcStrict;
4562 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4563 }
4564
4565 /* Commit CS. */
4566 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4567 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4568 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4569 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4570 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4571 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4572 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4573 }
4574
4575 /** @todo Debug trap. */
4576 if (fIsNewTSS386 && fNewDebugTrap)
4577 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4578
4579 /*
4580 * Construct the error code masks based on what caused this task switch.
4581 * See Intel Instruction reference for INT.
4582 */
4583 uint16_t uExt;
4584 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4585 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4586 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4587 {
4588 uExt = 1;
4589 }
4590 else
4591 uExt = 0;
4592
4593 /*
4594 * Push any error code on to the new stack.
4595 */
4596 if (fFlags & IEM_XCPT_FLAGS_ERR)
4597 {
4598 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4599 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4600 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
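        /* The error code is pushed as a dword on a 32-bit TSS and as a word on a 16-bit TSS,
           matching the iemMemStackPushU32/U16 calls further down. */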
4601
4602 /* Check that there is sufficient space on the stack. */
4603 /** @todo Factor out segment limit checking for normal/expand down segments
4604 * into a separate function. */
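        /* For a normal (expand-up) stack segment the valid offsets are [0, limit]; for an
           expand-down segment they are (limit, 0xffff] or (limit, 0xffffffff] depending on
           the D/B bit, which is what the two branches below check. */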
4605 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4606 {
4607 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4608 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4609 {
4610 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4611 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4612 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4613 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4614 }
4615 }
4616 else
4617 {
4618 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4619 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4620 {
4621 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4622 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4623 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4624 }
4625 }
4626
4627
4628 if (fIsNewTSS386)
4629 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4630 else
4631 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4632 if (rcStrict != VINF_SUCCESS)
4633 {
4634 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4635 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4636 return rcStrict;
4637 }
4638 }
4639
4640 /* Check the new EIP against the new CS limit. */
4641 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4642 {
4643        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4644 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4645 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4646 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4647 }
4648
4649 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4650 pVCpu->cpum.GstCtx.ss.Sel));
4651 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4652}
4653
4654
4655/**
4656 * Implements exceptions and interrupts for protected mode.
4657 *
4658 * @returns VBox strict status code.
4659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4660 * @param cbInstr The number of bytes to offset rIP by in the return
4661 * address.
4662 * @param u8Vector The interrupt / exception vector number.
4663 * @param fFlags The flags.
4664 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4665 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4666 */
4667IEM_STATIC VBOXSTRICTRC
4668iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4669 uint8_t cbInstr,
4670 uint8_t u8Vector,
4671 uint32_t fFlags,
4672 uint16_t uErr,
4673 uint64_t uCr2)
4674{
4675 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4676
4677 /*
4678 * Read the IDT entry.
4679 */
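    /* Each protected-mode IDT entry is 8 bytes, so the entry for vector N occupies
       bytes 8*N through 8*N+7 and must lie entirely within the IDT limit. */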
4680 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4681 {
4682 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4683 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4684 }
4685 X86DESC Idte;
4686 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4687 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4688 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4689 {
4690 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4691 return rcStrict;
4692 }
4693 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4694 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4695 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4696
4697 /*
4698 * Check the descriptor type, DPL and such.
4699 * ASSUMES this is done in the same order as described for call-gate calls.
4700 */
4701 if (Idte.Gate.u1DescType)
4702 {
4703 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4704 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4705 }
4706 bool fTaskGate = false;
4707 uint8_t f32BitGate = true;
4708 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
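    /* These flags are always cleared in the new EFLAGS image once the gate has been
       dispatched; interrupt gates additionally clear IF (added in the cases below),
       whereas trap gates leave IF untouched. */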
4709 switch (Idte.Gate.u4Type)
4710 {
4711 case X86_SEL_TYPE_SYS_UNDEFINED:
4712 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4713 case X86_SEL_TYPE_SYS_LDT:
4714 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4715 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4716 case X86_SEL_TYPE_SYS_UNDEFINED2:
4717 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4718 case X86_SEL_TYPE_SYS_UNDEFINED3:
4719 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4720 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4721 case X86_SEL_TYPE_SYS_UNDEFINED4:
4722 {
4723 /** @todo check what actually happens when the type is wrong...
4724 * esp. call gates. */
4725 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4726 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4727 }
4728
4729 case X86_SEL_TYPE_SYS_286_INT_GATE:
4730 f32BitGate = false;
4731 RT_FALL_THRU();
4732 case X86_SEL_TYPE_SYS_386_INT_GATE:
4733 fEflToClear |= X86_EFL_IF;
4734 break;
4735
4736 case X86_SEL_TYPE_SYS_TASK_GATE:
4737 fTaskGate = true;
4738#ifndef IEM_IMPLEMENTS_TASKSWITCH
4739 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4740#endif
4741 break;
4742
4743 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4744            f32BitGate = false;
                RT_FALL_THRU();
4745 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4746 break;
4747
4748 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4749 }
4750
4751 /* Check DPL against CPL if applicable. */
4752 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4753 {
4754 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4755 {
4756 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4757 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4758 }
4759 }
4760
4761 /* Is it there? */
4762 if (!Idte.Gate.u1Present)
4763 {
4764 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4765 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4766 }
4767
4768 /* Is it a task-gate? */
4769 if (fTaskGate)
4770 {
4771 /*
4772 * Construct the error code masks based on what caused this task switch.
4773 * See Intel Instruction reference for INT.
4774 */
4775 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4776 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4777 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4778 RTSEL SelTSS = Idte.Gate.u16Sel;
4779
4780 /*
4781 * Fetch the TSS descriptor in the GDT.
4782 */
4783 IEMSELDESC DescTSS;
4784 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4785 if (rcStrict != VINF_SUCCESS)
4786 {
4787 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4788 VBOXSTRICTRC_VAL(rcStrict)));
4789 return rcStrict;
4790 }
4791
4792 /* The TSS descriptor must be a system segment and be available (not busy). */
4793 if ( DescTSS.Legacy.Gen.u1DescType
4794 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4795 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4796 {
4797 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4798 u8Vector, SelTSS, DescTSS.Legacy.au64));
4799 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4800 }
4801
4802 /* The TSS must be present. */
4803 if (!DescTSS.Legacy.Gen.u1Present)
4804 {
4805 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4806 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4807 }
4808
4809 /* Do the actual task switch. */
4810 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4811 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4812 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4813 }
4814
4815 /* A null CS is bad. */
4816 RTSEL NewCS = Idte.Gate.u16Sel;
4817 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4818 {
4819 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4820 return iemRaiseGeneralProtectionFault0(pVCpu);
4821 }
4822
4823 /* Fetch the descriptor for the new CS. */
4824 IEMSELDESC DescCS;
4825 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4826 if (rcStrict != VINF_SUCCESS)
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4829 return rcStrict;
4830 }
4831
4832 /* Must be a code segment. */
4833 if (!DescCS.Legacy.Gen.u1DescType)
4834 {
4835 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4836 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4837 }
4838 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4839 {
4840 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4841 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4842 }
4843
4844 /* Don't allow lowering the privilege level. */
4845 /** @todo Does the lowering of privileges apply to software interrupts
4846 * only? This has bearings on the more-privileged or
4847 * same-privilege stack behavior further down. A testcase would
4848 * be nice. */
4849 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4850 {
4851 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4852 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4853 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4854 }
4855
4856 /* Make sure the selector is present. */
4857 if (!DescCS.Legacy.Gen.u1Present)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4860 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4861 }
4862
4863 /* Check the new EIP against the new CS limit. */
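    /* 286 interrupt/trap gates only carry a 16-bit offset; 386 gates supply the full
       32-bit offset in two halves. */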
4864 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4865 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4866 ? Idte.Gate.u16OffsetLow
4867 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4868 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4869 if (uNewEip > cbLimitCS)
4870 {
4871 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4872 u8Vector, uNewEip, cbLimitCS, NewCS));
4873 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4874 }
4875 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4876
4877 /* Calc the flag image to push. */
4878 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4879 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4880 fEfl &= ~X86_EFL_RF;
4881 else
4882 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4883
4884 /* From V8086 mode only go to CPL 0. */
4885 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4886 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4887 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4888 {
4889 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4890 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4891 }
4892
4893 /*
4894 * If the privilege level changes, we need to get a new stack from the TSS.
4895 * This in turns means validating the new SS and ESP...
4896 */
4897 if (uNewCpl != pVCpu->iem.s.uCpl)
4898 {
4899 RTSEL NewSS;
4900 uint32_t uNewEsp;
4901 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4902 if (rcStrict != VINF_SUCCESS)
4903 return rcStrict;
4904
4905 IEMSELDESC DescSS;
4906 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4907 if (rcStrict != VINF_SUCCESS)
4908 return rcStrict;
4909 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4910 if (!DescSS.Legacy.Gen.u1DefBig)
4911 {
4912 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4913 uNewEsp = (uint16_t)uNewEsp;
4914 }
4915
4916 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4917
4918 /* Check that there is sufficient space for the stack frame. */
4919 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
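        /* Frame sizes: the frame holds EIP, CS, EFLAGS, ESP and SS plus an optional error
           code (5 or 6 entries); when interrupting V8086 code ES, DS, FS and GS are included
           as well (9 or 10 entries).  Entries are 2 bytes for a 16-bit gate and 4 bytes for
           a 32-bit gate, hence the shift by f32BitGate. */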
4920 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4921 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4922 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4923
4924 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4925 {
4926 if ( uNewEsp - 1 > cbLimitSS
4927 || uNewEsp < cbStackFrame)
4928 {
4929 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4930 u8Vector, NewSS, uNewEsp, cbStackFrame));
4931 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4932 }
4933 }
4934 else
4935 {
4936 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4937 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4938 {
4939 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4940 u8Vector, NewSS, uNewEsp, cbStackFrame));
4941 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4942 }
4943 }
4944
4945 /*
4946 * Start making changes.
4947 */
4948
4949 /* Set the new CPL so that stack accesses use it. */
4950 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4951 pVCpu->iem.s.uCpl = uNewCpl;
4952
4953 /* Create the stack frame. */
4954 RTPTRUNION uStackFrame;
4955 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4956 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4957 if (rcStrict != VINF_SUCCESS)
4958 return rcStrict;
4959 void * const pvStackFrame = uStackFrame.pv;
4960 if (f32BitGate)
4961 {
4962 if (fFlags & IEM_XCPT_FLAGS_ERR)
4963 *uStackFrame.pu32++ = uErr;
4964 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4965 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4966 uStackFrame.pu32[2] = fEfl;
4967 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4968 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4969 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4970 if (fEfl & X86_EFL_VM)
4971 {
4972 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4973 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4974 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4975 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4976 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4977 }
4978 }
4979 else
4980 {
4981 if (fFlags & IEM_XCPT_FLAGS_ERR)
4982 *uStackFrame.pu16++ = uErr;
4983 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4984 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4985 uStackFrame.pu16[2] = fEfl;
4986 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4987 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4988 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4989 if (fEfl & X86_EFL_VM)
4990 {
4991 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
4992 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
4993 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
4994 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
4995 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
4996 }
4997 }
4998 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4999 if (rcStrict != VINF_SUCCESS)
5000 return rcStrict;
5001
5002 /* Mark the selectors 'accessed' (hope this is the correct time). */
5003        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5004 * after pushing the stack frame? (Write protect the gdt + stack to
5005 * find out.) */
5006 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5007 {
5008 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5009 if (rcStrict != VINF_SUCCESS)
5010 return rcStrict;
5011 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5012 }
5013
5014 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5015 {
5016 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5017 if (rcStrict != VINF_SUCCESS)
5018 return rcStrict;
5019 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5020 }
5021
5022 /*
5023         * Start committing the register changes (joins with the DPL=CPL branch).
5024 */
5025 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5026 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5027 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5028 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5029 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5030 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5031 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5032 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5033 * SP is loaded).
5034 * Need to check the other combinations too:
5035 * - 16-bit TSS, 32-bit handler
5036 * - 32-bit TSS, 16-bit handler */
5037 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5038 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5039 else
5040 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5041
5042 if (fEfl & X86_EFL_VM)
5043 {
5044 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5045 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5046 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5047 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5048 }
5049 }
5050 /*
5051 * Same privilege, no stack change and smaller stack frame.
5052 */
5053 else
5054 {
5055 uint64_t uNewRsp;
5056 RTPTRUNION uStackFrame;
5057 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5058 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5059 if (rcStrict != VINF_SUCCESS)
5060 return rcStrict;
5061 void * const pvStackFrame = uStackFrame.pv;
5062
5063 if (f32BitGate)
5064 {
5065 if (fFlags & IEM_XCPT_FLAGS_ERR)
5066 *uStackFrame.pu32++ = uErr;
5067 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5068 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5069 uStackFrame.pu32[2] = fEfl;
5070 }
5071 else
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu16++ = uErr;
5075 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5076 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5077 uStackFrame.pu16[2] = fEfl;
5078 }
5079 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5080 if (rcStrict != VINF_SUCCESS)
5081 return rcStrict;
5082
5083 /* Mark the CS selector as 'accessed'. */
5084 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5085 {
5086 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5087 if (rcStrict != VINF_SUCCESS)
5088 return rcStrict;
5089 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5090 }
5091
5092 /*
5093 * Start committing the register changes (joins with the other branch).
5094 */
5095 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5096 }
5097
5098 /* ... register committing continues. */
5099 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5100 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5101 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5102 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5103 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5104 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5105
5106 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5107 fEfl &= ~fEflToClear;
5108 IEMMISC_SET_EFL(pVCpu, fEfl);
5109
5110 if (fFlags & IEM_XCPT_FLAGS_CR2)
5111 pVCpu->cpum.GstCtx.cr2 = uCr2;
5112
5113 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5114 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5115
5116 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5117}
5118
5119
5120/**
5121 * Implements exceptions and interrupts for long mode.
5122 *
5123 * @returns VBox strict status code.
5124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5125 * @param cbInstr The number of bytes to offset rIP by in the return
5126 * address.
5127 * @param u8Vector The interrupt / exception vector number.
5128 * @param fFlags The flags.
5129 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5130 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5131 */
5132IEM_STATIC VBOXSTRICTRC
5133iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5134 uint8_t cbInstr,
5135 uint8_t u8Vector,
5136 uint32_t fFlags,
5137 uint16_t uErr,
5138 uint64_t uCr2)
5139{
5140 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5141
5142 /*
5143 * Read the IDT entry.
5144 */
5145 uint16_t offIdt = (uint16_t)u8Vector << 4;
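    /* Long-mode IDT entries are 16 bytes (64-bit gate descriptors), hence the vector is
       scaled by 16 and the entry is fetched below as two separate qwords. */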
5146 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5147 {
5148 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5149 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5150 }
5151 X86DESC64 Idte;
5152 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5153 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5154 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5155 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5156 {
5157 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5158 return rcStrict;
5159 }
5160 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5161 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5162 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5163
5164 /*
5165 * Check the descriptor type, DPL and such.
5166 * ASSUMES this is done in the same order as described for call-gate calls.
5167 */
5168 if (Idte.Gate.u1DescType)
5169 {
5170 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5171 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5172 }
5173 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5174 switch (Idte.Gate.u4Type)
5175 {
5176 case AMD64_SEL_TYPE_SYS_INT_GATE:
5177 fEflToClear |= X86_EFL_IF;
5178 break;
5179 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5180 break;
5181
5182 default:
5183 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5184 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5185 }
5186
5187 /* Check DPL against CPL if applicable. */
5188 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5189 {
5190 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5191 {
5192 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5193 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5194 }
5195 }
5196
5197 /* Is it there? */
5198 if (!Idte.Gate.u1Present)
5199 {
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5201 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5202 }
5203
5204 /* A null CS is bad. */
5205 RTSEL NewCS = Idte.Gate.u16Sel;
5206 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5207 {
5208 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5209 return iemRaiseGeneralProtectionFault0(pVCpu);
5210 }
5211
5212 /* Fetch the descriptor for the new CS. */
5213 IEMSELDESC DescCS;
5214 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5215 if (rcStrict != VINF_SUCCESS)
5216 {
5217 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5218 return rcStrict;
5219 }
5220
5221 /* Must be a 64-bit code segment. */
5222 if (!DescCS.Long.Gen.u1DescType)
5223 {
5224 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5225 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5226 }
5227 if ( !DescCS.Long.Gen.u1Long
5228 || DescCS.Long.Gen.u1DefBig
5229 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5230 {
5231 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5232 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5233 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5234 }
5235
5236 /* Don't allow lowering the privilege level. For non-conforming CS
5237 selectors, the CS.DPL sets the privilege level the trap/interrupt
5238 handler runs at. For conforming CS selectors, the CPL remains
5239 unchanged, but the CS.DPL must be <= CPL. */
5240 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5241 * when CPU in Ring-0. Result \#GP? */
5242 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5245 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5246 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5247 }
5248
5249
5250 /* Make sure the selector is present. */
5251 if (!DescCS.Legacy.Gen.u1Present)
5252 {
5253 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5254 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5255 }
5256
5257 /* Check that the new RIP is canonical. */
5258 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5259 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5260 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5261 if (!IEM_IS_CANONICAL(uNewRip))
5262 {
5263 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5264 return iemRaiseGeneralProtectionFault0(pVCpu);
5265 }
5266
5267 /*
5268 * If the privilege level changes or if the IST isn't zero, we need to get
5269 * a new stack from the TSS.
5270 */
5271 uint64_t uNewRsp;
5272 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5273 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5274 if ( uNewCpl != pVCpu->iem.s.uCpl
5275 || Idte.Gate.u3IST != 0)
5276 {
5277 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5278 if (rcStrict != VINF_SUCCESS)
5279 return rcStrict;
5280 }
5281 else
5282 uNewRsp = pVCpu->cpum.GstCtx.rsp;
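    /* In 64-bit mode the CPU aligns the stack pointer down to a 16 byte boundary before
       pushing the interrupt/exception frame. */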
5283 uNewRsp &= ~(uint64_t)0xf;
5284
5285 /*
5286 * Calc the flag image to push.
5287 */
5288 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5289 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5290 fEfl &= ~X86_EFL_RF;
5291 else
5292 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5293
5294 /*
5295 * Start making changes.
5296 */
5297 /* Set the new CPL so that stack accesses use it. */
5298 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5299 pVCpu->iem.s.uCpl = uNewCpl;
5300
5301 /* Create the stack frame. */
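    /* The 64-bit frame always holds RIP, CS, RFLAGS, RSP and SS (5 qwords); an optional
       error code qword ends up lowest on the stack, i.e. at the new RSP. */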
5302 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5303 RTPTRUNION uStackFrame;
5304 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5305 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5306 if (rcStrict != VINF_SUCCESS)
5307 return rcStrict;
5308 void * const pvStackFrame = uStackFrame.pv;
5309
5310 if (fFlags & IEM_XCPT_FLAGS_ERR)
5311 *uStackFrame.pu64++ = uErr;
5312 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5313 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5314 uStackFrame.pu64[2] = fEfl;
5315 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5316 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5317 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5318 if (rcStrict != VINF_SUCCESS)
5319 return rcStrict;
5320
5321    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5322    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5323 * after pushing the stack frame? (Write protect the gdt + stack to
5324 * find out.) */
5325 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5326 {
5327 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5328 if (rcStrict != VINF_SUCCESS)
5329 return rcStrict;
5330 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5331 }
5332
5333 /*
5334     * Start committing the register changes.
5335 */
5336    /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5337 * hidden registers when interrupting 32-bit or 16-bit code! */
5338 if (uNewCpl != uOldCpl)
5339 {
5340 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5341 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5342 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5343 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5344 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5345 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5346 }
5347 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5348 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5349 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5350 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5351 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5352 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5353 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5354 pVCpu->cpum.GstCtx.rip = uNewRip;
5355
5356 fEfl &= ~fEflToClear;
5357 IEMMISC_SET_EFL(pVCpu, fEfl);
5358
5359 if (fFlags & IEM_XCPT_FLAGS_CR2)
5360 pVCpu->cpum.GstCtx.cr2 = uCr2;
5361
5362 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5363 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5364
5365 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5366}
5367
5368
5369/**
5370 * Implements exceptions and interrupts.
5371 *
5372 * All exceptions and interrupts go through this function!
5373 *
5374 * @returns VBox strict status code.
5375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5376 * @param cbInstr The number of bytes to offset rIP by in the return
5377 * address.
5378 * @param u8Vector The interrupt / exception vector number.
5379 * @param fFlags The flags.
5380 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5381 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5382 */
5383DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5384iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5385 uint8_t cbInstr,
5386 uint8_t u8Vector,
5387 uint32_t fFlags,
5388 uint16_t uErr,
5389 uint64_t uCr2)
5390{
5391 /*
5392 * Get all the state that we might need here.
5393 */
5394 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5395 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5396
5397#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5398 /*
5399 * Flush prefetch buffer
5400 */
5401 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5402#endif
5403
5404 /*
5405 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5406 */
5407 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5408 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5409 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5410 | IEM_XCPT_FLAGS_BP_INSTR
5411 | IEM_XCPT_FLAGS_ICEBP_INSTR
5412 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5413 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5414 {
5415 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5416 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5417 u8Vector = X86_XCPT_GP;
5418 uErr = 0;
5419 }
5420#ifdef DBGFTRACE_ENABLED
5421 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5422 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5423 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5424#endif
5425
5426 /*
5427 * Evaluate whether NMI blocking should be in effect.
5428 * Normally, NMI blocking is in effect whenever we inject an NMI.
5429 */
5430 bool fBlockNmi;
5431 if ( u8Vector == X86_XCPT_NMI
5432 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5433 fBlockNmi = true;
5434 else
5435 fBlockNmi = false;
5436
5437#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5438 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5439 {
5440 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5441 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5442 return rcStrict0;
5443
5444 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5445 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5446 {
5447 Assert(CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5448 fBlockNmi = false;
5449 }
5450 }
5451#endif
5452
5453#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5454 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5455 {
5456 /*
5457 * If the event is being injected as part of VMRUN, it isn't subject to event
5458 * intercepts in the nested-guest. However, secondary exceptions that occur
5459 * during injection of any event -are- subject to exception intercepts.
5460 *
5461 * See AMD spec. 15.20 "Event Injection".
5462 */
5463 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5464 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5465 else
5466 {
5467 /*
5468 * Check and handle if the event being raised is intercepted.
5469 */
5470 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5471 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5472 return rcStrict0;
5473 }
5474 }
5475#endif
5476
5477 /*
5478 * Set NMI blocking if necessary.
5479 */
5480 if ( fBlockNmi
5481 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5482 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5483
5484 /*
5485 * Do recursion accounting.
5486 */
5487 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5488 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5489 if (pVCpu->iem.s.cXcptRecursions == 0)
5490 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5491 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5492 else
5493 {
5494 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5495 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5496 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5497
5498 if (pVCpu->iem.s.cXcptRecursions >= 4)
5499 {
5500#ifdef DEBUG_bird
5501 AssertFailed();
5502#endif
5503 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5504 }
5505
5506 /*
5507 * Evaluate the sequence of recurring events.
5508 */
5509 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5510 NULL /* pXcptRaiseInfo */);
5511 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5512 { /* likely */ }
5513 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5514 {
5515 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5516 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5517 u8Vector = X86_XCPT_DF;
5518 uErr = 0;
5519#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5520 /* VMX nested-guest #DF intercept needs to be checked here. */
5521 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5522 {
5523 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5524 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5525 return rcStrict0;
5526 }
5527#endif
5528 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5529 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5530 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5531 }
5532 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5533 {
5534 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5535 return iemInitiateCpuShutdown(pVCpu);
5536 }
5537 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5538 {
5539 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5540 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5541 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5542 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5543 return VERR_EM_GUEST_CPU_HANG;
5544 }
5545 else
5546 {
5547 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5548 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5549 return VERR_IEM_IPE_9;
5550 }
5551
5552 /*
5553          * The 'EXT' bit is set when an exception occurs during delivery of an external
5554          * event (such as an interrupt or an earlier exception)[1]. The privileged software
5555          * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5556          * software interrupt instructions INT n, INTO and INT3, the 'EXT' bit is not set[3].
5557 *
5558 * [1] - Intel spec. 6.13 "Error Code"
5559 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5560 * [3] - Intel Instruction reference for INT n.
5561 */
5562 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5563 && (fFlags & IEM_XCPT_FLAGS_ERR)
5564 && u8Vector != X86_XCPT_PF
5565 && u8Vector != X86_XCPT_DF)
5566 {
5567 uErr |= X86_TRAP_ERR_EXTERNAL;
5568 }
5569 }
5570
5571 pVCpu->iem.s.cXcptRecursions++;
5572 pVCpu->iem.s.uCurXcpt = u8Vector;
5573 pVCpu->iem.s.fCurXcpt = fFlags;
5574 pVCpu->iem.s.uCurXcptErr = uErr;
5575 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5576
5577 /*
5578 * Extensive logging.
5579 */
5580#if defined(LOG_ENABLED) && defined(IN_RING3)
5581 if (LogIs3Enabled())
5582 {
5583 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5584 PVM pVM = pVCpu->CTX_SUFF(pVM);
5585 char szRegs[4096];
5586 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5587 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5588 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5589 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5590 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5591 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5592 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5593 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5594 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5595 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5596 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5597 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5598 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5599 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5600 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5601 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5602 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5603 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5604 " efer=%016VR{efer}\n"
5605 " pat=%016VR{pat}\n"
5606 " sf_mask=%016VR{sf_mask}\n"
5607 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5608 " lstar=%016VR{lstar}\n"
5609 " star=%016VR{star} cstar=%016VR{cstar}\n"
5610 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5611 );
5612
5613 char szInstr[256];
5614 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5615 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5616 szInstr, sizeof(szInstr), NULL);
5617 Log3(("%s%s\n", szRegs, szInstr));
5618 }
5619#endif /* LOG_ENABLED */
5620
5621 /*
5622 * Call the mode specific worker function.
5623 */
5624 VBOXSTRICTRC rcStrict;
5625 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5626 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5627 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5628 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5629 else
5630 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5631
5632 /* Flush the prefetch buffer. */
5633#ifdef IEM_WITH_CODE_TLB
5634 pVCpu->iem.s.pbInstrBuf = NULL;
5635#else
5636 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5637#endif
5638
5639 /*
5640 * Unwind.
5641 */
5642 pVCpu->iem.s.cXcptRecursions--;
5643 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5644 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5645 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5646 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5647 pVCpu->iem.s.cXcptRecursions + 1));
5648 return rcStrict;
5649}
5650
5651#ifdef IEM_WITH_SETJMP
5652/**
5653 * See iemRaiseXcptOrInt. Will not return.
5654 */
5655IEM_STATIC DECL_NO_RETURN(void)
5656iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5657 uint8_t cbInstr,
5658 uint8_t u8Vector,
5659 uint32_t fFlags,
5660 uint16_t uErr,
5661 uint64_t uCr2)
5662{
5663 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5664 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5665}
5666#endif
5667
5668
5669/** \#DE - 00. */
5670DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5671{
5672 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5673}
5674
5675
5676/** \#DB - 01.
5677 * @note This automatically clears DR7.GD. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5679{
5680 /** @todo set/clear RF. */
5681 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5682 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5683}
5684
5685
5686/** \#BR - 05. */
5687DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5688{
5689 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5690}
5691
5692
5693/** \#UD - 06. */
5694DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5695{
5696 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5697}
5698
5699
5700/** \#NM - 07. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5704}
5705
5706
5707/** \#TS(err) - 0a. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5711}
5712
5713
5714/** \#TS(tr) - 0a. */
5715DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5716{
5717 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5718 pVCpu->cpum.GstCtx.tr.Sel, 0);
5719}
5720
5721
5722/** \#TS(0) - 0a. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5726 0, 0);
5727}
5728
5729
5730/** \#TS(err) - 0a. */
5731DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5732{
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5734 uSel & X86_SEL_MASK_OFF_RPL, 0);
5735}
5736
5737
5738/** \#NP(err) - 0b. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5742}
5743
5744
5745/** \#NP(sel) - 0b. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5749 uSel & ~X86_SEL_RPL, 0);
5750}
5751
5752
5753/** \#SS(seg) - 0c. */
5754DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5755{
5756 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5757 uSel & ~X86_SEL_RPL, 0);
5758}
5759
5760
5761/** \#SS(err) - 0c. */
5762DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5763{
5764 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5765}
5766
5767
5768/** \#GP(n) - 0d. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5772}
5773
5774
5775/** \#GP(0) - 0d. */
5776DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5777{
5778 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5779}
5780
5781#ifdef IEM_WITH_SETJMP
5782/** \#GP(0) - 0d. */
5783DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5784{
5785 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5786}
5787#endif
5788
5789
5790/** \#GP(sel) - 0d. */
5791DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5792{
5793 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5794 Sel & ~X86_SEL_RPL, 0);
5795}
5796
5797
5798/** \#GP(0) - 0d. */
5799DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5800{
5801 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5802}
5803
5804
5805/** \#GP(sel) - 0d. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5807{
5808 NOREF(iSegReg); NOREF(fAccess);
5809 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5810 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5811}
5812
5813#ifdef IEM_WITH_SETJMP
5814/** \#GP(sel) - 0d, longjmp. */
5815DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5816{
5817 NOREF(iSegReg); NOREF(fAccess);
5818 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5819 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5820}
5821#endif
5822
5823/** \#GP(sel) - 0d. */
5824DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5825{
5826 NOREF(Sel);
5827 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5828}
5829
5830#ifdef IEM_WITH_SETJMP
5831/** \#GP(sel) - 0d, longjmp. */
5832DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5833{
5834 NOREF(Sel);
5835 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5836}
5837#endif
5838
5839
5840/** \#GP(sel) - 0d. */
5841DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5842{
5843 NOREF(iSegReg); NOREF(fAccess);
5844 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5845}
5846
5847#ifdef IEM_WITH_SETJMP
5848/** \#GP(sel) - 0d, longjmp. */
5849DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5850 uint32_t fAccess)
5851{
5852 NOREF(iSegReg); NOREF(fAccess);
5853 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5854}
5855#endif
5856
5857
5858/** \#PF(n) - 0e. */
5859DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5860{
5861 uint16_t uErr;
5862 switch (rc)
5863 {
5864 case VERR_PAGE_NOT_PRESENT:
5865 case VERR_PAGE_TABLE_NOT_PRESENT:
5866 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5867 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5868 uErr = 0;
5869 break;
5870
5871 default:
5872 AssertMsgFailed(("%Rrc\n", rc));
5873 RT_FALL_THRU();
5874 case VERR_ACCESS_DENIED:
5875 uErr = X86_TRAP_PF_P;
5876 break;
5877
5878 /** @todo reserved */
5879 }
5880
5881 if (pVCpu->iem.s.uCpl == 3)
5882 uErr |= X86_TRAP_PF_US;
5883
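    /* Only report the instruction-fetch (ID) bit for code accesses when no-execute is
       enabled, i.e. PAE paging with EFER.NXE set (see the check below). */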
5884 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5885 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5886 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5887 uErr |= X86_TRAP_PF_ID;
5888
5889#if 0 /* This is so much nonsense, really.  Why was it done like that? */
5890    /* Note! RW access callers reporting a WRITE protection fault will clear
5891 the READ flag before calling. So, read-modify-write accesses (RW)
5892 can safely be reported as READ faults. */
5893 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5894 uErr |= X86_TRAP_PF_RW;
5895#else
5896 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5897 {
5898 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5899 uErr |= X86_TRAP_PF_RW;
5900 }
5901#endif
5902
5903 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5904 uErr, GCPtrWhere);
5905}
5906
5907#ifdef IEM_WITH_SETJMP
5908/** \#PF(n) - 0e, longjmp. */
5909IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5910{
5911 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5912}
5913#endif
5914
5915
5916/** \#MF(0) - 10. */
5917DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5918{
5919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5920}
5921
5922
5923/** \#AC(0) - 11. */
5924DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5925{
5926 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5927}
5928
5929
5930/**
5931 * Macro for calling iemCImplRaiseDivideError().
5932 *
5933 * This enables us to add/remove arguments and force different levels of
5934 * inlining as we wish.
5935 *
5936 * @return Strict VBox status code.
5937 */
5938#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5939IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5940{
5941 NOREF(cbInstr);
5942 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5943}
5944
5945
5946/**
5947 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5948 *
5949 * This enables us to add/remove arguments and force different levels of
5950 * inlining as we wish.
5951 *
5952 * @return Strict VBox status code.
5953 */
5954#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5955IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5956{
5957 NOREF(cbInstr);
5958 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5959}
5960
5961
5962/**
5963 * Macro for calling iemCImplRaiseInvalidOpcode().
5964 *
5965 * This enables us to add/remove arguments and force different levels of
5966 * inlining as we wish.
5967 *
5968 * @return Strict VBox status code.
5969 */
5970#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5971IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5972{
5973 NOREF(cbInstr);
5974 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5975}
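/* Usage sketch (illustrative, not taken from this file): an instruction decoder that
   encounters an undefined encoding typically just does
        return IEMOP_RAISE_INVALID_OPCODE();
   which defers the actual exception raising to iemCImplRaiseInvalidOpcode above via
   IEM_MC_DEFER_TO_CIMPL_0. */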
5976
5977
5978/** @} */
5979
5980
5981/*
5982 *
5983 * Helpers routines.
5984 * Helper routines.
5985 * Helper routines.
5986 * Helper routines.
5987 */
5988
5989/**
5990 * Recalculates the effective operand size.
5991 *
5992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5993 */
5994IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
5995{
5996 switch (pVCpu->iem.s.enmCpuMode)
5997 {
5998 case IEMMODE_16BIT:
5999 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6000 break;
6001 case IEMMODE_32BIT:
6002 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6003 break;
6004 case IEMMODE_64BIT:
6005 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6006 {
6007 case 0:
6008 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6009 break;
6010 case IEM_OP_PRF_SIZE_OP:
6011 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6012 break;
6013 case IEM_OP_PRF_SIZE_REX_W:
6014 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6015 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6016 break;
6017 }
6018 break;
6019 default:
6020 AssertFailed();
6021 }
6022}
6023
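/*
 * Illustrative sketch (standalone, not part of the IEM sources; all names and the
 * sample values are made up): the operand-size rules implemented by the function
 * above, restated over plain ints so they can be compiled and run on their own.
 */
#if 0 /* standalone example */
#include <stdio.h>

enum { MODE_16 = 16, MODE_32 = 32, MODE_64 = 64 };

static int CalcEffOpSize(int cpuMode, int defOpSize, int fPrefix66, int fRexW)
{
    if (cpuMode == MODE_16)                 /* 0x66 toggles 16 <-> 32 */
        return fPrefix66 ? 32 : 16;
    if (cpuMode == MODE_32)
        return fPrefix66 ? 16 : 32;
    if (fRexW)                              /* 64-bit mode: REX.W wins over 0x66 */
        return 64;
    return fPrefix66 ? 16 : defOpSize;      /* otherwise 0x66 gives 16-bit, else the default */
}

int main(void)
{
    printf("%d\n", CalcEffOpSize(MODE_64, 32, 1 /*0x66*/, 1 /*REX.W*/));  /* 64 */
    printf("%d\n", CalcEffOpSize(MODE_64, 32, 1 /*0x66*/, 0));            /* 16 */
    printf("%d\n", CalcEffOpSize(MODE_32, 32, 1 /*0x66*/, 0));            /* 16 */
    return 0;
}
#endif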
6024
6025/**
6026 * Sets the default operand size to 64-bit and recalculates the effective
6027 * operand size.
6028 *
6029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6030 */
6031IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6032{
6033 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6034 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6035 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6036 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6037 else
6038 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6039}
6040
6041
6042/*
6043 *
6044 * Common opcode decoders.
6045 * Common opcode decoders.
6046 * Common opcode decoders.
6047 *
6048 */
6049//#include <iprt/mem.h>
6050
6051/**
6052 * Used to add extra details about a stub case.
6053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6054 */
6055IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6056{
6057#if defined(LOG_ENABLED) && defined(IN_RING3)
6058 PVM pVM = pVCpu->CTX_SUFF(pVM);
6059 char szRegs[4096];
6060 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6061 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6062 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6063 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6064 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6065 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6066 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6067 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6068 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6069 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6070 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6071 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6072 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6073 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6074 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6075 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6076 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6077 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6078 " efer=%016VR{efer}\n"
6079 " pat=%016VR{pat}\n"
6080 " sf_mask=%016VR{sf_mask}\n"
6081 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6082 " lstar=%016VR{lstar}\n"
6083 " star=%016VR{star} cstar=%016VR{cstar}\n"
6084 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6085 );
6086
6087 char szInstr[256];
6088 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6089 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6090 szInstr, sizeof(szInstr), NULL);
6091
6092 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6093#else
6094 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6095#endif
6096}
6097
6098/**
6099 * Complains about a stub.
6100 *
6101 * Providing two versions of this macro, one for daily use and one for use when
6102 * working on IEM.
6103 */
6104#if 0
6105# define IEMOP_BITCH_ABOUT_STUB() \
6106 do { \
6107 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6108 iemOpStubMsg2(pVCpu); \
6109 RTAssertPanic(); \
6110 } while (0)
6111#else
6112# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6113#endif
6114
6115/** Stubs an opcode. */
6116#define FNIEMOP_STUB(a_Name) \
6117 FNIEMOP_DEF(a_Name) \
6118 { \
6119 RT_NOREF_PV(pVCpu); \
6120 IEMOP_BITCH_ABOUT_STUB(); \
6121 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6122 } \
6123 typedef int ignore_semicolon
6124
6125/** Stubs an opcode. */
6126#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6127 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6128 { \
6129 RT_NOREF_PV(pVCpu); \
6130 RT_NOREF_PV(a_Name0); \
6131 IEMOP_BITCH_ABOUT_STUB(); \
6132 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6133 } \
6134 typedef int ignore_semicolon
6135
6136/** Stubs an opcode which currently should raise \#UD. */
6137#define FNIEMOP_UD_STUB(a_Name) \
6138 FNIEMOP_DEF(a_Name) \
6139 { \
6140 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6141 return IEMOP_RAISE_INVALID_OPCODE(); \
6142 } \
6143 typedef int ignore_semicolon
6144
6145/** Stubs an opcode which currently should raise \#UD. */
6146#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6147 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6148 { \
6149 RT_NOREF_PV(pVCpu); \
6150 RT_NOREF_PV(a_Name0); \
6151 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6152 return IEMOP_RAISE_INVALID_OPCODE(); \
6153 } \
6154 typedef int ignore_semicolon
6155
6156
6157
6158/** @name Register Access.
6159 * @{
6160 */
6161
6162/**
6163 * Gets a reference (pointer) to the specified hidden segment register.
6164 *
6165 * @returns Hidden register reference.
6166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6167 * @param iSegReg The segment register.
6168 */
6169IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6170{
6171 Assert(iSegReg < X86_SREG_COUNT);
6172 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6173 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6174
6175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6176 return pSReg;
6177}
6178
6179
6180/**
6181 * Ensures that the given hidden segment register is up to date.
6182 *
6183 * @returns Hidden register reference.
6184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6185 * @param pSReg The segment register.
6186 */
6187IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6188{
6189 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6190 NOREF(pVCpu);
6191 return pSReg;
6192}
6193
6194
6195/**
6196 * Gets a reference (pointer) to the specified segment register (the selector
6197 * value).
6198 *
6199 * @returns Pointer to the selector variable.
6200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6201 * @param iSegReg The segment register.
6202 */
6203DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6204{
6205 Assert(iSegReg < X86_SREG_COUNT);
6206 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6207 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6208}
6209
6210
6211/**
6212 * Fetches the selector value of a segment register.
6213 *
6214 * @returns The selector value.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iSegReg The segment register.
6217 */
6218DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6219{
6220 Assert(iSegReg < X86_SREG_COUNT);
6221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6222 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6223}
6224
6225
6226/**
6227 * Fetches the base address value of a segment register.
6228 *
6229 * @returns The segment base address value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iSegReg The segment register.
6232 */
6233DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6234{
6235 Assert(iSegReg < X86_SREG_COUNT);
6236 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6237 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6238}
6239
6240
6241/**
6242 * Gets a reference (pointer) to the specified general purpose register.
6243 *
6244 * @returns Register reference.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iReg The general purpose register.
6247 */
6248DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6249{
6250 Assert(iReg < 16);
6251 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6252}
6253
6254
6255/**
6256 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6257 *
6258 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6259 *
6260 * @returns Register reference.
6261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6262 * @param iReg The register.
6263 */
6264DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6265{
6266 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6267 {
6268 Assert(iReg < 16);
6269 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6270 }
6271 /* high 8-bit register. */
6272 Assert(iReg < 8);
6273 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6274}
6275
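/*
 * Illustrative sketch (standalone, names made up): the encoding rule handled by
 * iemGRegRefU8 above - without any REX prefix, encodings 4..7 address the legacy
 * high bytes AH/CH/DH/BH of registers 0..3; with a REX prefix they address
 * SPL/BPL/SIL/DIL (and 8..15 address R8B..R15B) instead.
 */
#if 0 /* standalone example */
#include <stdio.h>

static void Decode8BitReg(unsigned iReg, int fHasRex, unsigned *piGReg, int *pfHighByte)
{
    if (iReg < 4 || fHasRex)
    {
        *piGReg     = iReg;       /* low byte of GPR iReg */
        *pfHighByte = 0;
    }
    else
    {
        *piGReg     = iReg & 3;   /* 4..7 -> high byte of GPR 0..3 */
        *pfHighByte = 1;
    }
}

int main(void)
{
    unsigned iGReg; int fHi;
    Decode8BitReg(5, 0 /*no REX*/,  &iGReg, &fHi);
    printf("enc 5, no REX:   GPR %u, high byte=%d (CH)\n",  iGReg, fHi);
    Decode8BitReg(5, 1 /*REX*/,     &iGReg, &fHi);
    printf("enc 5, with REX: GPR %u, high byte=%d (BPL)\n", iGReg, fHi);
    return 0;
}
#endif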
6276
6277/**
6278 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6279 *
6280 * @returns Register reference.
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 * @param iReg The register.
6283 */
6284DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6285{
6286 Assert(iReg < 16);
6287 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6288}
6289
6290
6291/**
6292 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6293 *
6294 * @returns Register reference.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param iReg The register.
6297 */
6298DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6299{
6300 Assert(iReg < 16);
6301 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6302}
6303
6304
6305/**
6306 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6307 *
6308 * @returns Register reference.
6309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6310 * @param iReg The register.
6311 */
6312DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6313{
6314 Assert(iReg < 16);
6315 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6316}
6317
6318
6319/**
6320 * Gets a reference (pointer) to the specified segment register's base address.
6321 *
6322 * @returns Segment register base address reference.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param iSegReg The segment selector.
6325 */
6326DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6327{
6328 Assert(iSegReg < X86_SREG_COUNT);
6329 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6330 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6331}
6332
6333
6334/**
6335 * Fetches the value of an 8-bit general purpose register.
6336 *
6337 * @returns The register value.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 * @param iReg The register.
6340 */
6341DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6342{
6343 return *iemGRegRefU8(pVCpu, iReg);
6344}
6345
6346
6347/**
6348 * Fetches the value of a 16-bit general purpose register.
6349 *
6350 * @returns The register value.
6351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6352 * @param iReg The register.
6353 */
6354DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6355{
6356 Assert(iReg < 16);
6357 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6358}
6359
6360
6361/**
6362 * Fetches the value of a 32-bit general purpose register.
6363 *
6364 * @returns The register value.
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param iReg The register.
6367 */
6368DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6369{
6370 Assert(iReg < 16);
6371 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6372}
6373
6374
6375/**
6376 * Fetches the value of a 64-bit general purpose register.
6377 *
6378 * @returns The register value.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param iReg The register.
6381 */
6382DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6383{
6384 Assert(iReg < 16);
6385 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6386}
6387
6388
6389/**
6390 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6391 *
6392 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6393 * segment limit.
6394 *
6395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6396 * @param offNextInstr The offset of the next instruction.
6397 */
6398IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6399{
6400 switch (pVCpu->iem.s.enmEffOpSize)
6401 {
6402 case IEMMODE_16BIT:
6403 {
6404 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6405 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6406 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6407 return iemRaiseGeneralProtectionFault0(pVCpu);
6408 pVCpu->cpum.GstCtx.rip = uNewIp;
6409 break;
6410 }
6411
6412 case IEMMODE_32BIT:
6413 {
6414 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6415 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6416
6417 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6418 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6419 return iemRaiseGeneralProtectionFault0(pVCpu);
6420 pVCpu->cpum.GstCtx.rip = uNewEip;
6421 break;
6422 }
6423
6424 case IEMMODE_64BIT:
6425 {
6426 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6427
6428 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6429 if (!IEM_IS_CANONICAL(uNewRip))
6430 return iemRaiseGeneralProtectionFault0(pVCpu);
6431 pVCpu->cpum.GstCtx.rip = uNewRip;
6432 break;
6433 }
6434
6435 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6436 }
6437
6438 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6439
6440#ifndef IEM_WITH_CODE_TLB
6441 /* Flush the prefetch buffer. */
6442 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6443#endif
6444
6445 return VINF_SUCCESS;
6446}
6447
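/*
 * Illustrative worked example (standalone, made-up numbers): in the 16-bit case
 * above, IP, the rel8 displacement and the instruction length are summed and then
 * truncated to 16 bits, so the new IP wraps at 64K before the CS limit check.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t ip      = 0xfffa;   /* hypothetical current IP            */
    int8_t   off     = 0x10;     /* hypothetical rel8 displacement     */
    uint8_t  cbInstr = 2;        /* a jmp rel8 instruction is 2 bytes  */
    uint32_t csLimit = 0xffff;   /* hypothetical CS limit              */

    uint16_t uNewIp = (uint16_t)(ip + off + cbInstr);     /* wraps to 0x000c */
    printf("new IP=%#06x -> %s\n", uNewIp,
           uNewIp > csLimit ? "raise #GP(0)" : "within the limit");
    return 0;
}
#endif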
6448
6449/**
6450 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6451 *
6452 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6453 * segment limit.
6454 *
6455 * @returns Strict VBox status code.
6456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6457 * @param offNextInstr The offset of the next instruction.
6458 */
6459IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6460{
6461 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6462
6463 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6464 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6465 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6466 return iemRaiseGeneralProtectionFault0(pVCpu);
6467 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6468 pVCpu->cpum.GstCtx.rip = uNewIp;
6469 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6470
6471#ifndef IEM_WITH_CODE_TLB
6472 /* Flush the prefetch buffer. */
6473 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6474#endif
6475
6476 return VINF_SUCCESS;
6477}
6478
6479
6480/**
6481 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6482 *
6483 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6484 * segment limit.
6485 *
6486 * @returns Strict VBox status code.
6487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6488 * @param offNextInstr The offset of the next instruction.
6489 */
6490IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6491{
6492 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6493
6494 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6495 {
6496 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6497
6498 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6499 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6500 return iemRaiseGeneralProtectionFault0(pVCpu);
6501 pVCpu->cpum.GstCtx.rip = uNewEip;
6502 }
6503 else
6504 {
6505 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6506
6507 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6508 if (!IEM_IS_CANONICAL(uNewRip))
6509 return iemRaiseGeneralProtectionFault0(pVCpu);
6510 pVCpu->cpum.GstCtx.rip = uNewRip;
6511 }
6512 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6513
6514#ifndef IEM_WITH_CODE_TLB
6515 /* Flush the prefetch buffer. */
6516 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6517#endif
6518
6519 return VINF_SUCCESS;
6520}
6521
6522
6523/**
6524 * Performs a near jump to the specified address.
6525 *
6526 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6527 * segment limit.
6528 *
6529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6530 * @param uNewRip The new RIP value.
6531 */
6532IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6533{
6534 switch (pVCpu->iem.s.enmEffOpSize)
6535 {
6536 case IEMMODE_16BIT:
6537 {
6538 Assert(uNewRip <= UINT16_MAX);
6539 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6540 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6541 return iemRaiseGeneralProtectionFault0(pVCpu);
6542 /** @todo Test 16-bit jump in 64-bit mode. */
6543 pVCpu->cpum.GstCtx.rip = uNewRip;
6544 break;
6545 }
6546
6547 case IEMMODE_32BIT:
6548 {
6549 Assert(uNewRip <= UINT32_MAX);
6550 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6551 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6552
6553 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6554 return iemRaiseGeneralProtectionFault0(pVCpu);
6555 pVCpu->cpum.GstCtx.rip = uNewRip;
6556 break;
6557 }
6558
6559 case IEMMODE_64BIT:
6560 {
6561 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6562
6563 if (!IEM_IS_CANONICAL(uNewRip))
6564 return iemRaiseGeneralProtectionFault0(pVCpu);
6565 pVCpu->cpum.GstCtx.rip = uNewRip;
6566 break;
6567 }
6568
6569 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6570 }
6571
6572 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6573
6574#ifndef IEM_WITH_CODE_TLB
6575 /* Flush the prefetch buffer. */
6576 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6577#endif
6578
6579 return VINF_SUCCESS;
6580}
6581
6582
6583/**
6584 * Get the address of the top of the stack.
6585 *
6586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6587 */
6588DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6589{
6590 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6591 return pVCpu->cpum.GstCtx.rsp;
6592 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6593 return pVCpu->cpum.GstCtx.esp;
6594 return pVCpu->cpum.GstCtx.sp;
6595}
6596
6597
6598/**
6599 * Updates the RIP/EIP/IP to point to the next instruction.
6600 *
6601 * This function leaves the EFLAGS.RF flag alone.
6602 *
6603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6604 * @param cbInstr The number of bytes to add.
6605 */
6606IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6607{
6608 switch (pVCpu->iem.s.enmCpuMode)
6609 {
6610 case IEMMODE_16BIT:
6611 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6612 pVCpu->cpum.GstCtx.eip += cbInstr;
6613 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6614 break;
6615
6616 case IEMMODE_32BIT:
6617 pVCpu->cpum.GstCtx.eip += cbInstr;
6618 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6619 break;
6620
6621 case IEMMODE_64BIT:
6622 pVCpu->cpum.GstCtx.rip += cbInstr;
6623 break;
6624 default: AssertFailed();
6625 }
6626}
6627
6628
6629#if 0
6630/**
6631 * Updates the RIP/EIP/IP to point to the next instruction.
6632 *
6633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6634 */
6635IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6636{
6637 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6638}
6639#endif
6640
6641
6642
6643/**
6644 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6645 *
6646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6647 * @param cbInstr The number of bytes to add.
6648 */
6649IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6650{
6651 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6652
6653 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6654#if ARCH_BITS >= 64
6655 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6656 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6657 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6658#else
6659 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6660 pVCpu->cpum.GstCtx.rip += cbInstr;
6661 else
6662 pVCpu->cpum.GstCtx.eip += cbInstr;
6663#endif
6664}
6665
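/*
 * Illustrative sketch (standalone, made-up values): the branch-free advance used
 * above on 64-bit hosts - the CPU mode indexes a mask table, so 16/32-bit code
 * keeps RIP within 32 bits while 64-bit code keeps the full 64-bit value.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

enum { MODE_16BIT = 0, MODE_32BIT = 1, MODE_64BIT = 2 };

int main(void)
{
    static uint64_t const s_aMasks[] = { UINT32_MAX, UINT32_MAX, UINT64_MAX };

    uint64_t rip     = UINT64_C(0xfffffffe);   /* hypothetical EIP near the 4G wrap */
    uint8_t  cbInstr = 5;
    int      iMode   = MODE_32BIT;

    rip = (rip + cbInstr) & s_aMasks[iMode];   /* 0xfffffffe + 5 wraps to 0x3 */
    printf("rip=%#llx\n", (unsigned long long)rip);
    return 0;
}
#endif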
6666
6667/**
6668 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6669 *
6670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6671 */
6672IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6673{
6674 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6675}
6676
6677
6678/**
6679 * Adds to the stack pointer.
6680 *
6681 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6682 * @param cbToAdd The number of bytes to add (8-bit!).
6683 */
6684DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6685{
6686 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6687 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6688 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6689 pVCpu->cpum.GstCtx.esp += cbToAdd;
6690 else
6691 pVCpu->cpum.GstCtx.sp += cbToAdd;
6692}
6693
6694
6695/**
6696 * Subtracts from the stack pointer.
6697 *
6698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6699 * @param cbToSub The number of bytes to subtract (8-bit!).
6700 */
6701DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6705 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6706 pVCpu->cpum.GstCtx.esp -= cbToSub;
6707 else
6708 pVCpu->cpum.GstCtx.sp -= cbToSub;
6709}
6710
6711
6712/**
6713 * Adds to the temporary stack pointer.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6717 * @param cbToAdd The number of bytes to add (16-bit).
6718 */
6719DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6720{
6721 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6722 pTmpRsp->u += cbToAdd;
6723 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6724 pTmpRsp->DWords.dw0 += cbToAdd;
6725 else
6726 pTmpRsp->Words.w0 += cbToAdd;
6727}
6728
6729
6730/**
6731 * Subtracts from the temporary stack pointer.
6732 *
6733 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6734 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6735 * @param cbToSub The number of bytes to subtract.
6736 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6737 * expecting that.
6738 */
6739DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6740{
6741 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6742 pTmpRsp->u -= cbToSub;
6743 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6744 pTmpRsp->DWords.dw0 -= cbToSub;
6745 else
6746 pTmpRsp->Words.w0 -= cbToSub;
6747}
6748
6749
6750/**
6751 * Calculates the effective stack address for a push of the specified size as
6752 * well as the new RSP value (upper bits may be masked).
6753 *
6754 * @returns Effective stack address for the push.
6755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6756 * @param cbItem The size of the stack item to push.
6757 * @param puNewRsp Where to return the new RSP value.
6758 */
6759DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6760{
6761 RTUINT64U uTmpRsp;
6762 RTGCPTR GCPtrTop;
6763 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6764
6765 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6766 GCPtrTop = uTmpRsp.u -= cbItem;
6767 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6768 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6769 else
6770 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6771 *puNewRsp = uTmpRsp.u;
6772 return GCPtrTop;
6773}
6774
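/*
 * Illustrative sketch (standalone, little-endian host assumed, made-up values):
 * the push logic above decrements the whole RSP in 64-bit mode, only ESP when SS
 * is a 32-bit segment, and only SP for a 16-bit stack - the upper bits are left
 * untouched and SP wraps within 16 bits.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

union STACKPTR { uint64_t u; uint32_t dw0; uint16_t w0; };  /* low dword/word overlay */

int main(void)
{
    union STACKPTR Rsp;
    Rsp.u = UINT64_C(0x0000123400000000);   /* hypothetical RSP, junk above bit 15 */
    int     f64Bit = 0, fSsBig = 0;         /* 16-bit stack segment                */
    uint8_t cbItem = 2;                     /* pushing a word                      */

    uint64_t GCPtrTop;
    if (f64Bit)
        GCPtrTop = Rsp.u   -= cbItem;
    else if (fSsBig)
        GCPtrTop = Rsp.dw0 -= cbItem;
    else
        GCPtrTop = Rsp.w0  -= cbItem;       /* SP: 0x0000 - 2 wraps to 0xfffe */

    printf("store at %#llx, new RSP %#llx\n",
           (unsigned long long)GCPtrTop, (unsigned long long)Rsp.u);
    return 0;
}
#endif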
6775
6776/**
6777 * Gets the current stack pointer and calculates the value after a pop of the
6778 * specified size.
6779 *
6780 * @returns Current stack pointer.
6781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6782 * @param cbItem The size of the stack item to pop.
6783 * @param puNewRsp Where to return the new RSP value.
6784 */
6785DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6786{
6787 RTUINT64U uTmpRsp;
6788 RTGCPTR GCPtrTop;
6789 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6790
6791 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6792 {
6793 GCPtrTop = uTmpRsp.u;
6794 uTmpRsp.u += cbItem;
6795 }
6796 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6797 {
6798 GCPtrTop = uTmpRsp.DWords.dw0;
6799 uTmpRsp.DWords.dw0 += cbItem;
6800 }
6801 else
6802 {
6803 GCPtrTop = uTmpRsp.Words.w0;
6804 uTmpRsp.Words.w0 += cbItem;
6805 }
6806 *puNewRsp = uTmpRsp.u;
6807 return GCPtrTop;
6808}
6809
6810
6811/**
6812 * Calculates the effective stack address for a push of the specified size as
6813 * well as the new temporary RSP value (upper bits may be masked).
6814 *
6815 * @returns Effective stack address for the push.
6816 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6817 * @param pTmpRsp The temporary stack pointer. This is updated.
6818 * @param cbItem The size of the stack item to push.
6819 */
6820DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6821{
6822 RTGCPTR GCPtrTop;
6823
6824 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6825 GCPtrTop = pTmpRsp->u -= cbItem;
6826 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6827 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6828 else
6829 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6830 return GCPtrTop;
6831}
6832
6833
6834/**
6835 * Gets the effective stack address for a pop of the specified size and
6836 * calculates and updates the temporary RSP.
6837 *
6838 * @returns Current stack pointer.
6839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6840 * @param pTmpRsp The temporary stack pointer. This is updated.
6841 * @param cbItem The size of the stack item to pop.
6842 */
6843DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6844{
6845 RTGCPTR GCPtrTop;
6846 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6847 {
6848 GCPtrTop = pTmpRsp->u;
6849 pTmpRsp->u += cbItem;
6850 }
6851 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6852 {
6853 GCPtrTop = pTmpRsp->DWords.dw0;
6854 pTmpRsp->DWords.dw0 += cbItem;
6855 }
6856 else
6857 {
6858 GCPtrTop = pTmpRsp->Words.w0;
6859 pTmpRsp->Words.w0 += cbItem;
6860 }
6861 return GCPtrTop;
6862}
6863
6864/** @} */
6865
6866
6867/** @name FPU access and helpers.
6868 *
6869 * @{
6870 */
6871
6872
6873/**
6874 * Hook for preparing to use the host FPU.
6875 *
6876 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6877 *
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 */
6880DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6881{
6882#ifdef IN_RING3
6883 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6884#else
6885 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6886#endif
6887 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6888}
6889
6890
6891/**
6892 * Hook for preparing to use the host FPU for SSE.
6893 *
6894 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6895 *
6896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6897 */
6898DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6899{
6900 iemFpuPrepareUsage(pVCpu);
6901}
6902
6903
6904/**
6905 * Hook for preparing to use the host FPU for AVX.
6906 *
6907 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6912{
6913 iemFpuPrepareUsage(pVCpu);
6914}
6915
6916
6917/**
6918 * Hook for actualizing the guest FPU state before the interpreter reads it.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6925{
6926#ifdef IN_RING3
6927 NOREF(pVCpu);
6928#else
6929 CPUMRZFpuStateActualizeForRead(pVCpu);
6930#endif
6931 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6932}
6933
6934
6935/**
6936 * Hook for actualizing the guest FPU state before the interpreter changes it.
6937 *
6938 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6939 *
6940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6941 */
6942DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6943{
6944#ifdef IN_RING3
6945 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6946#else
6947 CPUMRZFpuStateActualizeForChange(pVCpu);
6948#endif
6949 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6950}
6951
6952
6953/**
6954 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6955 * only.
6956 *
6957 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6958 *
6959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6960 */
6961DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6962{
6963#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6964 NOREF(pVCpu);
6965#else
6966 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6967#endif
6968 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6969}
6970
6971
6972/**
6973 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6974 * read+write.
6975 *
6976 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6977 *
6978 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6979 */
6980DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6981{
6982#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6983 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6984#else
6985 CPUMRZFpuStateActualizeForChange(pVCpu);
6986#endif
6987 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6988}
6989
6990
6991/**
6992 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6993 * only.
6994 *
6995 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6996 *
6997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6998 */
6999DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7000{
7001#ifdef IN_RING3
7002 NOREF(pVCpu);
7003#else
7004 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7005#endif
7006 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7007}
7008
7009
7010/**
7011 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7012 * read+write.
7013 *
7014 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7015 *
7016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7017 */
7018DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7019{
7020#ifdef IN_RING3
7021 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7022#else
7023 CPUMRZFpuStateActualizeForChange(pVCpu);
7024#endif
7025 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7026}
7027
7028
7029/**
7030 * Stores a QNaN value into a FPU register.
7031 *
7032 * @param pReg Pointer to the register.
7033 */
7034DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7035{
7036 pReg->au32[0] = UINT32_C(0x00000000);
7037 pReg->au32[1] = UINT32_C(0xc0000000);
7038 pReg->au16[4] = UINT16_C(0xffff);
7039}
7040
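/*
 * Illustrative sketch (standalone, simplified stand-in for RTFLOAT80U): the 80-bit
 * value assembled above is the x87 "indefinite" QNaN - sign set, all-ones exponent,
 * integer bit plus the top fraction bit set, i.e. bytes FF FF C0 00 00 00 00 00 00 00
 * reading from the most significant end.
 */
#if 0 /* standalone example, little-endian host assumed */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    union { uint8_t au8[10]; uint32_t au32[2]; uint16_t au16[5]; } r80;
    memset(&r80, 0, sizeof(r80));

    r80.au32[0] = UINT32_C(0x00000000);   /* fraction, low dword               */
    r80.au32[1] = UINT32_C(0xc0000000);   /* integer bit + top fraction bit    */
    r80.au16[4] = UINT16_C(0xffff);       /* sign + all-ones (0x7fff) exponent */

    for (int i = 9; i >= 0; i--)          /* most significant byte first       */
        printf("%02x ", r80.au8[i]);
    printf("\n");                         /* ff ff c0 00 00 00 00 00 00 00     */
    return 0;
}
#endif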
7041
7042/**
7043 * Updates the FOP, FPU.CS and FPUIP registers.
7044 *
7045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7046 * @param pFpuCtx The FPU context.
7047 */
7048DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7049{
7050 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7051 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7052 /** @todo x87.CS and FPUIP need to be kept separately. */
7053 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7054 {
7055 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7056 * happens in real mode here based on the fnsave and fnstenv images. */
7057 pFpuCtx->CS = 0;
7058 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7059 }
7060 else
7061 {
7062 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7063 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7064 }
7065}
7066
7067
7068/**
7069 * Updates the x87.DS and FPUDP registers.
7070 *
7071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7072 * @param pFpuCtx The FPU context.
7073 * @param iEffSeg The effective segment register.
7074 * @param GCPtrEff The effective address relative to @a iEffSeg.
7075 */
7076DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7077{
7078 RTSEL sel;
7079 switch (iEffSeg)
7080 {
7081 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7082 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7083 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7084 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7085 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7086 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7087 default:
7088 AssertMsgFailed(("%d\n", iEffSeg));
7089 sel = pVCpu->cpum.GstCtx.ds.Sel;
7090 }
7091 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7092 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7093 {
7094 pFpuCtx->DS = 0;
7095 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7096 }
7097 else
7098 {
7099 pFpuCtx->DS = sel;
7100 pFpuCtx->FPUDP = GCPtrEff;
7101 }
7102}
7103
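/*
 * Illustrative worked example (standalone, made-up numbers): the real/V86-mode
 * branch above folds the selector into FPUDP the usual real-mode way, selector
 * shifted left by four plus the 32-bit offset.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t sel    = 0x1234;                         /* hypothetical DS selector    */
    uint32_t offEff = 0x0056;                         /* hypothetical operand offset */
    uint32_t uFpuDp = offEff + ((uint32_t)sel << 4);  /* 0x12340 + 0x56 = 0x12396    */
    printf("FPUDP=%#x\n", uFpuDp);
    return 0;
}
#endif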
7104
7105/**
7106 * Rotates the stack registers in the push direction.
7107 *
7108 * @param pFpuCtx The FPU context.
7109 * @remarks This is a complete waste of time, but fxsave stores the registers in
7110 * stack order.
7111 */
7112DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7113{
7114 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7115 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7116 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7117 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7118 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7119 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7120 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7121 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7122 pFpuCtx->aRegs[0].r80 = r80Tmp;
7123}
7124
7125
7126/**
7127 * Rotates the stack registers in the pop direction.
7128 *
7129 * @param pFpuCtx The FPU context.
7130 * @remarks This is a complete waste of time, but fxsave stores the registers in
7131 * stack order.
7132 */
7133DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7134{
7135 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7136 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7137 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7138 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7139 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7140 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7141 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7142 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7143 pFpuCtx->aRegs[7].r80 = r80Tmp;
7144}
7145
7146
7147/**
7148 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7149 * exception prevents it.
7150 *
7151 * @param pResult The FPU operation result to push.
7152 * @param pFpuCtx The FPU context.
7153 */
7154IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7155{
7156 /* Update FSW and bail if there are pending exceptions afterwards. */
7157 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7158 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7159 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7160 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7161 {
7162 pFpuCtx->FSW = fFsw;
7163 return;
7164 }
7165
7166 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7167 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7168 {
7169 /* All is fine, push the actual value. */
7170 pFpuCtx->FTW |= RT_BIT(iNewTop);
7171 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7172 }
7173 else if (pFpuCtx->FCW & X86_FCW_IM)
7174 {
7175 /* Masked stack overflow, push QNaN. */
7176 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7177 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7178 }
7179 else
7180 {
7181 /* Raise stack overflow, don't push anything. */
7182 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7183 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7184 return;
7185 }
7186
7187 fFsw &= ~X86_FSW_TOP_MASK;
7188 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7189 pFpuCtx->FSW = fFsw;
7190
7191 iemFpuRotateStackPush(pFpuCtx);
7192}
7193
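/*
 * Illustrative sketch (standalone, made-up FSW value): the TOP arithmetic used by
 * the push/pop helpers in this file - TOP lives in FSW bits 11..13, a push
 * decrements it modulo 8 (hence the "+ 7"), a pop increments it modulo 8.
 */
#if 0 /* standalone example */
#include <stdint.h>
#include <stdio.h>

#define MY_FSW_TOP_SHIFT  11
#define MY_FSW_TOP_SMASK  7u

static unsigned TopOf(uint16_t fsw)    { return (fsw >> MY_FSW_TOP_SHIFT) & MY_FSW_TOP_SMASK; }
static unsigned PushTop(unsigned top)  { return (top + 7) & MY_FSW_TOP_SMASK; }  /* TOP-- mod 8 */
static unsigned PopTop(unsigned top)   { return (top + 1) & MY_FSW_TOP_SMASK; }  /* TOP++ mod 8 */

int main(void)
{
    uint16_t fsw = (uint16_t)(3u << MY_FSW_TOP_SHIFT);   /* FSW with TOP=3, rest clear */
    unsigned top = TopOf(fsw);
    printf("TOP=%u  push->%u  pop->%u\n", top, PushTop(top), PopTop(top));   /* 3, 2, 4 */
    return 0;
}
#endif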
7194
7195/**
7196 * Stores a result in a FPU register and updates the FSW and FTW.
7197 *
7198 * @param pFpuCtx The FPU context.
7199 * @param pResult The result to store.
7200 * @param iStReg Which FPU register to store it in.
7201 */
7202IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7203{
7204 Assert(iStReg < 8);
7205 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7206 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7207 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7208 pFpuCtx->FTW |= RT_BIT(iReg);
7209 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7210}
7211
7212
7213/**
7214 * Only updates the FPU status word (FSW) with the result of the current
7215 * instruction.
7216 *
7217 * @param pFpuCtx The FPU context.
7218 * @param u16FSW The FSW output of the current instruction.
7219 */
7220IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7221{
7222 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7223 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7224}
7225
7226
7227/**
7228 * Pops one item off the FPU stack if no pending exception prevents it.
7229 *
7230 * @param pFpuCtx The FPU context.
7231 */
7232IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7233{
7234 /* Check pending exceptions. */
7235 uint16_t uFSW = pFpuCtx->FSW;
7236 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7237 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7238 return;
7239
7240 /* TOP++ (a pop increments TOP). */
7241 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7242 uFSW &= ~X86_FSW_TOP_MASK;
7243 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7244 pFpuCtx->FSW = uFSW;
7245
7246 /* Mark the previous ST0 as empty. */
7247 iOldTop >>= X86_FSW_TOP_SHIFT;
7248 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7249
7250 /* Rotate the registers. */
7251 iemFpuRotateStackPop(pFpuCtx);
7252}
7253
7254
7255/**
7256 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7257 *
7258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7259 * @param pResult The FPU operation result to push.
7260 */
7261IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7262{
7263 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7264 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7265 iemFpuMaybePushResult(pResult, pFpuCtx);
7266}
7267
7268
7269/**
7270 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7271 * and sets FPUDP and FPUDS.
7272 *
7273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7274 * @param pResult The FPU operation result to push.
7275 * @param iEffSeg The effective segment register.
7276 * @param GCPtrEff The effective address relative to @a iEffSeg.
7277 */
7278IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7279{
7280 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7281 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7282 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7283 iemFpuMaybePushResult(pResult, pFpuCtx);
7284}
7285
7286
7287/**
7288 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7289 * unless a pending exception prevents it.
7290 *
7291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7292 * @param pResult The FPU operation result to store and push.
7293 */
7294IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7295{
7296 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7297 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7298
7299 /* Update FSW and bail if there are pending exceptions afterwards. */
7300 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7301 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7302 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7303 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7304 {
7305 pFpuCtx->FSW = fFsw;
7306 return;
7307 }
7308
7309 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7310 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7311 {
7312 /* All is fine, push the actual value. */
7313 pFpuCtx->FTW |= RT_BIT(iNewTop);
7314 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7315 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7316 }
7317 else if (pFpuCtx->FCW & X86_FCW_IM)
7318 {
7319 /* Masked stack overflow, push QNaN. */
7320 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7321 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7322 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7323 }
7324 else
7325 {
7326 /* Raise stack overflow, don't push anything. */
7327 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7328 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7329 return;
7330 }
7331
7332 fFsw &= ~X86_FSW_TOP_MASK;
7333 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7334 pFpuCtx->FSW = fFsw;
7335
7336 iemFpuRotateStackPush(pFpuCtx);
7337}
7338
7339
7340/**
7341 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7342 * FOP.
7343 *
7344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7345 * @param pResult The result to store.
7346 * @param iStReg Which FPU register to store it in.
7347 */
7348IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7349{
7350 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7351 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7352 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7353}
7354
7355
7356/**
7357 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7358 * FOP, and then pops the stack.
7359 *
7360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7361 * @param pResult The result to store.
7362 * @param iStReg Which FPU register to store it in.
7363 */
7364IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7365{
7366 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7367 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7368 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7369 iemFpuMaybePopOne(pFpuCtx);
7370}
7371
7372
7373/**
7374 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7375 * FPUDP, and FPUDS.
7376 *
7377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7378 * @param pResult The result to store.
7379 * @param iStReg Which FPU register to store it in.
7380 * @param iEffSeg The effective memory operand selector register.
7381 * @param GCPtrEff The effective memory operand offset.
7382 */
7383IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7384 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7385{
7386 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7387 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7388 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7389 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7390}
7391
7392
7393/**
7394 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7395 * FPUDP, and FPUDS, and then pops the stack.
7396 *
7397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7398 * @param pResult The result to store.
7399 * @param iStReg Which FPU register to store it in.
7400 * @param iEffSeg The effective memory operand selector register.
7401 * @param GCPtrEff The effective memory operand offset.
7402 */
7403IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7404 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7405{
7406 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7407 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7409 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7410 iemFpuMaybePopOne(pFpuCtx);
7411}
7412
7413
7414/**
7415 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 */
7419IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7420{
7421 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7422 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7423}
7424
7425
7426/**
7427 * Marks the specified stack register as free (for FFREE).
7428 *
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 * @param iStReg The register to free.
7431 */
7432IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7433{
7434 Assert(iStReg < 8);
7435 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7436 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7437 pFpuCtx->FTW &= ~RT_BIT(iReg);
7438}
7439
7440
7441/**
7442 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7443 *
7444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7445 */
7446IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7447{
7448 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7449 uint16_t uFsw = pFpuCtx->FSW;
7450 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7451 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7452 uFsw &= ~X86_FSW_TOP_MASK;
7453 uFsw |= uTop;
7454 pFpuCtx->FSW = uFsw;
7455}
7456
7457
7458/**
7459 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7460 *
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 */
7463IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7464{
7465 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7466 uint16_t uFsw = pFpuCtx->FSW;
7467 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7468 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7469 uFsw &= ~X86_FSW_TOP_MASK;
7470 uFsw |= uTop;
7471 pFpuCtx->FSW = uFsw;
7472}
7473
7474
7475/**
7476 * Updates the FSW, FOP, FPUIP, and FPUCS.
7477 *
7478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7479 * @param u16FSW The FSW from the current instruction.
7480 */
7481IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7482{
7483 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7484 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7485 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7486}
7487
7488
7489/**
7490 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7491 *
7492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7493 * @param u16FSW The FSW from the current instruction.
7494 */
7495IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7496{
7497 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7498 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7499 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7500 iemFpuMaybePopOne(pFpuCtx);
7501}
7502
7503
7504/**
7505 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7506 *
7507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7508 * @param u16FSW The FSW from the current instruction.
7509 * @param iEffSeg The effective memory operand selector register.
7510 * @param GCPtrEff The effective memory operand offset.
7511 */
7512IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7513{
7514 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7515 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7516 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7517 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7518}
7519
7520
7521/**
7522 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7523 *
7524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7525 * @param u16FSW The FSW from the current instruction.
7526 */
7527IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7528{
7529 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7530 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7531 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7532 iemFpuMaybePopOne(pFpuCtx);
7533 iemFpuMaybePopOne(pFpuCtx);
7534}
7535
7536
7537/**
7538 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7539 *
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 * @param u16FSW The FSW from the current instruction.
7542 * @param iEffSeg The effective memory operand selector register.
7543 * @param GCPtrEff The effective memory operand offset.
7544 */
7545IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7546{
7547 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7548 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7549 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7550 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7551 iemFpuMaybePopOne(pFpuCtx);
7552}
7553
7554
7555/**
7556 * Worker routine for raising an FPU stack underflow exception.
7557 *
7558 * @param pFpuCtx The FPU context.
7559 * @param iStReg The stack register being accessed.
7560 */
7561IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7562{
7563 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7564 if (pFpuCtx->FCW & X86_FCW_IM)
7565 {
7566 /* Masked underflow. */
7567 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7568 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7569 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7570 if (iStReg != UINT8_MAX)
7571 {
7572 pFpuCtx->FTW |= RT_BIT(iReg);
7573 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7574 }
7575 }
7576 else
7577 {
7578 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7579 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7580 }
7581}
7582
7583
7584/**
7585 * Raises a FPU stack underflow exception.
7586 *
7587 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7588 * @param iStReg The destination register that should be loaded
7589 * with QNaN if \#IS is masked. Specify
7590 * UINT8_MAX if none (like for fcom).
7591 */
7592DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7593{
7594 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7596 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7597}
7598
7599
7600DECL_NO_INLINE(IEM_STATIC, void)
7601iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7602{
7603 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7604 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7606 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7607}
7608
7609
7610DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7611{
7612 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7613 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7614 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void)
7620iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7621{
7622 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7623 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7625 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7626 iemFpuMaybePopOne(pFpuCtx);
7627}
7628
7629
7630DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7631{
7632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7634 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7635 iemFpuMaybePopOne(pFpuCtx);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
7640DECL_NO_INLINE(IEM_STATIC, void)
7641iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7642{
7643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7644 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7645
7646 if (pFpuCtx->FCW & X86_FCW_IM)
7647 {
7648 /* Masked underflow - Push QNaN. */
7649 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7650 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7651 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7652 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7653 pFpuCtx->FTW |= RT_BIT(iNewTop);
7654 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7655 iemFpuRotateStackPush(pFpuCtx);
7656 }
7657 else
7658 {
7659 /* Exception pending - don't change TOP or the register stack. */
7660 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7661 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7662 }
7663}
7664
7665
7666DECL_NO_INLINE(IEM_STATIC, void)
7667iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7668{
7669 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7670 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7671
7672 if (pFpuCtx->FCW & X86_FCW_IM)
7673 {
7674 /* Masked underflow - Push QNaN. */
7675 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7676 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7677 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7678 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7679 pFpuCtx->FTW |= RT_BIT(iNewTop);
7680 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7681 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7682 iemFpuRotateStackPush(pFpuCtx);
7683 }
7684 else
7685 {
7686 /* Exception pending - don't change TOP or the register stack. */
7687 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7688 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7689 }
7690}
7691
7692
7693/**
7694 * Worker routine for raising an FPU stack overflow exception on a push.
7695 *
7696 * @param pFpuCtx The FPU context.
7697 */
7698IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7699{
7700 if (pFpuCtx->FCW & X86_FCW_IM)
7701 {
7702 /* Masked overflow. */
7703 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7704 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7705 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7706 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7707 pFpuCtx->FTW |= RT_BIT(iNewTop);
7708 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7709 iemFpuRotateStackPush(pFpuCtx);
7710 }
7711 else
7712 {
7713 /* Exception pending - don't change TOP or the register stack. */
7714 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7715 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7716 }
7717}
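/*
 * Worked example (state values assumed): a push decrements TOP modulo 8, which
 * the code above expresses as (TOP + 7) & X86_FSW_TOP_SMASK.  With FSW.TOP == 0
 * the masked path therefore targets physical register 7:
 * @code
 *      uint16_t const iNewTop = (0 + 7) & X86_FSW_TOP_SMASK;   // == 7, wraps around
 * @endcode
 */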
7718
7719
7720/**
7721 * Raises an FPU stack overflow exception on a push.
7722 *
7723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7724 */
7725DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7726{
7727 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7728 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7729 iemFpuStackPushOverflowOnly(pFpuCtx);
7730}
7731
7732
7733/**
7734 * Raises an FPU stack overflow exception on a push with a memory operand.
7735 *
7736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7737 * @param iEffSeg The effective memory operand selector register.
7738 * @param GCPtrEff The effective memory operand offset.
7739 */
7740DECL_NO_INLINE(IEM_STATIC, void)
7741iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7742{
7743 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7744 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7745 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7746 iemFpuStackPushOverflowOnly(pFpuCtx);
7747}
7748
7749
7750IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7751{
7752 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7753 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7754 if (pFpuCtx->FTW & RT_BIT(iReg))
7755 return VINF_SUCCESS;
7756 return VERR_NOT_FOUND;
7757}
7758
7759
7760IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7761{
7762 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7763 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7764 if (pFpuCtx->FTW & RT_BIT(iReg))
7765 {
7766 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7767 return VINF_SUCCESS;
7768 }
7769 return VERR_NOT_FOUND;
7770}
7771
7772
7773IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7774 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7775{
7776 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7777 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7778 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7779 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7780 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7781 {
7782 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7783 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7784 return VINF_SUCCESS;
7785 }
7786 return VERR_NOT_FOUND;
7787}
7788
7789
7790IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7791{
7792 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7793 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7794 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7795 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7796 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7797 {
7798 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7799 return VINF_SUCCESS;
7800 }
7801 return VERR_NOT_FOUND;
7802}
7803
7804
7805/**
7806 * Updates the FPU exception status after FCW is changed.
7807 *
7808 * @param pFpuCtx The FPU context.
7809 */
7810IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7811{
7812 uint16_t u16Fsw = pFpuCtx->FSW;
7813 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7814 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7815 else
7816 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7817 pFpuCtx->FSW = u16Fsw;
7818}
7819
7820
7821/**
7822 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7823 *
7824 * @returns The full FTW.
7825 * @param pFpuCtx The FPU context.
7826 */
7827IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7828{
7829 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7830 uint16_t u16Ftw = 0;
7831 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7832 for (unsigned iSt = 0; iSt < 8; iSt++)
7833 {
7834 unsigned const iReg = (iSt + iTop) & 7;
7835 if (!(u8Ftw & RT_BIT(iReg)))
7836 u16Ftw |= 3 << (iReg * 2); /* empty */
7837 else
7838 {
7839 uint16_t uTag;
7840 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7841 if (pr80Reg->s.uExponent == 0x7fff)
7842 uTag = 2; /* Exponent is all 1's => Special. */
7843 else if (pr80Reg->s.uExponent == 0x0000)
7844 {
7845 if (pr80Reg->s.u64Mantissa == 0x0000)
7846 uTag = 1; /* All bits are zero => Zero. */
7847 else
7848 uTag = 2; /* Must be special. */
7849 }
7850 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7851 uTag = 0; /* Valid. */
7852 else
7853 uTag = 2; /* Must be special. */
7854
7855            u16Ftw |= uTag << (iReg * 2);
7856 }
7857 }
7858
7859 return u16Ftw;
7860}
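/*
 * Worked example (register state assumed): with FSW.TOP = 6, abridged FTW bits
 * 6 and 7 set, ST(0) holding 1.0 (tag 00b, valid) and ST(1) holding +0.0
 * (tag 01b, zero), the loop above tags the six remaining registers as empty
 * (11b) and yields:
 * @code
 *      Assert(iemFpuCalcFullFtw(pFpuCtx) == 0x4fff);
 * @endcode
 */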
7861
7862
7863/**
7864 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7865 *
7866 * @returns The compressed FTW.
7867 * @param u16FullFtw The full FTW to convert.
7868 */
7869IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7870{
7871 uint8_t u8Ftw = 0;
7872 for (unsigned i = 0; i < 8; i++)
7873 {
7874 if ((u16FullFtw & 3) != 3 /*empty*/)
7875 u8Ftw |= RT_BIT(i);
7876 u16FullFtw >>= 2;
7877 }
7878
7879 return u8Ftw;
7880}
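/*
 * Worked example (continuing the assumed state above): compressing that full
 * FTW recovers the abridged form with only bits 6 and 7 set:
 * @code
 *      Assert(iemFpuCompressFtw(0x4fff) == 0xc0);
 * @endcode
 */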
7881
7882/** @} */
7883
7884
7885/** @name Memory access.
7886 *
7887 * @{
7888 */
7889
7890
7891/**
7892 * Updates the IEMCPU::cbWritten counter if applicable.
7893 *
7894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7895 * @param fAccess The access being accounted for.
7896 * @param cbMem The access size.
7897 */
7898DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7899{
7900 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7901 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7902 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7903}
7904
7905
7906/**
7907 * Checks if the given segment can be written to, raising the appropriate
7908 * exception if not.
7909 *
7910 * @returns VBox strict status code.
7911 *
7912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7913 * @param pHid Pointer to the hidden register.
7914 * @param iSegReg The register number.
7915 * @param pu64BaseAddr Where to return the base address to use for the
7916 * segment. (In 64-bit code it may differ from the
7917 * base in the hidden segment.)
7918 */
7919IEM_STATIC VBOXSTRICTRC
7920iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7921{
7922 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7923
7924 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7925 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7926 else
7927 {
7928 if (!pHid->Attr.n.u1Present)
7929 {
7930 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7931 AssertRelease(uSel == 0);
7932 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7933 return iemRaiseGeneralProtectionFault0(pVCpu);
7934 }
7935
7936 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7937 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7938 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7939 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7940 *pu64BaseAddr = pHid->u64Base;
7941 }
7942 return VINF_SUCCESS;
7943}
7944
7945
7946/**
7947 * Checks if the given segment can be read from, raising the appropriate
7948 * exception if not.
7949 *
7950 * @returns VBox strict status code.
7951 *
7952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7953 * @param pHid Pointer to the hidden register.
7954 * @param iSegReg The register number.
7955 * @param pu64BaseAddr Where to return the base address to use for the
7956 * segment. (In 64-bit code it may differ from the
7957 * base in the hidden segment.)
7958 */
7959IEM_STATIC VBOXSTRICTRC
7960iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7961{
7962 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7963
7964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7965 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7966 else
7967 {
7968 if (!pHid->Attr.n.u1Present)
7969 {
7970 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7971 AssertRelease(uSel == 0);
7972 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7973 return iemRaiseGeneralProtectionFault0(pVCpu);
7974 }
7975
7976 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7977 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7978 *pu64BaseAddr = pHid->u64Base;
7979 }
7980 return VINF_SUCCESS;
7981}
7982
7983
7984/**
7985 * Applies the segment limit, base and attributes.
7986 *
7987 * This may raise a \#GP or \#SS.
7988 *
7989 * @returns VBox strict status code.
7990 *
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param fAccess The kind of access which is being performed.
7993 * @param iSegReg The index of the segment register to apply.
7994 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7995 * TSS, ++).
7996 * @param cbMem The access size.
7997 * @param pGCPtrMem Pointer to the guest memory address to apply
7998 * segmentation to. Input and output parameter.
7999 */
8000IEM_STATIC VBOXSTRICTRC
8001iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8002{
8003 if (iSegReg == UINT8_MAX)
8004 return VINF_SUCCESS;
8005
8006 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8007 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8008 switch (pVCpu->iem.s.enmCpuMode)
8009 {
8010 case IEMMODE_16BIT:
8011 case IEMMODE_32BIT:
8012 {
8013 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8014 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8015
8016 if ( pSel->Attr.n.u1Present
8017 && !pSel->Attr.n.u1Unusable)
8018 {
8019 Assert(pSel->Attr.n.u1DescType);
8020 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8021 {
8022 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8023 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8024 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8025
8026 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8027 {
8028 /** @todo CPL check. */
8029 }
8030
8031 /*
8032 * There are two kinds of data selectors, normal and expand down.
8033 */
8034 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8035 {
8036 if ( GCPtrFirst32 > pSel->u32Limit
8037 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8038 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8039 }
8040 else
8041 {
8042 /*
8043 * The upper boundary is defined by the B bit, not the G bit!
8044 */
8045 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8046 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8047 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8048 }
8049 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8050 }
8051 else
8052 {
8053
8054 /*
8055                 * A code selector can usually be used to read through; writing is
8056                 * only permitted in real and V8086 mode.
8057 */
8058 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8059 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8060 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8061 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8062 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8063
8064 if ( GCPtrFirst32 > pSel->u32Limit
8065 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8066 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8067
8068 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8069 {
8070 /** @todo CPL check. */
8071 }
8072
8073 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8074 }
8075 }
8076 else
8077 return iemRaiseGeneralProtectionFault0(pVCpu);
8078 return VINF_SUCCESS;
8079 }
8080
8081 case IEMMODE_64BIT:
8082 {
8083 RTGCPTR GCPtrMem = *pGCPtrMem;
8084 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8085 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8086
8087 Assert(cbMem >= 1);
8088 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8089 return VINF_SUCCESS;
8090 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8091 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8092 return iemRaiseGeneralProtectionFault0(pVCpu);
8093 }
8094
8095 default:
8096 AssertFailedReturn(VERR_IEM_IPE_7);
8097 }
8098}
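/*
 * Worked example (selector attributes assumed): for a present expand-down data
 * segment with u32Limit = 0x0fff and the B bit set, valid offsets are
 * 0x1000..0xffffffff.  A 4 byte access at 0x0ffe is thus rejected, while one at
 * 0x1000 succeeds and gets the segment base added to *pGCPtrMem:
 * @code
 *      RTGCPTR      GCPtrMem = 0x0ffe;
 *      VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS,
 *                                                 4, &GCPtrMem);
 *      // rcStrict now carries the #GP/#SS raising status from iemRaiseSelectorBounds.
 * @endcode
 */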
8099
8100
8101/**
8102 * Translates a virtual address to a physical address and checks if we
8103 * can access the page as specified.
8104 *
8105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8106 * @param GCPtrMem The virtual address.
8107 * @param fAccess The intended access.
8108 * @param pGCPhysMem Where to return the physical address.
8109 */
8110IEM_STATIC VBOXSTRICTRC
8111iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8112{
8113 /** @todo Need a different PGM interface here. We're currently using
8114 * generic / REM interfaces. this won't cut it for R0. */
8115 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8116 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8117 RTGCPHYS GCPhys;
8118 uint64_t fFlags;
8119 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8120 if (RT_FAILURE(rc))
8121 {
8122 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8123 /** @todo Check unassigned memory in unpaged mode. */
8124 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8125 *pGCPhysMem = NIL_RTGCPHYS;
8126 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8127 }
8128
8129 /* If the page is writable and does not have the no-exec bit set, all
8130 access is allowed. Otherwise we'll have to check more carefully... */
8131 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8132 {
8133 /* Write to read only memory? */
8134 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8135 && !(fFlags & X86_PTE_RW)
8136 && ( (pVCpu->iem.s.uCpl == 3
8137 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8138 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8139 {
8140 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8141 *pGCPhysMem = NIL_RTGCPHYS;
8142 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8143 }
8144
8145 /* Kernel memory accessed by userland? */
8146 if ( !(fFlags & X86_PTE_US)
8147 && pVCpu->iem.s.uCpl == 3
8148 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8149 {
8150 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8151 *pGCPhysMem = NIL_RTGCPHYS;
8152 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8153 }
8154
8155 /* Executing non-executable memory? */
8156 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8157 && (fFlags & X86_PTE_PAE_NX)
8158 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8159 {
8160 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8161 *pGCPhysMem = NIL_RTGCPHYS;
8162 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8163 VERR_ACCESS_DENIED);
8164 }
8165 }
8166
8167 /*
8168 * Set the dirty / access flags.
8169     * ASSUMES this is set when the address is translated rather than on commit...
8170 */
8171 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8172 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8173 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8174 {
8175 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8176 AssertRC(rc2);
8177 }
8178
8179 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8180 *pGCPhysMem = GCPhys;
8181 return VINF_SUCCESS;
8182}
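/*
 * Usage sketch (page flags assumed): a ring-3 write (IEM_ACCESS_DATA_W) to a
 * present page with X86_PTE_RW clear takes the read-only branch above and
 * raises #PF, whereas a ring-0 write to the same page only faults when CR0.WP
 * is set:
 * @code
 *      RTGCPHYS     GCPhys;
 *      VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem,
 *                                                                IEM_ACCESS_DATA_W, &GCPhys);
 * @endcode
 */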
8183
8184
8185
8186/**
8187 * Maps a physical page.
8188 *
8189 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8191 * @param GCPhysMem The physical address.
8192 * @param fAccess The intended access.
8193 * @param ppvMem Where to return the mapping address.
8194 * @param pLock The PGM lock.
8195 */
8196IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8197{
8198#ifdef IEM_LOG_MEMORY_WRITES
8199 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8200 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8201#endif
8202
8203 /** @todo This API may require some improving later. A private deal with PGM
8204     *          regarding locking and unlocking needs to be struck. A couple of TLBs
8205 * living in PGM, but with publicly accessible inlined access methods
8206 * could perhaps be an even better solution. */
8207 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8208 GCPhysMem,
8209 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8210 pVCpu->iem.s.fBypassHandlers,
8211 ppvMem,
8212 pLock);
8213 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8214 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8215
8216 return rc;
8217}
8218
8219
8220/**
8221 * Unmap a page previously mapped by iemMemPageMap.
8222 *
8223 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8224 * @param GCPhysMem The physical address.
8225 * @param fAccess The intended access.
8226 * @param pvMem What iemMemPageMap returned.
8227 * @param pLock The PGM lock.
8228 */
8229DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8230{
8231 NOREF(pVCpu);
8232 NOREF(GCPhysMem);
8233 NOREF(fAccess);
8234 NOREF(pvMem);
8235 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8236}
8237
8238
8239/**
8240 * Looks up a memory mapping entry.
8241 *
8242 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8244 * @param pvMem The memory address.
8245 * @param   fAccess             The access flags the mapping was created with.
8246 */
8247DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8248{
8249 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8250 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8251 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8252 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8253 return 0;
8254 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8255 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8256 return 1;
8257 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8258 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8259 return 2;
8260 return VERR_NOT_FOUND;
8261}
8262
8263
8264/**
8265 * Finds a free memmap entry when using iNextMapping doesn't work.
8266 *
8267 * @returns Memory mapping index, 1024 on failure.
8268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8269 */
8270IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8271{
8272 /*
8273 * The easy case.
8274 */
8275 if (pVCpu->iem.s.cActiveMappings == 0)
8276 {
8277 pVCpu->iem.s.iNextMapping = 1;
8278 return 0;
8279 }
8280
8281 /* There should be enough mappings for all instructions. */
8282 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8283
8284 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8285 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8286 return i;
8287
8288 AssertFailedReturn(1024);
8289}
8290
8291
8292/**
8293 * Commits a bounce buffer that needs writing back and unmaps it.
8294 *
8295 * @returns Strict VBox status code.
8296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8297 * @param iMemMap The index of the buffer to commit.
8298 * @param   fPostponeFail   Whether we can postpone write failures to ring-3.
8299 * Always false in ring-3, obviously.
8300 */
8301IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8302{
8303 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8304 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8305#ifdef IN_RING3
8306 Assert(!fPostponeFail);
8307 RT_NOREF_PV(fPostponeFail);
8308#endif
8309
8310 /*
8311 * Do the writing.
8312 */
8313 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8314 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8315 {
8316 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8317 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8318 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8319 if (!pVCpu->iem.s.fBypassHandlers)
8320 {
8321 /*
8322 * Carefully and efficiently dealing with access handler return
8323         * codes makes this a little bloated.
8324 */
8325 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8326 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8327 pbBuf,
8328 cbFirst,
8329 PGMACCESSORIGIN_IEM);
8330 if (rcStrict == VINF_SUCCESS)
8331 {
8332 if (cbSecond)
8333 {
8334 rcStrict = PGMPhysWrite(pVM,
8335 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8336 pbBuf + cbFirst,
8337 cbSecond,
8338 PGMACCESSORIGIN_IEM);
8339 if (rcStrict == VINF_SUCCESS)
8340 { /* nothing */ }
8341 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8342 {
8343 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8344 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8346 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8347 }
8348#ifndef IN_RING3
8349 else if (fPostponeFail)
8350 {
8351 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8352 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8354 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8355 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8356 return iemSetPassUpStatus(pVCpu, rcStrict);
8357 }
8358#endif
8359 else
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8364 return rcStrict;
8365 }
8366 }
8367 }
8368 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8369 {
8370 if (!cbSecond)
8371 {
8372 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8374 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376 else
8377 {
8378 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8379 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8380 pbBuf + cbFirst,
8381 cbSecond,
8382 PGMACCESSORIGIN_IEM);
8383 if (rcStrict2 == VINF_SUCCESS)
8384 {
8385 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8386 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8387 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8388 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8389 }
8390 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8391 {
8392 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8395 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8397 }
8398#ifndef IN_RING3
8399 else if (fPostponeFail)
8400 {
8401 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8404 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8405 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8406 return iemSetPassUpStatus(pVCpu, rcStrict);
8407 }
8408#endif
8409 else
8410 {
8411 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8413 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8414 return rcStrict2;
8415 }
8416 }
8417 }
8418#ifndef IN_RING3
8419 else if (fPostponeFail)
8420 {
8421 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8422 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8424 if (!cbSecond)
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8426 else
8427 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8428 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8429 return iemSetPassUpStatus(pVCpu, rcStrict);
8430 }
8431#endif
8432 else
8433 {
8434 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8437 return rcStrict;
8438 }
8439 }
8440 else
8441 {
8442 /*
8443 * No access handlers, much simpler.
8444 */
8445 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8446 if (RT_SUCCESS(rc))
8447 {
8448 if (cbSecond)
8449 {
8450 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8451 if (RT_SUCCESS(rc))
8452 { /* likely */ }
8453 else
8454 {
8455 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8457 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8458 return rc;
8459 }
8460 }
8461 }
8462 else
8463 {
8464 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8467 return rc;
8468 }
8469 }
8470 }
8471
8472#if defined(IEM_LOG_MEMORY_WRITES)
8473 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8474 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8475 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8476 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8477 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8478 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8479
8480 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8481 g_cbIemWrote = cbWrote;
8482 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8483#endif
8484
8485 /*
8486 * Free the mapping entry.
8487 */
8488 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8489 Assert(pVCpu->iem.s.cActiveMappings != 0);
8490 pVCpu->iem.s.cActiveMappings--;
8491 return VINF_SUCCESS;
8492}
8493
8494
8495/**
8496 * iemMemMap worker that deals with a request crossing pages.
8497 */
8498IEM_STATIC VBOXSTRICTRC
8499iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8500{
8501 /*
8502 * Do the address translations.
8503 */
8504 RTGCPHYS GCPhysFirst;
8505 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8506 if (rcStrict != VINF_SUCCESS)
8507 return rcStrict;
8508
8509 RTGCPHYS GCPhysSecond;
8510 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8511 fAccess, &GCPhysSecond);
8512 if (rcStrict != VINF_SUCCESS)
8513 return rcStrict;
8514 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8515
8516 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8517
8518 /*
8519 * Read in the current memory content if it's a read, execute or partial
8520 * write access.
8521 */
8522 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8523 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8524 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8525
8526 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8527 {
8528 if (!pVCpu->iem.s.fBypassHandlers)
8529 {
8530 /*
8531 * Must carefully deal with access handler status codes here,
8532 * makes the code a bit bloated.
8533 */
8534 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8535 if (rcStrict == VINF_SUCCESS)
8536 {
8537 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8538 if (rcStrict == VINF_SUCCESS)
8539 { /*likely */ }
8540 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8541 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8542 else
8543 {
8544 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8545 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8546 return rcStrict;
8547 }
8548 }
8549 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8550 {
8551 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8552 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8553 {
8554 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8555 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8556 }
8557 else
8558 {
8559 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8560                         GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8561 return rcStrict2;
8562 }
8563 }
8564 else
8565 {
8566 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8567 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8568 return rcStrict;
8569 }
8570 }
8571 else
8572 {
8573 /*
8574             * No informational status codes here, much more straightforward.
8575 */
8576 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8577 if (RT_SUCCESS(rc))
8578 {
8579 Assert(rc == VINF_SUCCESS);
8580 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8581 if (RT_SUCCESS(rc))
8582 Assert(rc == VINF_SUCCESS);
8583 else
8584 {
8585 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8586 return rc;
8587 }
8588 }
8589 else
8590 {
8591 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8592 return rc;
8593 }
8594 }
8595 }
8596#ifdef VBOX_STRICT
8597 else
8598 memset(pbBuf, 0xcc, cbMem);
8599 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8600 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8601#endif
8602
8603 /*
8604 * Commit the bounce buffer entry.
8605 */
8606 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8607 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8608 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8609 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8610 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8611 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8612 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8613 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8614 pVCpu->iem.s.cActiveMappings++;
8615
8616 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8617 *ppvMem = pbBuf;
8618 return VINF_SUCCESS;
8619}
8620
8621
8622/**
8623 * iemMemMap worker that deals with iemMemPageMap failures.
8624 */
8625IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8626 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8627{
8628 /*
8629 * Filter out conditions we can handle and the ones which shouldn't happen.
8630 */
8631 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8632 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8633 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8634 {
8635 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8636 return rcMap;
8637 }
8638 pVCpu->iem.s.cPotentialExits++;
8639
8640 /*
8641 * Read in the current memory content if it's a read, execute or partial
8642 * write access.
8643 */
8644 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8645 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8646 {
8647 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8648 memset(pbBuf, 0xff, cbMem);
8649 else
8650 {
8651 int rc;
8652 if (!pVCpu->iem.s.fBypassHandlers)
8653 {
8654 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8655 if (rcStrict == VINF_SUCCESS)
8656 { /* nothing */ }
8657 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8658 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8659 else
8660 {
8661 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8662 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8663 return rcStrict;
8664 }
8665 }
8666 else
8667 {
8668 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8669 if (RT_SUCCESS(rc))
8670 { /* likely */ }
8671 else
8672 {
8673 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8674 GCPhysFirst, rc));
8675 return rc;
8676 }
8677 }
8678 }
8679 }
8680#ifdef VBOX_STRICT
8681 else
8682 memset(pbBuf, 0xcc, cbMem);
8683#endif
8684#ifdef VBOX_STRICT
8685 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8686 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8687#endif
8688
8689 /*
8690 * Commit the bounce buffer entry.
8691 */
8692 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8693 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8694 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8695 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8696 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8697 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8698 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8699 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8700 pVCpu->iem.s.cActiveMappings++;
8701
8702 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8703 *ppvMem = pbBuf;
8704 return VINF_SUCCESS;
8705}
8706
8707
8708
8709/**
8710 * Maps the specified guest memory for the given kind of access.
8711 *
8712 * This may be using bounce buffering of the memory if it's crossing a page
8713 * boundary or if there is an access handler installed for any of it. Because
8714 * of lock prefix guarantees, we're in for some extra clutter when this
8715 * happens.
8716 *
8717 * This may raise a \#GP, \#SS, \#PF or \#AC.
8718 *
8719 * @returns VBox strict status code.
8720 *
8721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8722 * @param ppvMem Where to return the pointer to the mapped
8723 * memory.
8724 * @param cbMem The number of bytes to map. This is usually 1,
8725 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8726 * string operations it can be up to a page.
8727 * @param iSegReg The index of the segment register to use for
8728 * this access. The base and limits are checked.
8729 * Use UINT8_MAX to indicate that no segmentation
8730 * is required (for IDT, GDT and LDT accesses).
8731 * @param GCPtrMem The address of the guest memory.
8732 * @param fAccess How the memory is being accessed. The
8733 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8734 * how to map the memory, while the
8735 * IEM_ACCESS_WHAT_XXX bit is used when raising
8736 * exceptions.
8737 */
8738IEM_STATIC VBOXSTRICTRC
8739iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8740{
8741 /*
8742 * Check the input and figure out which mapping entry to use.
8743 */
8744 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8745 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8746 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8747
8748 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8749 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8750 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8751 {
8752 iMemMap = iemMemMapFindFree(pVCpu);
8753 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8754 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8755 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8756 pVCpu->iem.s.aMemMappings[2].fAccess),
8757 VERR_IEM_IPE_9);
8758 }
8759
8760 /*
8761 * Map the memory, checking that we can actually access it. If something
8762 * slightly complicated happens, fall back on bounce buffering.
8763 */
8764 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8765 if (rcStrict != VINF_SUCCESS)
8766 return rcStrict;
8767
8768 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8769 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8770
8771 RTGCPHYS GCPhysFirst;
8772 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8773 if (rcStrict != VINF_SUCCESS)
8774 return rcStrict;
8775
8776 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8777 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8778 if (fAccess & IEM_ACCESS_TYPE_READ)
8779 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8780
8781 void *pvMem;
8782 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8783 if (rcStrict != VINF_SUCCESS)
8784 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8785
8786 /*
8787 * Fill in the mapping table entry.
8788 */
8789 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8790 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8791 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8792 pVCpu->iem.s.cActiveMappings++;
8793
8794 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8795 *ppvMem = pvMem;
8796
8797 return VINF_SUCCESS;
8798}
8799
8800
8801/**
8802 * Commits the guest memory if bounce buffered and unmaps it.
8803 *
8804 * @returns Strict VBox status code.
8805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8806 * @param pvMem The mapping.
8807 * @param fAccess The kind of access.
8808 */
8809IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8810{
8811 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8812 AssertReturn(iMemMap >= 0, iMemMap);
8813
8814 /* If it's bounce buffered, we may need to write back the buffer. */
8815 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8816 {
8817 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8818 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8819 }
8820 /* Otherwise unlock it. */
8821 else
8822 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8823
8824 /* Free the entry. */
8825 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8826 Assert(pVCpu->iem.s.cActiveMappings != 0);
8827 pVCpu->iem.s.cActiveMappings--;
8828 return VINF_SUCCESS;
8829}
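/*
 * Usage sketch (simplified, GCPtrMem and u32Value supplied by the caller;
 * mirrors the data fetch helpers further down): map, access the returned
 * pointer, then commit and unmap with the same access flags:
 * @code
 *      uint32_t    *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 * @endcode
 */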
8830
8831#ifdef IEM_WITH_SETJMP
8832
8833/**
8834 * Maps the specified guest memory for the given kind of access, longjmp on
8835 * error.
8836 *
8837 * This may be using bounce buffering of the memory if it's crossing a page
8838 * boundary or if there is an access handler installed for any of it. Because
8839 * of lock prefix guarantees, we're in for some extra clutter when this
8840 * happens.
8841 *
8842 * This may raise a \#GP, \#SS, \#PF or \#AC.
8843 *
8844 * @returns Pointer to the mapped memory.
8845 *
8846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8847 * @param cbMem The number of bytes to map. This is usually 1,
8848 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8849 * string operations it can be up to a page.
8850 * @param iSegReg The index of the segment register to use for
8851 * this access. The base and limits are checked.
8852 * Use UINT8_MAX to indicate that no segmentation
8853 * is required (for IDT, GDT and LDT accesses).
8854 * @param GCPtrMem The address of the guest memory.
8855 * @param fAccess How the memory is being accessed. The
8856 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8857 * how to map the memory, while the
8858 * IEM_ACCESS_WHAT_XXX bit is used when raising
8859 * exceptions.
8860 */
8861IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8862{
8863 /*
8864 * Check the input and figure out which mapping entry to use.
8865 */
8866 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8867 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8868 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8869
8870 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8871 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8872 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8873 {
8874 iMemMap = iemMemMapFindFree(pVCpu);
8875 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8876 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8877 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8878 pVCpu->iem.s.aMemMappings[2].fAccess),
8879 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8880 }
8881
8882 /*
8883 * Map the memory, checking that we can actually access it. If something
8884 * slightly complicated happens, fall back on bounce buffering.
8885 */
8886 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8887 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8888 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8889
8890 /* Crossing a page boundary? */
8891 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8892 { /* No (likely). */ }
8893 else
8894 {
8895 void *pvMem;
8896 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8897 if (rcStrict == VINF_SUCCESS)
8898 return pvMem;
8899 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8900 }
8901
8902 RTGCPHYS GCPhysFirst;
8903 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8904 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8905 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8906
8907 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8908 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8909 if (fAccess & IEM_ACCESS_TYPE_READ)
8910 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8911
8912 void *pvMem;
8913 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8914 if (rcStrict == VINF_SUCCESS)
8915 { /* likely */ }
8916 else
8917 {
8918 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8919 if (rcStrict == VINF_SUCCESS)
8920 return pvMem;
8921 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8922 }
8923
8924 /*
8925 * Fill in the mapping table entry.
8926 */
8927 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8928 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8929 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8930 pVCpu->iem.s.cActiveMappings++;
8931
8932 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8933 return pvMem;
8934}
8935
8936
8937/**
8938 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8939 *
8940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8941 * @param pvMem The mapping.
8942 * @param fAccess The kind of access.
8943 */
8944IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8945{
8946 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8947 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8948
8949 /* If it's bounce buffered, we may need to write back the buffer. */
8950 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8951 {
8952 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8953 {
8954 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8955 if (rcStrict == VINF_SUCCESS)
8956 return;
8957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8958 }
8959 }
8960 /* Otherwise unlock it. */
8961 else
8962 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8963
8964 /* Free the entry. */
8965 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8966 Assert(pVCpu->iem.s.cActiveMappings != 0);
8967 pVCpu->iem.s.cActiveMappings--;
8968}
8969
8970#endif /* IEM_WITH_SETJMP */
8971
8972#ifndef IN_RING3
8973/**
8974 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8975 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
8976 *
8977 * Allows the instruction to be completed and retired, while the IEM user will
8978 * return to ring-3 immediately afterwards and do the postponed writes there.
8979 *
8980 * @returns VBox status code (no strict statuses). Caller must check
8981 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8982 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8983 * @param pvMem The mapping.
8984 * @param fAccess The kind of access.
8985 */
8986IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8987{
8988 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8989 AssertReturn(iMemMap >= 0, iMemMap);
8990
8991 /* If it's bounce buffered, we may need to write back the buffer. */
8992 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8993 {
8994 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8995 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8996 }
8997 /* Otherwise unlock it. */
8998 else
8999 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9000
9001 /* Free the entry. */
9002 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9003 Assert(pVCpu->iem.s.cActiveMappings != 0);
9004 pVCpu->iem.s.cActiveMappings--;
9005 return VINF_SUCCESS;
9006}
9007#endif
9008
9009
9010/**
9011 * Rolls back mappings, releasing page locks and such.
9012 *
9013 * The caller shall only call this after checking cActiveMappings.
9014 *
9016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9017 */
9018IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9019{
9020 Assert(pVCpu->iem.s.cActiveMappings > 0);
9021
9022 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9023 while (iMemMap-- > 0)
9024 {
9025 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9026 if (fAccess != IEM_ACCESS_INVALID)
9027 {
9028 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9029 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9030 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9031 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9032 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9033 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9034 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9035 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9036 pVCpu->iem.s.cActiveMappings--;
9037 }
9038 }
9039}
9040
9041
9042/**
9043 * Fetches a data byte.
9044 *
9045 * @returns Strict VBox status code.
9046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9047 * @param pu8Dst Where to return the byte.
9048 * @param iSegReg The index of the segment register to use for
9049 * this access. The base and limits are checked.
9050 * @param GCPtrMem The address of the guest memory.
9051 */
9052IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9053{
9054 /* The lazy approach for now... */
9055 uint8_t const *pu8Src;
9056 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9057 if (rc == VINF_SUCCESS)
9058 {
9059 *pu8Dst = *pu8Src;
9060 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9061 }
9062 return rc;
9063}
9064
9065
9066#ifdef IEM_WITH_SETJMP
9067/**
9068 * Fetches a data byte, longjmp on error.
9069 *
9070 * @returns The byte.
9071 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9072 * @param iSegReg The index of the segment register to use for
9073 * this access. The base and limits are checked.
9074 * @param GCPtrMem The address of the guest memory.
9075 */
9076DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9077{
9078 /* The lazy approach for now... */
9079 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9080 uint8_t const bRet = *pu8Src;
9081 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9082 return bRet;
9083}
9084#endif /* IEM_WITH_SETJMP */
9085
9086
9087/**
9088 * Fetches a data word.
9089 *
9090 * @returns Strict VBox status code.
9091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9092 * @param pu16Dst Where to return the word.
9093 * @param iSegReg The index of the segment register to use for
9094 * this access. The base and limits are checked.
9095 * @param GCPtrMem The address of the guest memory.
9096 */
9097IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9098{
9099 /* The lazy approach for now... */
9100 uint16_t const *pu16Src;
9101 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9102 if (rc == VINF_SUCCESS)
9103 {
9104 *pu16Dst = *pu16Src;
9105 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9106 }
9107 return rc;
9108}
9109
9110
9111#ifdef IEM_WITH_SETJMP
9112/**
9113 * Fetches a data word, longjmp on error.
9114 *
9115 * @returns The word
9116 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9117 * @param iSegReg The index of the segment register to use for
9118 * this access. The base and limits are checked.
9119 * @param GCPtrMem The address of the guest memory.
9120 */
9121DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9122{
9123 /* The lazy approach for now... */
9124 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9125 uint16_t const u16Ret = *pu16Src;
9126 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9127 return u16Ret;
9128}
9129#endif
9130
9131
9132/**
9133 * Fetches a data dword.
9134 *
9135 * @returns Strict VBox status code.
9136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9137 * @param pu32Dst Where to return the dword.
9138 * @param iSegReg The index of the segment register to use for
9139 * this access. The base and limits are checked.
9140 * @param GCPtrMem The address of the guest memory.
9141 */
9142IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9143{
9144 /* The lazy approach for now... */
9145 uint32_t const *pu32Src;
9146 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9147 if (rc == VINF_SUCCESS)
9148 {
9149 *pu32Dst = *pu32Src;
9150 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9151 }
9152 return rc;
9153}
9154
9155
9156#ifdef IEM_WITH_SETJMP
9157
9158IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9159{
9160 Assert(cbMem >= 1);
9161 Assert(iSegReg < X86_SREG_COUNT);
9162
9163 /*
9164 * 64-bit mode is simpler.
9165 */
9166 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9167 {
9168 if (iSegReg >= X86_SREG_FS)
9169 {
9170 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9171 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9172 GCPtrMem += pSel->u64Base;
9173 }
9174
9175 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9176 return GCPtrMem;
9177 }
9178 /*
9179 * 16-bit and 32-bit segmentation.
9180 */
9181 else
9182 {
9183 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9184 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9185 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9186 == X86DESCATTR_P /* data, expand up */
9187 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9188 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9189 {
9190 /* expand up */
9191 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9192            if (RT_LIKELY(   GCPtrLast32 - 1 <= pSel->u32Limit
9193 && GCPtrLast32 > (uint32_t)GCPtrMem))
9194 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9195 }
9196 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9197 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9198 {
9199 /* expand down */
9200 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9201 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9202 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9203 && GCPtrLast32 > (uint32_t)GCPtrMem))
9204 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9205 }
9206 else
9207 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9208 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9209 }
9210 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9211}
9212
9213
9214IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9215{
9216 Assert(cbMem >= 1);
9217 Assert(iSegReg < X86_SREG_COUNT);
9218
9219 /*
9220 * 64-bit mode is simpler.
9221 */
9222 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9223 {
9224 if (iSegReg >= X86_SREG_FS)
9225 {
9226 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9227 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9228 GCPtrMem += pSel->u64Base;
9229 }
9230
9231 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9232 return GCPtrMem;
9233 }
9234 /*
9235 * 16-bit and 32-bit segmentation.
9236 */
9237 else
9238 {
9239 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9240 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9241 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9242 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9243 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9244 {
9245 /* expand up */
9246 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9247 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9248 && GCPtrLast32 > (uint32_t)GCPtrMem))
9249 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9250 }
9251        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9252 {
9253 /* expand down */
9254 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9255 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9256 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9257 && GCPtrLast32 > (uint32_t)GCPtrMem))
9258 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9259 }
9260 else
9261 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9262 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9263 }
9264 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9265}
9266
9267
9268/**
9269 * Fetches a data dword, longjmp on error, fallback/safe version.
9270 *
9271 * @returns The dword
9272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9273 * @param iSegReg The index of the segment register to use for
9274 * this access. The base and limits are checked.
9275 * @param GCPtrMem The address of the guest memory.
9276 */
9277IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9278{
9279 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9280 uint32_t const u32Ret = *pu32Src;
9281 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9282 return u32Ret;
9283}
9284
9285
9286/**
9287 * Fetches a data dword, longjmp on error.
9288 *
9289 * @returns The dword
9290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9291 * @param iSegReg The index of the segment register to use for
9292 * this access. The base and limits are checked.
9293 * @param GCPtrMem The address of the guest memory.
9294 */
9295DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9296{
9297# ifdef IEM_WITH_DATA_TLB
9298 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9299 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9300 {
9301 /// @todo more later.
9302 }
9303
9304 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9305# else
9306 /* The lazy approach. */
9307 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9308 uint32_t const u32Ret = *pu32Src;
9309 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9310 return u32Ret;
9311# endif
9312}
9313#endif
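
/*
 * Illustrative sketch, compiled out: the rough shape of how a caller can guard
 * one of the *Jmp fetchers above with setjmp, based on the
 * longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), ...) pattern used in this file.
 * The helper name and the save/restore of any outer jump buffer are
 * assumptions for illustration; the real execution loop installs the buffer in
 * its own way.
 */
#if 0
# include <setjmp.h>
static uint32_t iemExampleFetchDataU32Guarded(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, VBOXSTRICTRC *prcStrict)
{
    uint32_t  u32Value     = 0;
    jmp_buf   JmpBuf;
    jmp_buf  *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);          /* remember any outer guard */
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    int rc = setjmp(JmpBuf);
    if (rc == 0)
    {
        u32Value   = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem); /* longjmps back here on #GP/#SS/#PF */
        *prcStrict = VINF_SUCCESS;
    }
    else
        *prcStrict = rc;                    /* the VBOXSTRICTRC_VAL() passed to longjmp by the raiser */
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
    return u32Value;
}
#endif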
9314
9315
9316#ifdef SOME_UNUSED_FUNCTION
9317/**
9318 * Fetches a data dword and sign extends it to a qword.
9319 *
9320 * @returns Strict VBox status code.
9321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9322 * @param pu64Dst Where to return the sign extended value.
9323 * @param iSegReg The index of the segment register to use for
9324 * this access. The base and limits are checked.
9325 * @param GCPtrMem The address of the guest memory.
9326 */
9327IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9328{
9329 /* The lazy approach for now... */
9330 int32_t const *pi32Src;
9331 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9332 if (rc == VINF_SUCCESS)
9333 {
9334 *pu64Dst = *pi32Src;
9335 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9336 }
9337#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9338 else
9339 *pu64Dst = 0;
9340#endif
9341 return rc;
9342}
9343#endif
9344
9345
9346/**
9347 * Fetches a data qword.
9348 *
9349 * @returns Strict VBox status code.
9350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9351 * @param pu64Dst Where to return the qword.
9352 * @param iSegReg The index of the segment register to use for
9353 * this access. The base and limits are checked.
9354 * @param GCPtrMem The address of the guest memory.
9355 */
9356IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9357{
9358 /* The lazy approach for now... */
9359 uint64_t const *pu64Src;
9360 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9361 if (rc == VINF_SUCCESS)
9362 {
9363 *pu64Dst = *pu64Src;
9364 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9365 }
9366 return rc;
9367}
9368
9369
9370#ifdef IEM_WITH_SETJMP
9371/**
9372 * Fetches a data qword, longjmp on error.
9373 *
9374 * @returns The qword.
9375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9376 * @param iSegReg The index of the segment register to use for
9377 * this access. The base and limits are checked.
9378 * @param GCPtrMem The address of the guest memory.
9379 */
9380DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9381{
9382 /* The lazy approach for now... */
9383 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9384 uint64_t const u64Ret = *pu64Src;
9385 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9386 return u64Ret;
9387}
9388#endif
9389
9390
9391/**
9392 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9393 *
9394 * @returns Strict VBox status code.
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param pu64Dst Where to return the qword.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403 /* The lazy approach for now... */
9404 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9405 if (RT_UNLIKELY(GCPtrMem & 15))
9406 return iemRaiseGeneralProtectionFault0(pVCpu);
9407
9408 uint64_t const *pu64Src;
9409 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9410 if (rc == VINF_SUCCESS)
9411 {
9412 *pu64Dst = *pu64Src;
9413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9414 }
9415 return rc;
9416}
9417
9418
9419#ifdef IEM_WITH_SETJMP
9420/**
9421 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9422 *
9423 * @returns The qword.
9424 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9425 * @param iSegReg The index of the segment register to use for
9426 * this access. The base and limits are checked.
9427 * @param GCPtrMem The address of the guest memory.
9428 */
9429DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9430{
9431 /* The lazy approach for now... */
9432 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9433 if (RT_LIKELY(!(GCPtrMem & 15)))
9434 {
9435 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9436 uint64_t const u64Ret = *pu64Src;
9437 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9438 return u64Ret;
9439 }
9440
9441 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9442 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9443}
9444#endif
9445
9446
9447/**
9448 * Fetches a data tword.
9449 *
9450 * @returns Strict VBox status code.
9451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9452 * @param pr80Dst Where to return the tword.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 */
9457IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9458{
9459 /* The lazy approach for now... */
9460 PCRTFLOAT80U pr80Src;
9461 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9462 if (rc == VINF_SUCCESS)
9463 {
9464 *pr80Dst = *pr80Src;
9465 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9466 }
9467 return rc;
9468}
9469
9470
9471#ifdef IEM_WITH_SETJMP
9472/**
9473 * Fetches a data tword, longjmp on error.
9474 *
9475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9476 * @param pr80Dst Where to return the tword.
9477 * @param iSegReg The index of the segment register to use for
9478 * this access. The base and limits are checked.
9479 * @param GCPtrMem The address of the guest memory.
9480 */
9481DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9482{
9483 /* The lazy approach for now... */
9484 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9485 *pr80Dst = *pr80Src;
9486 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9487}
9488#endif
9489
9490
9491/**
9492 * Fetches a data dqword (double qword), generally SSE related.
9493 *
9494 * @returns Strict VBox status code.
9495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9496 * @param pu128Dst Where to return the dqword.
9497 * @param iSegReg The index of the segment register to use for
9498 * this access. The base and limits are checked.
9499 * @param GCPtrMem The address of the guest memory.
9500 */
9501IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9502{
9503 /* The lazy approach for now... */
9504 PCRTUINT128U pu128Src;
9505 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9506 if (rc == VINF_SUCCESS)
9507 {
9508 pu128Dst->au64[0] = pu128Src->au64[0];
9509 pu128Dst->au64[1] = pu128Src->au64[1];
9510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9511 }
9512 return rc;
9513}
9514
9515
9516#ifdef IEM_WITH_SETJMP
9517/**
9518 * Fetches a data dqword (double qword), generally SSE related.
9519 *
9520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9521 * @param pu128Dst Where to return the dqword.
9522 * @param iSegReg The index of the segment register to use for
9523 * this access. The base and limits are checked.
9524 * @param GCPtrMem The address of the guest memory.
9525 */
9526IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9527{
9528 /* The lazy approach for now... */
9529 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9530 pu128Dst->au64[0] = pu128Src->au64[0];
9531 pu128Dst->au64[1] = pu128Src->au64[1];
9532 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9533}
9534#endif
9535
9536
9537/**
9538 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9539 * related.
9540 *
9541 * Raises \#GP(0) if not aligned.
9542 *
9543 * @returns Strict VBox status code.
9544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9545 * @param pu128Dst Where to return the dqword.
9546 * @param iSegReg The index of the segment register to use for
9547 * this access. The base and limits are checked.
9548 * @param GCPtrMem The address of the guest memory.
9549 */
9550IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9551{
9552 /* The lazy approach for now... */
9553 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9554 if ( (GCPtrMem & 15)
9555 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9556 return iemRaiseGeneralProtectionFault0(pVCpu);
9557
9558 PCRTUINT128U pu128Src;
9559 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9560 if (rc == VINF_SUCCESS)
9561 {
9562 pu128Dst->au64[0] = pu128Src->au64[0];
9563 pu128Dst->au64[1] = pu128Src->au64[1];
9564 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9565 }
9566 return rc;
9567}
9568
9569
9570#ifdef IEM_WITH_SETJMP
9571/**
9572 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9573 * related, longjmp on error.
9574 *
9575 * Raises \#GP(0) if not aligned.
9576 *
9577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9578 * @param pu128Dst Where to return the dqword.
9579 * @param iSegReg The index of the segment register to use for
9580 * this access. The base and limits are checked.
9581 * @param GCPtrMem The address of the guest memory.
9582 */
9583DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9584{
9585 /* The lazy approach for now... */
9586 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9587 if ( (GCPtrMem & 15) == 0
9588 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9589 {
9590 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9591 pu128Dst->au64[0] = pu128Src->au64[0];
9592 pu128Dst->au64[1] = pu128Src->au64[1];
9593 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9594 return;
9595 }
9596
9597 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9598 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9599}
9600#endif
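
/*
 * Illustrative sketch, compiled out: the alignment predicate that the two
 * SSE-aligned 128-bit fetchers above share (the aligned store helpers further
 * down use the same check).  The helper name is an assumption for
 * illustration; X86_MXCSR_MM is the misaligned-exception-mask bit (AMD's
 * misaligned SSE mode), which waives the 16-byte requirement that otherwise
 * results in #GP(0).
 */
#if 0
DECLINLINE(bool) iemExampleIsSseAlignmentOk(PVMCPUCC pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) == 0
        || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif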
9601
9602
9603/**
9604 * Fetches a data oword (octo word), generally AVX related.
9605 *
9606 * @returns Strict VBox status code.
9607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9608 * @param pu256Dst Where to return the oword.
9609 * @param iSegReg The index of the segment register to use for
9610 * this access. The base and limits are checked.
9611 * @param GCPtrMem The address of the guest memory.
9612 */
9613IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9614{
9615 /* The lazy approach for now... */
9616 PCRTUINT256U pu256Src;
9617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9618 if (rc == VINF_SUCCESS)
9619 {
9620 pu256Dst->au64[0] = pu256Src->au64[0];
9621 pu256Dst->au64[1] = pu256Src->au64[1];
9622 pu256Dst->au64[2] = pu256Src->au64[2];
9623 pu256Dst->au64[3] = pu256Src->au64[3];
9624 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9625 }
9626 return rc;
9627}
9628
9629
9630#ifdef IEM_WITH_SETJMP
9631/**
9632 * Fetches a data oword (octo word), generally AVX related.
9633 *
9634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9635 * @param pu256Dst Where to return the oword.
9636 * @param iSegReg The index of the segment register to use for
9637 * this access. The base and limits are checked.
9638 * @param GCPtrMem The address of the guest memory.
9639 */
9640IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9641{
9642 /* The lazy approach for now... */
9643 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9644 pu256Dst->au64[0] = pu256Src->au64[0];
9645 pu256Dst->au64[1] = pu256Src->au64[1];
9646 pu256Dst->au64[2] = pu256Src->au64[2];
9647 pu256Dst->au64[3] = pu256Src->au64[3];
9648 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9649}
9650#endif
9651
9652
9653/**
9654 * Fetches a data oword (octo word) at an aligned address, generally AVX
9655 * related.
9656 *
9657 * Raises \#GP(0) if not aligned.
9658 *
9659 * @returns Strict VBox status code.
9660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9661 * @param pu256Dst Where to return the oword.
9662 * @param iSegReg The index of the segment register to use for
9663 * this access. The base and limits are checked.
9664 * @param GCPtrMem The address of the guest memory.
9665 */
9666IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9667{
9668 /* The lazy approach for now... */
9669 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9670 if (GCPtrMem & 31)
9671 return iemRaiseGeneralProtectionFault0(pVCpu);
9672
9673 PCRTUINT256U pu256Src;
9674 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9675 if (rc == VINF_SUCCESS)
9676 {
9677 pu256Dst->au64[0] = pu256Src->au64[0];
9678 pu256Dst->au64[1] = pu256Src->au64[1];
9679 pu256Dst->au64[2] = pu256Src->au64[2];
9680 pu256Dst->au64[3] = pu256Src->au64[3];
9681 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9682 }
9683 return rc;
9684}
9685
9686
9687#ifdef IEM_WITH_SETJMP
9688/**
9689 * Fetches a data oword (octo word) at an aligned address, generally AVX
9690 * related, longjmp on error.
9691 *
9692 * Raises \#GP(0) if not aligned.
9693 *
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param pu256Dst Where to return the oword.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 */
9700DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9701{
9702 /* The lazy approach for now... */
9703 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9704 if ((GCPtrMem & 31) == 0)
9705 {
9706 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9707 pu256Dst->au64[0] = pu256Src->au64[0];
9708 pu256Dst->au64[1] = pu256Src->au64[1];
9709 pu256Dst->au64[2] = pu256Src->au64[2];
9710 pu256Dst->au64[3] = pu256Src->au64[3];
9711 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9712 return;
9713 }
9714
9715 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9716 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9717}
9718#endif
9719
9720
9721
9722/**
9723 * Fetches a descriptor register (lgdt, lidt).
9724 *
9725 * @returns Strict VBox status code.
9726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9727 * @param pcbLimit Where to return the limit.
9728 * @param pGCPtrBase Where to return the base.
9729 * @param iSegReg The index of the segment register to use for
9730 * this access. The base and limits are checked.
9731 * @param GCPtrMem The address of the guest memory.
9732 * @param enmOpSize The effective operand size.
9733 */
9734IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9735 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9736{
9737 /*
9738 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9739 * little special:
9740 * - The two reads are done separately.
9741 *   - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9742 * - We suspect the 386 to actually commit the limit before the base in
9743 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9744 *     don't try to emulate this eccentric behavior, because it's not well
9745 * enough understood and rather hard to trigger.
9746 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9747 */
9748 VBOXSTRICTRC rcStrict;
9749 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9750 {
9751 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9752 if (rcStrict == VINF_SUCCESS)
9753 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9754 }
9755 else
9756 {
9757        uint32_t uTmp = 0; /* (Silences Visual C++'s maybe-used-uninitialized warning.) */
9758 if (enmOpSize == IEMMODE_32BIT)
9759 {
9760 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9761 {
9762 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9763 if (rcStrict == VINF_SUCCESS)
9764 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9765 }
9766 else
9767 {
9768 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9769 if (rcStrict == VINF_SUCCESS)
9770 {
9771 *pcbLimit = (uint16_t)uTmp;
9772 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9773 }
9774 }
9775 if (rcStrict == VINF_SUCCESS)
9776 *pGCPtrBase = uTmp;
9777 }
9778 else
9779 {
9780 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9781 if (rcStrict == VINF_SUCCESS)
9782 {
9783 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9784 if (rcStrict == VINF_SUCCESS)
9785 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9786 }
9787 }
9788 }
9789 return rcStrict;
9790}
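
/*
 * Illustrative sketch, compiled out: how the pseudo descriptor image read by
 * iemMemFetchDataXdtr() above breaks down once fetched, ignoring the 486
 * dword-limit quirk.  The helper name and the flat little-endian byte buffer
 * are assumptions for illustration; the function above performs the real
 * segmented, two-part reads.
 */
#if 0
static void iemExampleDecodeXdtrImage(uint8_t const *pbImg, bool f64BitMode, bool f32BitOpSize,
                                      uint16_t *pcbLimit, uint64_t *puBase)
{
    /* Bytes 0..1: the 16-bit limit, read first in all modes. */
    *pcbLimit = (uint16_t)(pbImg[0] | ((uint16_t)pbImg[1] << 8));

    /* Bytes 2..N: the base, read as the second access (8 bytes in 64-bit mode, 4 otherwise). */
    unsigned const cbBase = f64BitMode ? 8 : 4;
    uint64_t       uBase  = 0;
    for (unsigned i = 0; i < cbBase; i++)
        uBase |= (uint64_t)pbImg[2 + i] << (i * 8);

    /* With a 16-bit operand size only 24 bits of the base are used (386+ behaviour). */
    if (!f64BitMode && !f32BitOpSize)
        uBase &= UINT32_C(0x00ffffff);
    *puBase = uBase;
}
#endif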
9791
9792
9793
9794/**
9795 * Stores a data byte.
9796 *
9797 * @returns Strict VBox status code.
9798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9799 * @param iSegReg The index of the segment register to use for
9800 * this access. The base and limits are checked.
9801 * @param GCPtrMem The address of the guest memory.
9802 * @param u8Value The value to store.
9803 */
9804IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9805{
9806 /* The lazy approach for now... */
9807 uint8_t *pu8Dst;
9808 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9809 if (rc == VINF_SUCCESS)
9810 {
9811 *pu8Dst = u8Value;
9812 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9813 }
9814 return rc;
9815}
9816
9817
9818#ifdef IEM_WITH_SETJMP
9819/**
9820 * Stores a data byte, longjmp on error.
9821 *
9822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9823 * @param iSegReg The index of the segment register to use for
9824 * this access. The base and limits are checked.
9825 * @param GCPtrMem The address of the guest memory.
9826 * @param u8Value The value to store.
9827 */
9828IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9829{
9830 /* The lazy approach for now... */
9831 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9832 *pu8Dst = u8Value;
9833 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9834}
9835#endif
9836
9837
9838/**
9839 * Stores a data word.
9840 *
9841 * @returns Strict VBox status code.
9842 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9843 * @param iSegReg The index of the segment register to use for
9844 * this access. The base and limits are checked.
9845 * @param GCPtrMem The address of the guest memory.
9846 * @param u16Value The value to store.
9847 */
9848IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9849{
9850 /* The lazy approach for now... */
9851 uint16_t *pu16Dst;
9852 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9853 if (rc == VINF_SUCCESS)
9854 {
9855 *pu16Dst = u16Value;
9856 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9857 }
9858 return rc;
9859}
9860
9861
9862#ifdef IEM_WITH_SETJMP
9863/**
9864 * Stores a data word, longjmp on error.
9865 *
9866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9867 * @param iSegReg The index of the segment register to use for
9868 * this access. The base and limits are checked.
9869 * @param GCPtrMem The address of the guest memory.
9870 * @param u16Value The value to store.
9871 */
9872IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9873{
9874 /* The lazy approach for now... */
9875 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9876 *pu16Dst = u16Value;
9877 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9878}
9879#endif
9880
9881
9882/**
9883 * Stores a data dword.
9884 *
9885 * @returns Strict VBox status code.
9886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9887 * @param iSegReg The index of the segment register to use for
9888 * this access. The base and limits are checked.
9889 * @param GCPtrMem The address of the guest memory.
9890 * @param u32Value The value to store.
9891 */
9892IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9893{
9894 /* The lazy approach for now... */
9895 uint32_t *pu32Dst;
9896 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9897 if (rc == VINF_SUCCESS)
9898 {
9899 *pu32Dst = u32Value;
9900 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9901 }
9902 return rc;
9903}
9904
9905
9906#ifdef IEM_WITH_SETJMP
9907/**
9908 * Stores a data dword, longjmp on error.
9909 *
9911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9912 * @param iSegReg The index of the segment register to use for
9913 * this access. The base and limits are checked.
9914 * @param GCPtrMem The address of the guest memory.
9915 * @param u32Value The value to store.
9916 */
9917IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9918{
9919 /* The lazy approach for now... */
9920 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9921 *pu32Dst = u32Value;
9922 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9923}
9924#endif
9925
9926
9927/**
9928 * Stores a data qword.
9929 *
9930 * @returns Strict VBox status code.
9931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9932 * @param iSegReg The index of the segment register to use for
9933 * this access. The base and limits are checked.
9934 * @param GCPtrMem The address of the guest memory.
9935 * @param u64Value The value to store.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9938{
9939 /* The lazy approach for now... */
9940 uint64_t *pu64Dst;
9941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9942 if (rc == VINF_SUCCESS)
9943 {
9944 *pu64Dst = u64Value;
9945 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9946 }
9947 return rc;
9948}
9949
9950
9951#ifdef IEM_WITH_SETJMP
9952/**
9953 * Stores a data qword, longjmp on error.
9954 *
9955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9956 * @param iSegReg The index of the segment register to use for
9957 * this access. The base and limits are checked.
9958 * @param GCPtrMem The address of the guest memory.
9959 * @param u64Value The value to store.
9960 */
9961IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9962{
9963 /* The lazy approach for now... */
9964 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9965 *pu64Dst = u64Value;
9966 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9967}
9968#endif
9969
9970
9971/**
9972 * Stores a data dqword.
9973 *
9974 * @returns Strict VBox status code.
9975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9976 * @param iSegReg The index of the segment register to use for
9977 * this access. The base and limits are checked.
9978 * @param GCPtrMem The address of the guest memory.
9979 * @param u128Value The value to store.
9980 */
9981IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9982{
9983 /* The lazy approach for now... */
9984 PRTUINT128U pu128Dst;
9985 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9986 if (rc == VINF_SUCCESS)
9987 {
9988 pu128Dst->au64[0] = u128Value.au64[0];
9989 pu128Dst->au64[1] = u128Value.au64[1];
9990 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9991 }
9992 return rc;
9993}
9994
9995
9996#ifdef IEM_WITH_SETJMP
9997/**
9998 * Stores a data dqword, longjmp on error.
9999 *
10000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10001 * @param iSegReg The index of the segment register to use for
10002 * this access. The base and limits are checked.
10003 * @param GCPtrMem The address of the guest memory.
10004 * @param u128Value The value to store.
10005 */
10006IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10007{
10008 /* The lazy approach for now... */
10009 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10010 pu128Dst->au64[0] = u128Value.au64[0];
10011 pu128Dst->au64[1] = u128Value.au64[1];
10012 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10013}
10014#endif
10015
10016
10017/**
10018 * Stores a data dqword, SSE aligned.
10019 *
10020 * @returns Strict VBox status code.
10021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10022 * @param iSegReg The index of the segment register to use for
10023 * this access. The base and limits are checked.
10024 * @param GCPtrMem The address of the guest memory.
10025 * @param u128Value The value to store.
10026 */
10027IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10028{
10029 /* The lazy approach for now... */
10030 if ( (GCPtrMem & 15)
10031 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10032 return iemRaiseGeneralProtectionFault0(pVCpu);
10033
10034 PRTUINT128U pu128Dst;
10035 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10036 if (rc == VINF_SUCCESS)
10037 {
10038 pu128Dst->au64[0] = u128Value.au64[0];
10039 pu128Dst->au64[1] = u128Value.au64[1];
10040 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10041 }
10042 return rc;
10043}
10044
10045
10046#ifdef IEM_WITH_SETJMP
10047/**
10048 * Stores a data dqword, SSE aligned, longjmp on error.
10049 *
10051 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10052 * @param iSegReg The index of the segment register to use for
10053 * this access. The base and limits are checked.
10054 * @param GCPtrMem The address of the guest memory.
10055 * @param u128Value The value to store.
10056 */
10057DECL_NO_INLINE(IEM_STATIC, void)
10058iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10059{
10060 /* The lazy approach for now... */
10061 if ( (GCPtrMem & 15) == 0
10062 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10063 {
10064 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10065 pu128Dst->au64[0] = u128Value.au64[0];
10066 pu128Dst->au64[1] = u128Value.au64[1];
10067 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10068 return;
10069 }
10070
10071 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10072 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10073}
10074#endif
10075
10076
10077/**
10078 * Stores a data oword (octo word).
10079 *
10080 * @returns Strict VBox status code.
10081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10082 * @param iSegReg The index of the segment register to use for
10083 * this access. The base and limits are checked.
10084 * @param GCPtrMem The address of the guest memory.
10085 * @param pu256Value Pointer to the value to store.
10086 */
10087IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10088{
10089 /* The lazy approach for now... */
10090 PRTUINT256U pu256Dst;
10091 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10092 if (rc == VINF_SUCCESS)
10093 {
10094 pu256Dst->au64[0] = pu256Value->au64[0];
10095 pu256Dst->au64[1] = pu256Value->au64[1];
10096 pu256Dst->au64[2] = pu256Value->au64[2];
10097 pu256Dst->au64[3] = pu256Value->au64[3];
10098 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10099 }
10100 return rc;
10101}
10102
10103
10104#ifdef IEM_WITH_SETJMP
10105/**
10106 * Stores a data oword (octo word), longjmp on error.
10107 *
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param pu256Value Pointer to the value to store.
10113 */
10114IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10115{
10116 /* The lazy approach for now... */
10117 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10118 pu256Dst->au64[0] = pu256Value->au64[0];
10119 pu256Dst->au64[1] = pu256Value->au64[1];
10120 pu256Dst->au64[2] = pu256Value->au64[2];
10121 pu256Dst->au64[3] = pu256Value->au64[3];
10122 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10123}
10124#endif
10125
10126
10127/**
10128 * Stores a data oword (octo word), AVX aligned.
10129 *
10130 * @returns Strict VBox status code.
10131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10132 * @param iSegReg The index of the segment register to use for
10133 * this access. The base and limits are checked.
10134 * @param GCPtrMem The address of the guest memory.
10135 * @param pu256Value Pointer to the value to store.
10136 */
10137IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10138{
10139 /* The lazy approach for now... */
10140 if (GCPtrMem & 31)
10141 return iemRaiseGeneralProtectionFault0(pVCpu);
10142
10143 PRTUINT256U pu256Dst;
10144 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10145 if (rc == VINF_SUCCESS)
10146 {
10147 pu256Dst->au64[0] = pu256Value->au64[0];
10148 pu256Dst->au64[1] = pu256Value->au64[1];
10149 pu256Dst->au64[2] = pu256Value->au64[2];
10150 pu256Dst->au64[3] = pu256Value->au64[3];
10151 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10152 }
10153 return rc;
10154}
10155
10156
10157#ifdef IEM_WITH_SETJMP
10158/**
10159 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10160 *
10162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10163 * @param iSegReg The index of the segment register to use for
10164 * this access. The base and limits are checked.
10165 * @param GCPtrMem The address of the guest memory.
10166 * @param pu256Value Pointer to the value to store.
10167 */
10168DECL_NO_INLINE(IEM_STATIC, void)
10169iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10170{
10171 /* The lazy approach for now... */
10172 if ((GCPtrMem & 31) == 0)
10173 {
10174 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10175 pu256Dst->au64[0] = pu256Value->au64[0];
10176 pu256Dst->au64[1] = pu256Value->au64[1];
10177 pu256Dst->au64[2] = pu256Value->au64[2];
10178 pu256Dst->au64[3] = pu256Value->au64[3];
10179 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10180 return;
10181 }
10182
10183 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10184 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10185}
10186#endif
10187
10188
10189/**
10190 * Stores a descriptor register (sgdt, sidt).
10191 *
10192 * @returns Strict VBox status code.
10193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10194 * @param cbLimit The limit.
10195 * @param GCPtrBase The base address.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 */
10200IEM_STATIC VBOXSTRICTRC
10201iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10202{
10203 /*
10204     * The SIDT and SGDT instructions actually store the data using two
10205     * independent writes.  The instructions do not respond to opsize prefixes.
10206 */
10207 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10208 if (rcStrict == VINF_SUCCESS)
10209 {
10210 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10211 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10212 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10213 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10214 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10215 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10216 else
10217 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10218 }
10219 return rcStrict;
10220}
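
/*
 * Illustrative sketch, compiled out: the byte image that the two writes in
 * iemMemStoreDataXdtr() above produce, including the 286 quirk of reporting
 * 0xFF in the unused high byte of the base.  The helper name, the flat output
 * buffer and the f286 flag are assumptions for illustration.
 */
#if 0
static void iemExampleBuildXdtrImage(uint8_t *pbImg, uint16_t cbLimit, uint64_t GCPtrBase,
                                     IEMMODE enmCpuMode, bool f286)
{
    /* First write: the 16-bit limit at offset 0. */
    pbImg[0] = (uint8_t)cbLimit;
    pbImg[1] = (uint8_t)(cbLimit >> 8);

    /* Second write: the base at offset 2 - 8 bytes in 64-bit mode, 4 bytes otherwise. */
    uint64_t uBase = enmCpuMode == IEMMODE_64BIT ? GCPtrBase : (uint32_t)GCPtrBase;
    if (enmCpuMode == IEMMODE_16BIT && f286)
        uBase |= UINT32_C(0xff000000);      /* 286 and older: the high base byte reads as 0xFF. */
    unsigned const cbBase = enmCpuMode == IEMMODE_64BIT ? 8 : 4;
    for (unsigned i = 0; i < cbBase; i++)
        pbImg[2 + i] = (uint8_t)(uBase >> (i * 8));
}
#endif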
10221
10222
10223/**
10224 * Pushes a word onto the stack.
10225 *
10226 * @returns Strict VBox status code.
10227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10228 * @param u16Value The value to push.
10229 */
10230IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10231{
10232    /* Decrement the stack pointer. */
10233 uint64_t uNewRsp;
10234 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10235
10236 /* Write the word the lazy way. */
10237 uint16_t *pu16Dst;
10238 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10239 if (rc == VINF_SUCCESS)
10240 {
10241 *pu16Dst = u16Value;
10242 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10243 }
10244
10245    /* Commit the new RSP value unless an access handler made trouble. */
10246 if (rc == VINF_SUCCESS)
10247 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10248
10249 return rc;
10250}
10251
10252
10253/**
10254 * Pushes a dword onto the stack.
10255 *
10256 * @returns Strict VBox status code.
10257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10258 * @param u32Value The value to push.
10259 */
10260IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10261{
10262    /* Decrement the stack pointer. */
10263 uint64_t uNewRsp;
10264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10265
10266 /* Write the dword the lazy way. */
10267 uint32_t *pu32Dst;
10268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10269 if (rc == VINF_SUCCESS)
10270 {
10271 *pu32Dst = u32Value;
10272 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10273 }
10274
10275    /* Commit the new RSP value unless an access handler made trouble. */
10276 if (rc == VINF_SUCCESS)
10277 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10278
10279 return rc;
10280}
10281
10282
10283/**
10284 * Pushes a dword segment register value onto the stack.
10285 *
10286 * @returns Strict VBox status code.
10287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10288 * @param u32Value The value to push.
10289 */
10290IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10291{
10292    /* Decrement the stack pointer. */
10293 uint64_t uNewRsp;
10294 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10295
10296    /* The Intel docs talk about zero extending the selector register
10297       value.  The actual Intel CPU tested here might be zero extending the
10298       value, but it still only writes the lower word... */
10299    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
10300     * happens when crossing an electric page boundary, is the high word checked
10301 * for write accessibility or not? Probably it is. What about segment limits?
10302 * It appears this behavior is also shared with trap error codes.
10303 *
10304 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10305 * ancient hardware when it actually did change. */
10306 uint16_t *pu16Dst;
10307 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10308 if (rc == VINF_SUCCESS)
10309 {
10310 *pu16Dst = (uint16_t)u32Value;
10311 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10312 }
10313
10314    /* Commit the new RSP value unless an access handler made trouble. */
10315 if (rc == VINF_SUCCESS)
10316 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10317
10318 return rc;
10319}
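
/*
 * Worked example, compiled out: the net effect of the segment register push
 * above, using made-up values purely for illustration.  A full dword slot is
 * reserved on the stack, but only the low word of it is overwritten.
 */
#if 0
static uint32_t iemExampleSRegPushSlot(uint32_t uOldSlotValue, uint16_t uSel)
{
    /* Matches the RW mapping plus 16-bit store above: the high word of the
       reserved dword keeps whatever was there before. */
    return (uOldSlotValue & UINT32_C(0xffff0000)) | uSel;
}
/* E.g. iemExampleSRegPushSlot(0xdeadbeef, 0x0023) yields 0xdead0023, while RSP drops by 4. */
#endif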
10320
10321
10322/**
10323 * Pushes a qword onto the stack.
10324 *
10325 * @returns Strict VBox status code.
10326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10327 * @param u64Value The value to push.
10328 */
10329IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10330{
10331    /* Decrement the stack pointer. */
10332 uint64_t uNewRsp;
10333 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10334
10335    /* Write the qword the lazy way. */
10336 uint64_t *pu64Dst;
10337 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10338 if (rc == VINF_SUCCESS)
10339 {
10340 *pu64Dst = u64Value;
10341 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10342 }
10343
10344    /* Commit the new RSP value unless an access handler made trouble. */
10345 if (rc == VINF_SUCCESS)
10346 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10347
10348 return rc;
10349}
10350
10351
10352/**
10353 * Pops a word from the stack.
10354 *
10355 * @returns Strict VBox status code.
10356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10357 * @param pu16Value Where to store the popped value.
10358 */
10359IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10360{
10361 /* Increment the stack pointer. */
10362 uint64_t uNewRsp;
10363 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10364
10365    /* Read the word the lazy way. */
10366 uint16_t const *pu16Src;
10367 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10368 if (rc == VINF_SUCCESS)
10369 {
10370 *pu16Value = *pu16Src;
10371 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10372
10373 /* Commit the new RSP value. */
10374 if (rc == VINF_SUCCESS)
10375 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10376 }
10377
10378 return rc;
10379}
10380
10381
10382/**
10383 * Pops a dword from the stack.
10384 *
10385 * @returns Strict VBox status code.
10386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10387 * @param pu32Value Where to store the popped value.
10388 */
10389IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10390{
10391 /* Increment the stack pointer. */
10392 uint64_t uNewRsp;
10393 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10394
10395    /* Read the dword the lazy way. */
10396 uint32_t const *pu32Src;
10397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10398 if (rc == VINF_SUCCESS)
10399 {
10400 *pu32Value = *pu32Src;
10401 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10402
10403 /* Commit the new RSP value. */
10404 if (rc == VINF_SUCCESS)
10405 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10406 }
10407
10408 return rc;
10409}
10410
10411
10412/**
10413 * Pops a qword from the stack.
10414 *
10415 * @returns Strict VBox status code.
10416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10417 * @param pu64Value Where to store the popped value.
10418 */
10419IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10420{
10421 /* Increment the stack pointer. */
10422 uint64_t uNewRsp;
10423 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10424
10425    /* Read the qword the lazy way. */
10426 uint64_t const *pu64Src;
10427 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10428 if (rc == VINF_SUCCESS)
10429 {
10430 *pu64Value = *pu64Src;
10431 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10432
10433 /* Commit the new RSP value. */
10434 if (rc == VINF_SUCCESS)
10435 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10436 }
10437
10438 return rc;
10439}
10440
10441
10442/**
10443 * Pushes a word onto the stack, using a temporary stack pointer.
10444 *
10445 * @returns Strict VBox status code.
10446 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10447 * @param u16Value The value to push.
10448 * @param pTmpRsp Pointer to the temporary stack pointer.
10449 */
10450IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10451{
10452    /* Decrement the stack pointer. */
10453 RTUINT64U NewRsp = *pTmpRsp;
10454 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10455
10456 /* Write the word the lazy way. */
10457 uint16_t *pu16Dst;
10458 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10459 if (rc == VINF_SUCCESS)
10460 {
10461 *pu16Dst = u16Value;
10462 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10463 }
10464
10465    /* Commit the new RSP value unless an access handler made trouble. */
10466 if (rc == VINF_SUCCESS)
10467 *pTmpRsp = NewRsp;
10468
10469 return rc;
10470}
10471
10472
10473/**
10474 * Pushes a dword onto the stack, using a temporary stack pointer.
10475 *
10476 * @returns Strict VBox status code.
10477 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10478 * @param u32Value The value to push.
10479 * @param pTmpRsp Pointer to the temporary stack pointer.
10480 */
10481IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10482{
10483    /* Decrement the stack pointer. */
10484 RTUINT64U NewRsp = *pTmpRsp;
10485 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10486
10487    /* Write the dword the lazy way. */
10488 uint32_t *pu32Dst;
10489 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10490 if (rc == VINF_SUCCESS)
10491 {
10492 *pu32Dst = u32Value;
10493 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10494 }
10495
10496    /* Commit the new RSP value unless an access handler made trouble. */
10497 if (rc == VINF_SUCCESS)
10498 *pTmpRsp = NewRsp;
10499
10500 return rc;
10501}
10502
10503
10504/**
10505 * Pushes a qword onto the stack, using a temporary stack pointer.
10506 *
10507 * @returns Strict VBox status code.
10508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10509 * @param u64Value The value to push.
10510 * @param pTmpRsp Pointer to the temporary stack pointer.
10511 */
10512IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10513{
10514    /* Decrement the stack pointer. */
10515 RTUINT64U NewRsp = *pTmpRsp;
10516 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10517
10518    /* Write the qword the lazy way. */
10519 uint64_t *pu64Dst;
10520 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10521 if (rc == VINF_SUCCESS)
10522 {
10523 *pu64Dst = u64Value;
10524 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10525 }
10526
10527    /* Commit the new RSP value unless an access handler made trouble. */
10528 if (rc == VINF_SUCCESS)
10529 *pTmpRsp = NewRsp;
10530
10531 return rc;
10532}
10533
10534
10535/**
10536 * Pops a word from the stack, using a temporary stack pointer.
10537 *
10538 * @returns Strict VBox status code.
10539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10540 * @param pu16Value Where to store the popped value.
10541 * @param pTmpRsp Pointer to the temporary stack pointer.
10542 */
10543IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10544{
10545 /* Increment the stack pointer. */
10546 RTUINT64U NewRsp = *pTmpRsp;
10547 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10548
10549    /* Read the word the lazy way. */
10550 uint16_t const *pu16Src;
10551 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10552 if (rc == VINF_SUCCESS)
10553 {
10554 *pu16Value = *pu16Src;
10555 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10556
10557 /* Commit the new RSP value. */
10558 if (rc == VINF_SUCCESS)
10559 *pTmpRsp = NewRsp;
10560 }
10561
10562 return rc;
10563}
10564
10565
10566/**
10567 * Pops a dword from the stack, using a temporary stack pointer.
10568 *
10569 * @returns Strict VBox status code.
10570 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10571 * @param pu32Value Where to store the popped value.
10572 * @param pTmpRsp Pointer to the temporary stack pointer.
10573 */
10574IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10575{
10576 /* Increment the stack pointer. */
10577 RTUINT64U NewRsp = *pTmpRsp;
10578 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10579
10580    /* Read the dword the lazy way. */
10581 uint32_t const *pu32Src;
10582 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10583 if (rc == VINF_SUCCESS)
10584 {
10585 *pu32Value = *pu32Src;
10586 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10587
10588 /* Commit the new RSP value. */
10589 if (rc == VINF_SUCCESS)
10590 *pTmpRsp = NewRsp;
10591 }
10592
10593 return rc;
10594}
10595
10596
10597/**
10598 * Pops a qword from the stack, using a temporary stack pointer.
10599 *
10600 * @returns Strict VBox status code.
10601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10602 * @param pu64Value Where to store the popped value.
10603 * @param pTmpRsp Pointer to the temporary stack pointer.
10604 */
10605IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10606{
10607 /* Increment the stack pointer. */
10608 RTUINT64U NewRsp = *pTmpRsp;
10609 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10610
10611    /* Read the qword the lazy way. */
10612 uint64_t const *pu64Src;
10613 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10614 if (rcStrict == VINF_SUCCESS)
10615 {
10616 *pu64Value = *pu64Src;
10617 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10618
10619 /* Commit the new RSP value. */
10620 if (rcStrict == VINF_SUCCESS)
10621 *pTmpRsp = NewRsp;
10622 }
10623
10624 return rcStrict;
10625}
10626
10627
10628/**
10629 * Begin a special stack push (used by interrupts, exceptions and such).
10630 *
10631 * This will raise \#SS or \#PF if appropriate.
10632 *
10633 * @returns Strict VBox status code.
10634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10635 * @param cbMem The number of bytes to push onto the stack.
10636 * @param ppvMem Where to return the pointer to the stack memory.
10637 * As with the other memory functions this could be
10638 * direct access or bounce buffered access, so
10639 *                      don't commit the register until the commit call
10640 * succeeds.
10641 * @param puNewRsp Where to return the new RSP value. This must be
10642 * passed unchanged to
10643 * iemMemStackPushCommitSpecial().
10644 */
10645IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10646{
10647 Assert(cbMem < UINT8_MAX);
10648 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10649 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10650}
10651
10652
10653/**
10654 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10655 *
10656 * This will update the rSP.
10657 *
10658 * @returns Strict VBox status code.
10659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10660 * @param pvMem The pointer returned by
10661 * iemMemStackPushBeginSpecial().
10662 * @param uNewRsp The new RSP value returned by
10663 * iemMemStackPushBeginSpecial().
10664 */
10665IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10666{
10667 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10668 if (rcStrict == VINF_SUCCESS)
10669 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10670 return rcStrict;
10671}
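
/*
 * Illustrative sketch, compiled out: the intended begin/commit pairing of the
 * two helpers above, here pushing a hypothetical 8-byte value.  The helper
 * name and the single-qword frame are assumptions for illustration; real
 * callers (interrupt and exception delivery) build larger frames the same way.
 */
#if 0
static VBOXSTRICTRC iemExamplePushQwordSpecial(PVMCPUCC pVCpu, uint64_t uValue)
{
    void        *pvFrame;
    uint64_t     uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                    /* #SS or #PF has been raised already */
    *(uint64_t *)pvFrame = uValue;          /* fill in the mapped (possibly bounce buffered) frame */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp); /* unmaps and commits RSP on success */
}
#endif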
10672
10673
10674/**
10675 * Begin a special stack pop (used by iret, retf and such).
10676 *
10677 * This will raise \#SS or \#PF if appropriate.
10678 *
10679 * @returns Strict VBox status code.
10680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10681 * @param cbMem The number of bytes to pop from the stack.
10682 * @param ppvMem Where to return the pointer to the stack memory.
10683 * @param puNewRsp Where to return the new RSP value. This must be
10684 * assigned to CPUMCTX::rsp manually some time
10685 * after iemMemStackPopDoneSpecial() has been
10686 * called.
10687 */
10688IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10689{
10690 Assert(cbMem < UINT8_MAX);
10691 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10692 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10693}
10694
10695
10696/**
10697 * Continue a special stack pop (used by iret and retf).
10698 *
10699 * This will raise \#SS or \#PF if appropriate.
10700 *
10701 * @returns Strict VBox status code.
10702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10703 * @param cbMem The number of bytes to pop from the stack.
10704 * @param ppvMem Where to return the pointer to the stack memory.
10705 * @param puNewRsp Where to return the new RSP value. This must be
10706 * assigned to CPUMCTX::rsp manually some time
10707 * after iemMemStackPopDoneSpecial() has been
10708 * called.
10709 */
10710IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10711{
10712 Assert(cbMem < UINT8_MAX);
10713 RTUINT64U NewRsp;
10714 NewRsp.u = *puNewRsp;
10715 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10716 *puNewRsp = NewRsp.u;
10717 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10718}
10719
10720
10721/**
10722 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10723 * iemMemStackPopContinueSpecial).
10724 *
10725 * The caller will manually commit the rSP.
10726 *
10727 * @returns Strict VBox status code.
10728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10729 * @param pvMem The pointer returned by
10730 * iemMemStackPopBeginSpecial() or
10731 * iemMemStackPopContinueSpecial().
10732 */
10733IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10734{
10735 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10736}
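
/*
 * Illustrative usage sketch (not part of the original source): the special
 * pop helpers are used in begin/(continue/)done fashion, with the caller
 * committing RSP itself afterwards:
 *
 *     uint64_t const *pu64Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint64_t const  uPoppedValue = *pu64Frame;
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pVCpu->cpum.GstCtx.rsp = uNewRsp; // manual RSP commit, as documented above.
 */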
10737
10738
10739/**
10740 * Fetches a system table byte.
10741 *
10742 * @returns Strict VBox status code.
10743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10744 * @param pbDst Where to return the byte.
10745 * @param iSegReg The index of the segment register to use for
10746 * this access. The base and limits are checked.
10747 * @param GCPtrMem The address of the guest memory.
10748 */
10749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10750{
10751 /* The lazy approach for now... */
10752 uint8_t const *pbSrc;
10753 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10754 if (rc == VINF_SUCCESS)
10755 {
10756 *pbDst = *pbSrc;
10757 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10758 }
10759 return rc;
10760}
10761
10762
10763/**
10764 * Fetches a system table word.
10765 *
10766 * @returns Strict VBox status code.
10767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10768 * @param pu16Dst Where to return the word.
10769 * @param iSegReg The index of the segment register to use for
10770 * this access. The base and limits are checked.
10771 * @param GCPtrMem The address of the guest memory.
10772 */
10773IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10774{
10775 /* The lazy approach for now... */
10776 uint16_t const *pu16Src;
10777 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10778 if (rc == VINF_SUCCESS)
10779 {
10780 *pu16Dst = *pu16Src;
10781 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10782 }
10783 return rc;
10784}
10785
10786
10787/**
10788 * Fetches a system table dword.
10789 *
10790 * @returns Strict VBox status code.
10791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10792 * @param pu32Dst Where to return the dword.
10793 * @param iSegReg The index of the segment register to use for
10794 * this access. The base and limits are checked.
10795 * @param GCPtrMem The address of the guest memory.
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10798{
10799 /* The lazy approach for now... */
10800 uint32_t const *pu32Src;
10801 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10802 if (rc == VINF_SUCCESS)
10803 {
10804 *pu32Dst = *pu32Src;
10805 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10806 }
10807 return rc;
10808}
10809
10810
10811/**
10812 * Fetches a system table qword.
10813 *
10814 * @returns Strict VBox status code.
10815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10816 * @param pu64Dst Where to return the qword.
10817 * @param iSegReg The index of the segment register to use for
10818 * this access. The base and limits are checked.
10819 * @param GCPtrMem The address of the guest memory.
10820 */
10821IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10822{
10823 /* The lazy approach for now... */
10824 uint64_t const *pu64Src;
10825 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10826 if (rc == VINF_SUCCESS)
10827 {
10828 *pu64Dst = *pu64Src;
10829 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10830 }
10831 return rc;
10832}
10833
10834
10835/**
10836 * Fetches a descriptor table entry with caller specified error code.
10837 *
10838 * @returns Strict VBox status code.
10839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10840 * @param pDesc Where to return the descriptor table entry.
10841 * @param uSel The selector which table entry to fetch.
10842 * @param uXcpt The exception to raise on table lookup error.
10843 * @param uErrorCode The error code associated with the exception.
10844 */
10845IEM_STATIC VBOXSTRICTRC
10846iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10847{
10848 AssertPtr(pDesc);
10849 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10850
10851 /** @todo did the 286 require all 8 bytes to be accessible? */
10852 /*
10853 * Get the selector table base and check bounds.
10854 */
10855 RTGCPTR GCPtrBase;
10856 if (uSel & X86_SEL_LDT)
10857 {
10858 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10859 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10860 {
10861 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10862 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10863 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10864 uErrorCode, 0);
10865 }
10866
10867 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10868 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10869 }
10870 else
10871 {
10872 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10873 {
10874 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10875 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10876 uErrorCode, 0);
10877 }
10878 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10879 }
10880
10881 /*
10882 * Read the legacy descriptor and maybe the long mode extensions if
10883 * required.
10884 */
10885 VBOXSTRICTRC rcStrict;
10886 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10887 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10888 else
10889 {
10890 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10891 if (rcStrict == VINF_SUCCESS)
10892 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10893 if (rcStrict == VINF_SUCCESS)
10894 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10895 if (rcStrict == VINF_SUCCESS)
10896 pDesc->Legacy.au16[3] = 0;
10897 else
10898 return rcStrict;
10899 }
10900
10901 if (rcStrict == VINF_SUCCESS)
10902 {
10903 if ( !IEM_IS_LONG_MODE(pVCpu)
10904 || pDesc->Legacy.Gen.u1DescType)
10905 pDesc->Long.au64[1] = 0;
10906 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10907 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10908 else
10909 {
10910 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10911 /** @todo is this the right exception? */
10912 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10913 }
10914 }
10915 return rcStrict;
10916}
10917
10918
10919/**
10920 * Fetches a descriptor table entry.
10921 *
10922 * @returns Strict VBox status code.
10923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10924 * @param pDesc Where to return the descriptor table entry.
10925 * @param uSel The selector which table entry to fetch.
10926 * @param uXcpt The exception to raise on table lookup error.
10927 */
10928IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10929{
10930 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10931}
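
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * typically fetches the descriptor for a selector (uSel here) and lets the
 * helper raise the exception with the selector as error code on failure:
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     // ... inspect Desc.Legacy.Gen.u1Present, u4Type, u2Dpl and so on ...
 */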
10932
10933
10934/**
10935 * Fakes a long mode stack selector for SS = 0.
10936 *
10937 * @param pDescSs Where to return the fake stack descriptor.
10938 * @param uDpl The DPL we want.
10939 */
10940IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10941{
10942 pDescSs->Long.au64[0] = 0;
10943 pDescSs->Long.au64[1] = 0;
10944 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10945 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10946 pDescSs->Long.Gen.u2Dpl = uDpl;
10947 pDescSs->Long.Gen.u1Present = 1;
10948 pDescSs->Long.Gen.u1Long = 1;
10949}
10950
10951
10952/**
10953 * Marks the selector descriptor as accessed (only non-system descriptors).
10954 *
10955 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10956 * will therefore skip the limit checks.
10957 *
10958 * @returns Strict VBox status code.
10959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10960 * @param uSel The selector.
10961 */
10962IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
10963{
10964 /*
10965 * Get the selector table base and calculate the entry address.
10966 */
10967 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10968 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10969 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10970 GCPtr += uSel & X86_SEL_MASK;
10971
10972 /*
10973 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10974 * ugly stuff to avoid this.  This also makes sure the access is atomic and
10975 * more or less removes any question about 8-bit vs. 32-bit accesses.
10976 */
10977 VBOXSTRICTRC rcStrict;
10978 uint32_t volatile *pu32;
10979 if ((GCPtr & 3) == 0)
10980 {
10981        /* The normal case, map the 32 bits surrounding the accessed bit (bit 40). */
10982 GCPtr += 2 + 2;
10983 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10984 if (rcStrict != VINF_SUCCESS)
10985 return rcStrict;
10986        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10987 }
10988 else
10989 {
10990 /* The misaligned GDT/LDT case, map the whole thing. */
10991 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10992 if (rcStrict != VINF_SUCCESS)
10993 return rcStrict;
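        /* The accessed bit is bit 40 of the 8-byte descriptor.  Pick a
           dword-aligned base address within the mapping and recompute the
           index of bit 40 relative to that base, so that ASMAtomicBitSet
           always sees an aligned bitmap. */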
10994 switch ((uintptr_t)pu32 & 3)
10995 {
10996 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10997 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10998 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10999 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11000 }
11001 }
11002
11003 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11004}
11005
11006/** @} */
11007
11008
11009/*
11010 * Include the C/C++ implementation of instructions.
11011 */
11012#include "IEMAllCImpl.cpp.h"
11013
11014
11015
11016/** @name "Microcode" macros.
11017 *
11018 * The idea is that we should be able to use the same code both to interpret
11019 * instructions and to feed a recompiler. Thus this obfuscation.
11020 *
11021 * @{
11022 */
11023#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11024#define IEM_MC_END() }
11025#define IEM_MC_PAUSE() do {} while (0)
11026#define IEM_MC_CONTINUE() do {} while (0)
11027
11028/** Internal macro. */
11029#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11030 do \
11031 { \
11032 VBOXSTRICTRC rcStrict2 = a_Expr; \
11033 if (rcStrict2 != VINF_SUCCESS) \
11034 return rcStrict2; \
11035 } while (0)
11036
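/*
 * Illustrative sketch (not part of the original source): an instruction
 * implementation is written purely in terms of these IEM_MC_* macros, so the
 * same body can be expanded for interpretation now and, potentially, for a
 * recompiler later.  A register-to-register 16-bit move could look roughly
 * like this (iGRegDst/iGRegSrc being hypothetical locals of the decoder):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *     IEM_MC_STORE_GREG_U16(iGRegDst, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */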
11037
11038#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11039#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11040#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11041#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11042#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11043#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11044#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11045#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11046#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11047 do { \
11048 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11049 return iemRaiseDeviceNotAvailable(pVCpu); \
11050 } while (0)
11051#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11052 do { \
11053 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11054 return iemRaiseDeviceNotAvailable(pVCpu); \
11055 } while (0)
11056#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11057 do { \
11058 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11059 return iemRaiseMathFault(pVCpu); \
11060 } while (0)
11061#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11062 do { \
11063 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11064 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11065 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11066 return iemRaiseUndefinedOpcode(pVCpu); \
11067 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11068 return iemRaiseDeviceNotAvailable(pVCpu); \
11069 } while (0)
11070#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11071 do { \
11072 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11073 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11074 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11075 return iemRaiseUndefinedOpcode(pVCpu); \
11076 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11077 return iemRaiseDeviceNotAvailable(pVCpu); \
11078 } while (0)
11079#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11080 do { \
11081 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11082 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11083 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11084 return iemRaiseUndefinedOpcode(pVCpu); \
11085 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11086 return iemRaiseDeviceNotAvailable(pVCpu); \
11087 } while (0)
11088#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11089 do { \
11090 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11091 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11092 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11093 return iemRaiseUndefinedOpcode(pVCpu); \
11094 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11095 return iemRaiseDeviceNotAvailable(pVCpu); \
11096 } while (0)
11097#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11098 do { \
11099 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11100 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11101 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11102 return iemRaiseUndefinedOpcode(pVCpu); \
11103 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11104 return iemRaiseDeviceNotAvailable(pVCpu); \
11105 } while (0)
11106#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11107 do { \
11108 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11109 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11110 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11111 return iemRaiseUndefinedOpcode(pVCpu); \
11112 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11113 return iemRaiseDeviceNotAvailable(pVCpu); \
11114 } while (0)
11115#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11116 do { \
11117 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11118 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11119 return iemRaiseUndefinedOpcode(pVCpu); \
11120 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11121 return iemRaiseDeviceNotAvailable(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11124 do { \
11125 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11126 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11127 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11128 return iemRaiseUndefinedOpcode(pVCpu); \
11129 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11130 return iemRaiseDeviceNotAvailable(pVCpu); \
11131 } while (0)
11132#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11133 do { \
11134 if (pVCpu->iem.s.uCpl != 0) \
11135 return iemRaiseGeneralProtectionFault0(pVCpu); \
11136 } while (0)
11137#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11138 do { \
11139 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11140 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11143 do { \
11144 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11145 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11147 return iemRaiseUndefinedOpcode(pVCpu); \
11148 } while (0)
11149#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11150 do { \
11151 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11152 return iemRaiseGeneralProtectionFault0(pVCpu); \
11153 } while (0)
11154
11155
11156#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11157#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11158#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11159#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11160#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11161#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11162#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11163 uint32_t a_Name; \
11164 uint32_t *a_pName = &a_Name
11165#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11166 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11167
11168#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11169#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11170
11171#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11172#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11173#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11174#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11175#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11176#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11188#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11189 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11190 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11191 } while (0)
11192#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11193 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11194 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11195 } while (0)
11196#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11197 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11198 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11199 } while (0)
11200/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11201#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11202 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11203 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11204 } while (0)
11205#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11206 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11207 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11208 } while (0)
11209/** @note Not for IOPL or IF testing or modification. */
11210#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11211#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11212#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11213#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11214
11215#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11216#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11217#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11218#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11219#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11220#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11221#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11222#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11223#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11224#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11225/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11226#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11227 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11228 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11229 } while (0)
11230#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11231 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11232 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11233 } while (0)
11234#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11235 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11236
11237
11238#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11239#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11240/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11241 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11242#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11243#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11244/** @note Not for IOPL or IF testing or modification. */
11245#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11246
11247#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11248#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11249#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11250 do { \
11251 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11252 *pu32Reg += (a_u32Value); \
11253        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11254 } while (0)
11255#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11256
11257#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11258#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11259#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11260 do { \
11261 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11262 *pu32Reg -= (a_u32Value); \
11263        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11264 } while (0)
11265#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11266#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11267
11268#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11269#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11270#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11271#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11272#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11273#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11274#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11275
11276#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11277#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11278#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11279#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11280
11281#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11282#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11283#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11284
11285#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11286#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11287#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11288
11289#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11290#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11291#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11292
11293#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11294#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11295#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11296
11297#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11298
11299#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11300
11301#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11302#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11303#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11304 do { \
11305 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11306 *pu32Reg &= (a_u32Value); \
11307        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11308 } while (0)
11309#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11310
11311#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11312#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11313#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11314 do { \
11315 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11316 *pu32Reg |= (a_u32Value); \
11317        pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11318 } while (0)
11319#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11320
11321
11322/** @note Not for IOPL or IF modification. */
11323#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11324/** @note Not for IOPL or IF modification. */
11325#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11326/** @note Not for IOPL or IF modification. */
11327#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11328
11329#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11330
11331/** Switches the FPU state to MMX mode (FSW.TOS=0, all register tags valid) if necessary. */
11332#define IEM_MC_FPU_TO_MMX_MODE() do { \
11333 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11334 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11335 } while (0)
11336
11337/** Switches the FPU state back from MMX mode (marks all registers as empty). */
11338#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11339 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11340 } while (0)
11341
11342#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11343 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11344#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11345 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11346#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11347 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11348 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11349 } while (0)
11350#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11351 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11352 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11353 } while (0)
11354#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11355 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11356#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11357 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11358#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11359 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11360
11361#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11362 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11363 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11364 } while (0)
11365#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11366 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11367#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11368 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11369#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11370 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11371#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11372 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11373 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11374 } while (0)
11375#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11376 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11377#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11378 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11379 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11380 } while (0)
11381#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11382 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11383#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11384 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11385 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11386 } while (0)
11387#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11388 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11389#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11390 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11391#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11392 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11393#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11394 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11395#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11396 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11397 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11398 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11399 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11400 } while (0)
11401
11402#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11403 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11404 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11405 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11406 } while (0)
11407#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11408 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11409 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11410 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11411 } while (0)
11412#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11413 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11414 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11415 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11416 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11417 } while (0)
11418#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11419 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11420 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11421 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11422 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11423 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11424 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11425 } while (0)
11426
11427#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11428#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11429 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11430 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11431 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11432 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11433 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11434 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11435 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11436 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11437 } while (0)
11438#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11439 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11440 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11441 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11442 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11443 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11444 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11445 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11446 } while (0)
11447#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11448 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11449 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11450 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11451 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11452 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11453 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11454 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11455 } while (0)
11456#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11457 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11458 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11459 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11460 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11461 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11462 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11463 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11464 } while (0)
11465
11466#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11467 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11468#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11469 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11470#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11471 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11472#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11473 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11474 uintptr_t const iYRegTmp = (a_iYReg); \
11475 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11476 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11477 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11478 } while (0)
11479
11480#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11481 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11482 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11483 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11484 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11485 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11486 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11487 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11488 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11489 } while (0)
11490#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11491 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11492 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11493 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11494 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11495 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11496 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11497 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11498 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11499 } while (0)
11500#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11501 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11502 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11503 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11504 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11505 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11506 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11507 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11508 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11509 } while (0)
11510
11511#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11512 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11513 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11514 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11515 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11516 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11521 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11522 } while (0)
11523#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11524 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11525 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11526 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11527 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11528 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11530 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11533 } while (0)
11534#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11535 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11536 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11538 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11543 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11544 } while (0)
11545#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11546 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11547 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11548 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11553 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11554 } while (0)
11555
11556#ifndef IEM_WITH_SETJMP
11557# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11559# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11561# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11563#else
11564# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11565 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11566# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11567 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11568# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11569 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11570#endif
11571
11572#ifndef IEM_WITH_SETJMP
11573# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11574 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11575# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11577# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11579#else
11580# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11581 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11582# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11583 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11584# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11585 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11586#endif
11587
11588#ifndef IEM_WITH_SETJMP
11589# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11591# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11593# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11595#else
11596# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11597 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11598# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11599 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11600# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11601 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11602#endif
11603
11604#ifdef SOME_UNUSED_FUNCTION
11605# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11607#endif
11608
11609#ifndef IEM_WITH_SETJMP
11610# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11611 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11612# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11613 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11614# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11615 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11616# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11618#else
11619# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11620 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11621# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11622 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11623# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11624 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11625# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627#endif
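
/*
 * Illustrative sketch (not part of the original source): thanks to the two
 * variants above, a memory-operand body reads the same regardless of whether
 * status-code or longjmp style error propagation is compiled in.  E.g. a
 * 64-bit load (iEffSeg/GCPtrEffSrc being the decoder's segment index and
 * effective address; the names here are illustrative):
 *
 *     IEM_MC_LOCAL(uint64_t, u64Value);
 *     IEM_MC_FETCH_MEM_U64(u64Value, iEffSeg, GCPtrEffSrc);
 *
 * Without IEM_WITH_SETJMP the macro bails out via IEM_MC_RETURN_ON_FAILURE;
 * with it, the *Jmp fetcher is expected to longjmp on failure instead.
 */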
11628
11629#ifndef IEM_WITH_SETJMP
11630# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11632# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11633 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11634# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11636#else
11637# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11640 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11642 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11643#endif
11644
11645#ifndef IEM_WITH_SETJMP
11646# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11648# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11650#else
11651# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11652 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11653# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11654 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11655#endif
11656
11657#ifndef IEM_WITH_SETJMP
11658# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11660# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11662#else
11663# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11664 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11665# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11666 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11667#endif
11668
11669
11670
11671#ifndef IEM_WITH_SETJMP
11672# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11673 do { \
11674 uint8_t u8Tmp; \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11676 (a_u16Dst) = u8Tmp; \
11677 } while (0)
11678# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11679 do { \
11680 uint8_t u8Tmp; \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11682 (a_u32Dst) = u8Tmp; \
11683 } while (0)
11684# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11685 do { \
11686 uint8_t u8Tmp; \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11688 (a_u64Dst) = u8Tmp; \
11689 } while (0)
11690# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11691 do { \
11692 uint16_t u16Tmp; \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11694 (a_u32Dst) = u16Tmp; \
11695 } while (0)
11696# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11697 do { \
11698 uint16_t u16Tmp; \
11699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11700 (a_u64Dst) = u16Tmp; \
11701 } while (0)
11702# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11703 do { \
11704 uint32_t u32Tmp; \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11706 (a_u64Dst) = u32Tmp; \
11707 } while (0)
11708#else /* IEM_WITH_SETJMP */
11709# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11710 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11711# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11712 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11713# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11714 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11720 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11721#endif /* IEM_WITH_SETJMP */
11722
11723#ifndef IEM_WITH_SETJMP
11724# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11725 do { \
11726 uint8_t u8Tmp; \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11728 (a_u16Dst) = (int8_t)u8Tmp; \
11729 } while (0)
11730# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11731 do { \
11732 uint8_t u8Tmp; \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11734 (a_u32Dst) = (int8_t)u8Tmp; \
11735 } while (0)
11736# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 do { \
11738 uint8_t u8Tmp; \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11740 (a_u64Dst) = (int8_t)u8Tmp; \
11741 } while (0)
11742# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11743 do { \
11744 uint16_t u16Tmp; \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11746 (a_u32Dst) = (int16_t)u16Tmp; \
11747 } while (0)
11748# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11749 do { \
11750 uint16_t u16Tmp; \
11751 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11752 (a_u64Dst) = (int16_t)u16Tmp; \
11753 } while (0)
11754# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11755 do { \
11756 uint32_t u32Tmp; \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11758 (a_u64Dst) = (int32_t)u32Tmp; \
11759 } while (0)
11760#else /* IEM_WITH_SETJMP */
11761# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11768 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11769# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11770 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11771# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11772 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11773#endif /* IEM_WITH_SETJMP */
11774
11775#ifndef IEM_WITH_SETJMP
11776# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11778# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11779 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11780# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11782# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11784#else
11785# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11786 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11787# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11788 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11789# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11790 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11791# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11792 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11793#endif
11794
11795#ifndef IEM_WITH_SETJMP
11796# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11798# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11800# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11801 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11802# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11803 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11804#else
11805# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11806 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11807# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11808 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11809# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11810 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11811# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11812 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11813#endif
11814
11815#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11816#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11817#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11818#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11819#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11820#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11821#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11822 do { \
11823 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11824 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11825 } while (0)
11826
11827#ifndef IEM_WITH_SETJMP
11828# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11830# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11832#else
11833# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11834 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11835# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11836 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11837#endif
11838
11839#ifndef IEM_WITH_SETJMP
11840# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11842# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11844#else
11845# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11846 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11847# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11848 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11849#endif
11850
11851
11852#define IEM_MC_PUSH_U16(a_u16Value) \
11853 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11854#define IEM_MC_PUSH_U32(a_u32Value) \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11856#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11858#define IEM_MC_PUSH_U64(a_u64Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11860
11861#define IEM_MC_POP_U16(a_pu16Value) \
11862 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11863#define IEM_MC_POP_U32(a_pu32Value) \
11864 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11865#define IEM_MC_POP_U64(a_pu64Value) \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11867
11868/** Maps guest memory for direct or bounce buffered access.
11869 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11870 * @remarks May return.
11871 */
11872#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11874
11875/** Maps guest memory for direct or bounce buffered access.
11876 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11877 * @remarks May return.
11878 */
11879#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11881
11882/** Commits the memory and unmaps the guest memory.
11883 * @remarks May return.
11884 */
11885#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
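/* Typical read-modify-write usage of the map/commit pair (illustrative sketch;
 * assumes an iemAImpl_* worker and the IEM_ACCESS_DATA_RW flag from
 * IEMInternal.h, with pu32Dst, u32Src, pEFlags and GCPtrEffDst declared via
 * the IEM_MC_ARG/IEM_MC_LOCAL helpers defined earlier in this file):
 *
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */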
11887
11888/** Commits the memory and unmaps the guest memory unless the FPU status word
11889 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11890 * would cause FLD not to store.
11891 *
11892 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11893 * store, while \#P will not.
11894 *
11895 * @remarks May in theory return - for now.
11896 */
11897#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11898 do { \
11899 if ( !(a_u16FSW & X86_FSW_ES) \
11900 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11901 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11903 } while (0)
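/* Worked example of the gate above (X86_FSW_IE and X86_FCW_IM share bit 0,
 * X86_FSW_ES is bit 7): with a_u16FSW = X86_FSW_ES | X86_FSW_IE and an
 * unmasked invalid-operation exception (FCW.IM clear), the expression
 * (FSW & (UE|OE|IE)) & ~(FCW & X86_FCW_MASK_ALL) is non-zero, so the commit is
 * skipped and nothing is stored.  With only X86_FSW_ES | X86_FSW_PE set, the
 * masked term is zero and the store is committed, matching the note that #P
 * does not prevent the store. */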
11904
11905/** Calculate the effective address from R/M. */
11906#ifndef IEM_WITH_SETJMP
11907# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11908 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11909#else
11910# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11911 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11912#endif
11913
11914#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11915#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11916#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11917#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11918#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11919#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11920#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11921
11922/**
11923 * Defers the rest of the instruction emulation to a C implementation routine
11924 * and returns, only taking the standard parameters.
11925 *
11926 * @param a_pfnCImpl The pointer to the C routine.
11927 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11928 */
11929#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11930
11931/**
11932 * Defers the rest of instruction emulation to a C implementation routine and
11933 * returns, taking one argument in addition to the standard ones.
11934 *
11935 * @param a_pfnCImpl The pointer to the C routine.
11936 * @param a0 The argument.
11937 */
11938#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11939
11940/**
11941 * Defers the rest of the instruction emulation to a C implementation routine
11942 * and returns, taking two arguments in addition to the standard ones.
11943 *
11944 * @param a_pfnCImpl The pointer to the C routine.
11945 * @param a0 The first extra argument.
11946 * @param a1 The second extra argument.
11947 */
11948#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11949
11950/**
11951 * Defers the rest of the instruction emulation to a C implementation routine
11952 * and returns, taking three arguments in addition to the standard ones.
11953 *
11954 * @param a_pfnCImpl The pointer to the C routine.
11955 * @param a0 The first extra argument.
11956 * @param a1 The second extra argument.
11957 * @param a2 The third extra argument.
11958 */
11959#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11960
11961/**
11962 * Defers the rest of the instruction emulation to a C implementation routine
11963 * and returns, taking four arguments in addition to the standard ones.
11964 *
11965 * @param a_pfnCImpl The pointer to the C routine.
11966 * @param a0 The first extra argument.
11967 * @param a1 The second extra argument.
11968 * @param a2 The third extra argument.
11969 * @param a3 The fourth extra argument.
11970 */
11971#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11972
11973/**
11974 * Defers the rest of the instruction emulation to a C implementation routine
11975 * and returns, taking five arguments in addition to the standard ones.
11976 *
11977 * @param a_pfnCImpl The pointer to the C routine.
11978 * @param a0 The first extra argument.
11979 * @param a1 The second extra argument.
11980 * @param a2 The third extra argument.
11981 * @param a3 The fourth extra argument.
11982 * @param a4 The fifth extra argument.
11983 */
11984#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11985
11986/**
11987 * Defers the entire instruction emulation to a C implementation routine and
11988 * returns, only taking the standard parameters.
11989 *
11990 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11991 *
11992 * @param a_pfnCImpl The pointer to the C routine.
11993 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11994 */
11995#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11996
11997/**
11998 * Defers the entire instruction emulation to a C implementation routine and
11999 * returns, taking one argument in addition to the standard ones.
12000 *
12001 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12002 *
12003 * @param a_pfnCImpl The pointer to the C routine.
12004 * @param a0 The argument.
12005 */
12006#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12007
12008/**
12009 * Defers the entire instruction emulation to a C implementation routine and
12010 * returns, taking two arguments in addition to the standard ones.
12011 *
12012 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12013 *
12014 * @param a_pfnCImpl The pointer to the C routine.
12015 * @param a0 The first extra argument.
12016 * @param a1 The second extra argument.
12017 */
12018#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12019
12020/**
12021 * Defers the entire instruction emulation to a C implementation routine and
12022 * returns, taking three arguments in addition to the standard ones.
12023 *
12024 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12025 *
12026 * @param a_pfnCImpl The pointer to the C routine.
12027 * @param a0 The first extra argument.
12028 * @param a1 The second extra argument.
12029 * @param a2 The third extra argument.
12030 */
12031#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
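/* Illustrative sketches (iemCImpl_SomeHelper is a placeholder name): the
 * IEM_MC_CALL_CIMPL_* macros are used inside an IEM_MC_BEGIN/IEM_MC_END block
 * once the operands have been decoded,
 *
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeHelper, iEffSeg, GCPtrEffSrc);
 *
 * whereas the IEM_MC_DEFER_TO_CIMPL_* macros hand the whole instruction over
 * without any surrounding block, e.g.
 *
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
 */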
12032
12033/**
12034 * Calls an FPU assembly implementation taking one visible argument.
12035 *
12036 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12037 * @param a0 The first extra argument.
12038 */
12039#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12040 do { \
12041 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12042 } while (0)
12043
12044/**
12045 * Calls an FPU assembly implementation taking two visible arguments.
12046 *
12047 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12048 * @param a0 The first extra argument.
12049 * @param a1 The second extra argument.
12050 */
12051#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12052 do { \
12053 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12054 } while (0)
12055
12056/**
12057 * Calls an FPU assembly implementation taking three visible arguments.
12058 *
12059 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12060 * @param a0 The first extra argument.
12061 * @param a1 The second extra argument.
12062 * @param a2 The third extra argument.
12063 */
12064#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12065 do { \
12066 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12067 } while (0)
12068
12069#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12070 do { \
12071 (a_FpuData).FSW = (a_FSW); \
12072 (a_FpuData).r80Result = *(a_pr80Value); \
12073 } while (0)
12074
12075/** Pushes FPU result onto the stack. */
12076#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12077 iemFpuPushResult(pVCpu, &a_FpuData)
12078/** Pushes FPU result onto the stack and sets the FPUDP. */
12079#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12080 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12081
12082/** Replaces ST0 with value one and pushes value two onto the FPU stack. */
12083#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12084 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12085
12086/** Stores FPU result in a stack register. */
12087#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12088 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12089/** Stores FPU result in a stack register and pops the stack. */
12090#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12091 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12092/** Stores FPU result in a stack register and sets the FPUDP. */
12093#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12094 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12095/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12096 * stack. */
12097#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12098 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
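/* Typical result flow for an FPU arithmetic instruction (illustrative sketch;
 * assumes the IEMFPURESULT type and an iemAImpl_* FPU worker defined elsewhere
 * in IEM):
 *
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      ...
 *      IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *      IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 */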
12099
12100/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12101#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12102 iemFpuUpdateOpcodeAndIp(pVCpu)
12103/** Free a stack register (for FFREE and FFREEP). */
12104#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12105 iemFpuStackFree(pVCpu, a_iStReg)
12106/** Increment the FPU stack pointer. */
12107#define IEM_MC_FPU_STACK_INC_TOP() \
12108 iemFpuStackIncTop(pVCpu)
12109/** Decrement the FPU stack pointer. */
12110#define IEM_MC_FPU_STACK_DEC_TOP() \
12111 iemFpuStackDecTop(pVCpu)
12112
12113/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12114#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12115 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12116/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12117#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12118 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12119/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12120#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12121 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12122/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12123#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12124 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12125/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12126 * stack. */
12127#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12128 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12129/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12130#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12131 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12132
12133/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12134#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12135 iemFpuStackUnderflow(pVCpu, a_iStDst)
12136/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12137 * stack. */
12138#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12139 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12140/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12141 * FPUDS. */
12142#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12143 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12144/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12145 * FPUDS. Pops stack. */
12146#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12147 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12148/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12149 * stack twice. */
12150#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12151 iemFpuStackUnderflowThenPopPop(pVCpu)
12152/** Raises an FPU stack underflow exception for an instruction pushing a result
12153 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12154#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12155 iemFpuStackPushUnderflow(pVCpu)
12156/** Raises an FPU stack underflow exception for an instruction pushing a result
12157 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12158#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12159 iemFpuStackPushUnderflowTwo(pVCpu)
12160
12161/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12162 * FPUIP, FPUCS and FOP. */
12163#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12164 iemFpuStackPushOverflow(pVCpu)
12165/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12166 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12167#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12168 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12169/** Prepares for using the FPU state.
12170 * Ensures that we can use the host FPU in the current context (RC+R0).
12171 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12172#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12173/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12174#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12175/** Actualizes the guest FPU state so it can be accessed and modified. */
12176#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12177
12178/** Prepares for using the SSE state.
12179 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12180 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12181#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12182/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12183#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12184/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12185#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12186
12187/** Prepares for using the AVX state.
12188 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12189 * Ensures the guest AVX state in the CPUMCTX is up to date.
12190 * @note This will include the AVX512 state too when support for it is added
12191 *       due to the zero-extending feature of VEX instructions. */
12192#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12193/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12194#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12195/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12196#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12197
12198/**
12199 * Calls an MMX assembly implementation taking two visible arguments.
12200 *
12201 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12202 * @param a0 The first extra argument.
12203 * @param a1 The second extra argument.
12204 */
12205#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12206 do { \
12207 IEM_MC_PREPARE_FPU_USAGE(); \
12208 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12209 } while (0)
12210
12211/**
12212 * Calls an MMX assembly implementation taking three visible arguments.
12213 *
12214 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12215 * @param a0 The first extra argument.
12216 * @param a1 The second extra argument.
12217 * @param a2 The third extra argument.
12218 */
12219#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12220 do { \
12221 IEM_MC_PREPARE_FPU_USAGE(); \
12222 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12223 } while (0)
12224
12225
12226/**
12227 * Calls an SSE assembly implementation taking two visible arguments.
12228 *
12229 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12230 * @param a0 The first extra argument.
12231 * @param a1 The second extra argument.
12232 */
12233#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12234 do { \
12235 IEM_MC_PREPARE_SSE_USAGE(); \
12236 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12237 } while (0)
12238
12239/**
12240 * Calls an SSE assembly implementation taking three visible arguments.
12241 *
12242 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12243 * @param a0 The first extra argument.
12244 * @param a1 The second extra argument.
12245 * @param a2 The third extra argument.
12246 */
12247#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12248 do { \
12249 IEM_MC_PREPARE_SSE_USAGE(); \
12250 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12251 } while (0)
12252
12253
12254/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12255 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12256#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12257 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12258
12259/**
12260 * Calls an AVX assembly implementation taking two visible arguments.
12261 *
12262 * There is one implicit zeroth argument, a pointer to the extended state.
12263 *
12264 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12265 * @param a1 The first extra argument.
12266 * @param a2 The second extra argument.
12267 */
12268#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12269 do { \
12270 IEM_MC_PREPARE_AVX_USAGE(); \
12271 a_pfnAImpl(pXState, (a1), (a2)); \
12272 } while (0)
12273
12274/**
12275 * Calls an AVX assembly implementation taking three visible arguments.
12276 *
12277 * There is one implicit zeroth argument, a pointer to the extended state.
12278 *
12279 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12280 * @param a1 The first extra argument.
12281 * @param a2 The second extra argument.
12282 * @param a3 The third extra argument.
12283 */
12284#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12285 do { \
12286 IEM_MC_PREPARE_AVX_USAGE(); \
12287 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12288 } while (0)
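/* Usage sketch for the AVX call macros above (illustrative; the worker name is
 * a placeholder): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares the implicit
 * zeroth argument, so only the remaining arguments are passed explicitly.
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *      ...
 *      IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_SomeAvxWorker, puDst, puSrc);
 */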
12289
12290/** @note Not for IOPL or IF testing. */
12291#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12292/** @note Not for IOPL or IF testing. */
12293#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12294/** @note Not for IOPL or IF testing. */
12295#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12296/** @note Not for IOPL or IF testing. */
12297#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12298/** @note Not for IOPL or IF testing. */
12299#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12300 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12301 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12302/** @note Not for IOPL or IF testing. */
12303#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12304 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12305 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12306/** @note Not for IOPL or IF testing. */
12307#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12308 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12309 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12310 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12311/** @note Not for IOPL or IF testing. */
12312#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12313 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12314 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12315 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12316#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12317#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12318#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12319/** @note Not for IOPL or IF testing. */
12320#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12321 if ( pVCpu->cpum.GstCtx.cx != 0 \
12322 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12323/** @note Not for IOPL or IF testing. */
12324#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12325 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12326 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12327/** @note Not for IOPL or IF testing. */
12328#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12329 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12330 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12331/** @note Not for IOPL or IF testing. */
12332#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12333 if ( pVCpu->cpum.GstCtx.cx != 0 \
12334 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12337 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12338 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12339/** @note Not for IOPL or IF testing. */
12340#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12341 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12342 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12343#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12344#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12345
12346#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12347 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12348#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12349 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12350#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12351 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12352#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12353 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12354#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12355 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12356#define IEM_MC_IF_FCW_IM() \
12357 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12358
12359#define IEM_MC_ELSE() } else {
12360#define IEM_MC_ENDIF() } do {} while (0)
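/* Example of the conditional micro-ops above (illustrative sketch): the macros
 * already supply the braces, so a "less than" style test (SF != OF, as used by
 * jl/setl) reads
 *
 *      IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *
 * where i8Imm is the previously fetched branch displacement. */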
12361
12362/** @} */
12363
12364
12365/** @name Opcode Debug Helpers.
12366 * @{
12367 */
12368#ifdef VBOX_WITH_STATISTICS
12369# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12370#else
12371# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12372#endif
12373
12374#ifdef DEBUG
12375# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12376 do { \
12377 IEMOP_INC_STATS(a_Stats); \
12378 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12379 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12380 } while (0)
12381
12382# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12383 do { \
12384 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12385 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12386 (void)RT_CONCAT(OP_,a_Upper); \
12387 (void)(a_fDisHints); \
12388 (void)(a_fIemHints); \
12389 } while (0)
12390
12391# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12392 do { \
12393 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12394 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12395 (void)RT_CONCAT(OP_,a_Upper); \
12396 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12397 (void)(a_fDisHints); \
12398 (void)(a_fIemHints); \
12399 } while (0)
12400
12401# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12402 do { \
12403 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12404 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12405 (void)RT_CONCAT(OP_,a_Upper); \
12406 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12407 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12408 (void)(a_fDisHints); \
12409 (void)(a_fIemHints); \
12410 } while (0)
12411
12412# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12413 do { \
12414 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12415 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12416 (void)RT_CONCAT(OP_,a_Upper); \
12417 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12418 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12419 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12420 (void)(a_fDisHints); \
12421 (void)(a_fIemHints); \
12422 } while (0)
12423
12424# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12425 do { \
12426 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12427 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12428 (void)RT_CONCAT(OP_,a_Upper); \
12429 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12430 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12431 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12432 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12433 (void)(a_fDisHints); \
12434 (void)(a_fIemHints); \
12435 } while (0)
12436
12437#else
12438# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12439
12440# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12441 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12442# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12443 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12444# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12445 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12446# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12447 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12448# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12449 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12450
12451#endif
12452
12453#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12454 IEMOP_MNEMONIC0EX(a_Lower, \
12455 #a_Lower, \
12456 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12457#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12458 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12459 #a_Lower " " #a_Op1, \
12460 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12461#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12462 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12463 #a_Lower " " #a_Op1 "," #a_Op2, \
12464 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12465#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12466 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12467 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12468 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12469#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12470 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12471 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12472 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
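/* Expansion example (illustrative invocation): something like
 *
 *      IEMOP_MNEMONIC2(RM, ADD, add, Gb, Eb, DISOPTYPE_HARMLESS, 0);
 *
 * bumps the statistics member add_Gb_Eb, logs the mnemonic string "add Gb,Eb"
 * at level 4 in debug builds, and lets the compiler verify that IEMOPFORM_RM,
 * OP_ADD, OP_PARM_Gb and OP_PARM_Eb all exist. */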
12473
12474/** @} */
12475
12476
12477/** @name Opcode Helpers.
12478 * @{
12479 */
12480
12481#ifdef IN_RING3
12482# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12483 do { \
12484 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12485 else \
12486 { \
12487 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12488 return IEMOP_RAISE_INVALID_OPCODE(); \
12489 } \
12490 } while (0)
12491#else
12492# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12493 do { \
12494 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12495 else return IEMOP_RAISE_INVALID_OPCODE(); \
12496 } while (0)
12497#endif
12498
12499/** The instruction requires a 186 or later. */
12500#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12501# define IEMOP_HLP_MIN_186() do { } while (0)
12502#else
12503# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12504#endif
12505
12506/** The instruction requires a 286 or later. */
12507#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12508# define IEMOP_HLP_MIN_286() do { } while (0)
12509#else
12510# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12511#endif
12512
12513/** The instruction requires a 386 or later. */
12514#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12515# define IEMOP_HLP_MIN_386() do { } while (0)
12516#else
12517# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12518#endif
12519
12520/** The instruction requires a 386 or later if the given expression is true. */
12521#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12522# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12523#else
12524# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12525#endif
12526
12527/** The instruction requires a 486 or later. */
12528#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12529# define IEMOP_HLP_MIN_486() do { } while (0)
12530#else
12531# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12532#endif
12533
12534/** The instruction requires a Pentium (586) or later. */
12535#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12536# define IEMOP_HLP_MIN_586() do { } while (0)
12537#else
12538# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12539#endif
12540
12541/** The instruction requires a PentiumPro (686) or later. */
12542#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12543# define IEMOP_HLP_MIN_686() do { } while (0)
12544#else
12545# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12546#endif
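/* Usage sketch (illustrative): an instruction introduced with the 486, such as
 * cmpxchg, starts its decoder with
 *
 *      IEMOP_HLP_MIN_486();
 *
 * which compiles to nothing when the configured target CPU is a 486 or later
 * and otherwise raises #UD. */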
12547
12548
12549/** The instruction raises an \#UD in real and V8086 mode. */
12550#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12551 do \
12552 { \
12553 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12554 else return IEMOP_RAISE_INVALID_OPCODE(); \
12555 } while (0)
12556
12557#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12558/** The instruction raises an \#UD in real and V8086 mode, or in long mode when
12559 * not using a 64-bit code segment (applicable to all VMX instructions except
12560 * VMCALL).
12561 */
12562#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12563 do \
12564 { \
12565 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12566 && ( !IEM_IS_LONG_MODE(pVCpu) \
12567 || IEM_IS_64BIT_CODE(pVCpu))) \
12568 { /* likely */ } \
12569 else \
12570 { \
12571 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12572 { \
12573 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12574 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12575 return IEMOP_RAISE_INVALID_OPCODE(); \
12576 } \
12577 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12578 { \
12579 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12580 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12581 return IEMOP_RAISE_INVALID_OPCODE(); \
12582 } \
12583 } \
12584 } while (0)
12585
12586/** The instruction can only be executed in VMX operation (VMX root mode and
12587 * non-root mode).
12588 *
12589 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12590 */
12591# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12592 do \
12593 { \
12594 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12595 else \
12596 { \
12597 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12598 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12599 return IEMOP_RAISE_INVALID_OPCODE(); \
12600 } \
12601 } while (0)
12602#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12603
12604/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12605 * 64-bit mode. */
12606#define IEMOP_HLP_NO_64BIT() \
12607 do \
12608 { \
12609 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12610 return IEMOP_RAISE_INVALID_OPCODE(); \
12611 } while (0)
12612
12613/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12614 * 64-bit mode. */
12615#define IEMOP_HLP_ONLY_64BIT() \
12616 do \
12617 { \
12618 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12619 return IEMOP_RAISE_INVALID_OPCODE(); \
12620 } while (0)
12621
12622/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12623#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12624 do \
12625 { \
12626 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12627 iemRecalEffOpSize64Default(pVCpu); \
12628 } while (0)
12629
12630/** The instruction has 64-bit operand size if 64-bit mode. */
12631#define IEMOP_HLP_64BIT_OP_SIZE() \
12632 do \
12633 { \
12634 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12635 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12636 } while (0)
12637
12638/** Only a REX prefix immediately preceding the first opcode byte takes
12639 * effect. This macro helps ensure this and logs bad guest code. */
12640#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12641 do \
12642 { \
12643 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12644 { \
12645 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12646 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12647 pVCpu->iem.s.uRexB = 0; \
12648 pVCpu->iem.s.uRexIndex = 0; \
12649 pVCpu->iem.s.uRexReg = 0; \
12650 iemRecalEffOpSize(pVCpu); \
12651 } \
12652 } while (0)
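/* Example (per the REX rules of the AMD64/Intel 64 manuals): in the byte
 * sequence 48 66 89 C8 the REX.W prefix (48) is followed by an operand-size
 * prefix, so it no longer immediately precedes the opcode and is cleared by
 * the macro above; the instruction decodes as the 16-bit "mov ax, cx" rather
 * than "mov rax, rcx". */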
12653
12654/**
12655 * Done decoding.
12656 */
12657#define IEMOP_HLP_DONE_DECODING() \
12658 do \
12659 { \
12660 /*nothing for now, maybe later... */ \
12661 } while (0)
12662
12663/**
12664 * Done decoding, raise \#UD exception if lock prefix present.
12665 */
12666#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12667 do \
12668 { \
12669 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12670 { /* likely */ } \
12671 else \
12672 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12673 } while (0)
12674
12675
12676/**
12677 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12678 * repnz or size prefixes are present, or if in real or v8086 mode.
12679 */
12680#define IEMOP_HLP_DONE_VEX_DECODING() \
12681 do \
12682 { \
12683 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12684 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12685 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12686 { /* likely */ } \
12687 else \
12688 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12689 } while (0)
12690
12691/**
12692 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz, repnz
12693 * or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12694 */
12695#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12696 do \
12697 { \
12698 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12699 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12700 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12701 && pVCpu->iem.s.uVexLength == 0)) \
12702 { /* likely */ } \
12703 else \
12704 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12705 } while (0)
12706
12707
12708/**
12709 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12710 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12711 * register 0, or if in real or v8086 mode.
12712 */
12713#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12714 do \
12715 { \
12716 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12717 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12718 && !pVCpu->iem.s.uVex3rdReg \
12719 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12720 { /* likely */ } \
12721 else \
12722 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12723 } while (0)
12724
12725/**
12726 * Done decoding VEX, no V, L=0.
12727 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12728 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12729 */
12730#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12731 do \
12732 { \
12733 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12734 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12735 && pVCpu->iem.s.uVexLength == 0 \
12736 && pVCpu->iem.s.uVex3rdReg == 0 \
12737 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12738 { /* likely */ } \
12739 else \
12740 return IEMOP_RAISE_INVALID_OPCODE(); \
12741 } while (0)
12742
12743#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12744 do \
12745 { \
12746 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12747 { /* likely */ } \
12748 else \
12749 { \
12750 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12751 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12752 } \
12753 } while (0)
12754#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12755 do \
12756 { \
12757 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12758 { /* likely */ } \
12759 else \
12760 { \
12761 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12762 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12763 } \
12764 } while (0)
12765
12766/**
12767 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12768 * are present.
12769 */
12770#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12771 do \
12772 { \
12773 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12774 { /* likely */ } \
12775 else \
12776 return IEMOP_RAISE_INVALID_OPCODE(); \
12777 } while (0)
12778
12779/**
12780 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12781 * prefixes are present.
12782 */
12783#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12784 do \
12785 { \
12786 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12787 { /* likely */ } \
12788 else \
12789 return IEMOP_RAISE_INVALID_OPCODE(); \
12790 } while (0)
12791
12792
12793/**
12794 * Calculates the effective address of a ModR/M memory operand.
12795 *
12796 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12797 *
12798 * @return Strict VBox status code.
12799 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12800 * @param bRm The ModRM byte.
12801 * @param cbImm The size of any immediate following the
12802 * effective address opcode bytes. Important for
12803 * RIP relative addressing.
12804 * @param pGCPtrEff Where to return the effective address.
12805 */
12806IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12807{
12808 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12809# define SET_SS_DEF() \
12810 do \
12811 { \
12812 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12813 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12814 } while (0)
12815
12816 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12817 {
12818/** @todo Check the effective address size crap! */
12819 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12820 {
12821 uint16_t u16EffAddr;
12822
12823 /* Handle the disp16 form with no registers first. */
12824 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12825 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12826 else
12827 {
12828                /* Get the displacement. */
12829 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12830 {
12831 case 0: u16EffAddr = 0; break;
12832 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12833 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12834 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12835 }
12836
12837 /* Add the base and index registers to the disp. */
12838 switch (bRm & X86_MODRM_RM_MASK)
12839 {
12840 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12841 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12842 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12843 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12844 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12845 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12846 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12847 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12848 }
12849 }
12850
12851 *pGCPtrEff = u16EffAddr;
12852 }
12853 else
12854 {
12855 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12856 uint32_t u32EffAddr;
12857
12858 /* Handle the disp32 form with no registers first. */
12859 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12860 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12861 else
12862 {
12863 /* Get the register (or SIB) value. */
12864 switch ((bRm & X86_MODRM_RM_MASK))
12865 {
12866 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12867 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12868 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12869 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12870 case 4: /* SIB */
12871 {
12872 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12873
12874 /* Get the index and scale it. */
12875 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12876 {
12877 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12878 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12879 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12880 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12881 case 4: u32EffAddr = 0; /*none */ break;
12882 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12883 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12884 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12885 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12886 }
12887 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12888
12889 /* add base */
12890 switch (bSib & X86_SIB_BASE_MASK)
12891 {
12892 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12893 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12894 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12895 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12896 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12897 case 5:
12898 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12899 {
12900 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12901 SET_SS_DEF();
12902 }
12903 else
12904 {
12905 uint32_t u32Disp;
12906 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12907 u32EffAddr += u32Disp;
12908 }
12909 break;
12910 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12911 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12912 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12913 }
12914 break;
12915 }
12916 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12917 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12918 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12920 }
12921
12922 /* Get and add the displacement. */
12923 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12924 {
12925 case 0:
12926 break;
12927 case 1:
12928 {
12929 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12930 u32EffAddr += i8Disp;
12931 break;
12932 }
12933 case 2:
12934 {
12935 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12936 u32EffAddr += u32Disp;
12937 break;
12938 }
12939 default:
12940 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12941 }
12942
12943 }
12944 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12945 *pGCPtrEff = u32EffAddr;
12946 else
12947 {
12948 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12949 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12950 }
12951 }
12952 }
12953 else
12954 {
12955 uint64_t u64EffAddr;
12956
12957 /* Handle the rip+disp32 form with no registers first. */
12958 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12959 {
12960 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12961 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12962 }
12963 else
12964 {
12965 /* Get the register (or SIB) value. */
12966 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12967 {
12968 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12969 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12970 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12971 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12972 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12973 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12974 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12975 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12976 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12977 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12978 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12979 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12980 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12981 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12982 /* SIB */
12983 case 4:
12984 case 12:
12985 {
12986 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12987
12988 /* Get the index and scale it. */
12989 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12990 {
12991 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12992 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12993 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12994 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12995 case 4: u64EffAddr = 0; /*none */ break;
12996 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12997 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12998 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12999 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13000 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13001 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13002 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13003 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13004 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13005 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13006 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13007 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13008 }
13009 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13010
13011 /* add base */
13012 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13013 {
13014 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13015 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13016 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13017 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13018 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13019 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13020 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13021 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13022 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13023 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13024 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13025 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13026 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13027 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13028 /* complicated encodings */
13029 case 5:
13030 case 13:
13031 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13032 {
13033 if (!pVCpu->iem.s.uRexB)
13034 {
13035 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13036 SET_SS_DEF();
13037 }
13038 else
13039 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13040 }
13041 else
13042 {
13043 uint32_t u32Disp;
13044 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13045 u64EffAddr += (int32_t)u32Disp;
13046 }
13047 break;
13048 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13049 }
13050 break;
13051 }
13052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13053 }
13054
13055 /* Get and add the displacement. */
13056 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13057 {
13058 case 0:
13059 break;
13060 case 1:
13061 {
13062 int8_t i8Disp;
13063 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13064 u64EffAddr += i8Disp;
13065 break;
13066 }
13067 case 2:
13068 {
13069 uint32_t u32Disp;
13070 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13071 u64EffAddr += (int32_t)u32Disp;
13072 break;
13073 }
13074 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13075 }
13076
13077 }
13078
13079 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13080 *pGCPtrEff = u64EffAddr;
13081 else
13082 {
13083 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13084 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13085 }
13086 }
13087
13088 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13089 return VINF_SUCCESS;
13090}
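/* Worked example for the 32-bit path above: bRm=0x44 (mod=1, reg=0, rm=4)
 * selects the SIB form with a disp8; with SIB=0x24 (scale=0, index=4=none,
 * base=4=ESP) and disp8=0x10, the result is *pGCPtrEff = ESP + 0x10 and
 * SET_SS_DEF() makes SS the default segment unless a segment prefix is used. */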
13091
13092
13093/**
13094 * Calculates the effective address of a ModR/M memory operand.
13095 *
13096 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13097 *
13098 * @return Strict VBox status code.
13099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13100 * @param bRm The ModRM byte.
13101 * @param cbImm The size of any immediate following the
13102 * effective address opcode bytes. Important for
13103 * RIP relative addressing.
13104 * @param pGCPtrEff Where to return the effective address.
13105 * @param offRsp RSP displacement.
13106 */
13107IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13108{
13109    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13110# define SET_SS_DEF() \
13111 do \
13112 { \
13113 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13114 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13115 } while (0)
13116
13117 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13118 {
13119/** @todo Check the effective address size crap! */
13120 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13121 {
13122 uint16_t u16EffAddr;
13123
13124 /* Handle the disp16 form with no registers first. */
13125 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13126 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13127 else
13128 {
13129                 /* Get the displacement. */
13130 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13131 {
13132 case 0: u16EffAddr = 0; break;
13133 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13134 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13135 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13136 }
13137
13138 /* Add the base and index registers to the disp. */
13139 switch (bRm & X86_MODRM_RM_MASK)
13140 {
13141 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13142 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13143 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13144 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13145 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13146 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13147 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13148 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13149 }
13150 }
13151
13152 *pGCPtrEff = u16EffAddr;
13153 }
13154 else
13155 {
13156 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13157 uint32_t u32EffAddr;
13158
13159 /* Handle the disp32 form with no registers first. */
13160 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13161 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13162 else
13163 {
13164 /* Get the register (or SIB) value. */
13165 switch ((bRm & X86_MODRM_RM_MASK))
13166 {
13167 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13168 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13169 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13170 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13171 case 4: /* SIB */
13172 {
13173 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13174
13175 /* Get the index and scale it. */
13176 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13177 {
13178 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13179 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13180 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13181 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13182 case 4: u32EffAddr = 0; /*none */ break;
13183 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13184 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13185 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13186 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13187 }
13188 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13189
13190 /* add base */
13191 switch (bSib & X86_SIB_BASE_MASK)
13192 {
13193 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13194 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13195 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13196 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13197 case 4:
13198 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13199 SET_SS_DEF();
13200 break;
13201 case 5:
13202 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13203 {
13204 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13205 SET_SS_DEF();
13206 }
13207 else
13208 {
13209 uint32_t u32Disp;
13210 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13211 u32EffAddr += u32Disp;
13212 }
13213 break;
13214 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13215 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13216 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13217 }
13218 break;
13219 }
13220 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13221 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13222 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13223 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13224 }
13225
13226 /* Get and add the displacement. */
13227 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13228 {
13229 case 0:
13230 break;
13231 case 1:
13232 {
13233 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13234 u32EffAddr += i8Disp;
13235 break;
13236 }
13237 case 2:
13238 {
13239 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13240 u32EffAddr += u32Disp;
13241 break;
13242 }
13243 default:
13244 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13245 }
13246
13247 }
13248 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13249 *pGCPtrEff = u32EffAddr;
13250 else
13251 {
13252 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13253 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13254 }
13255 }
13256 }
13257 else
13258 {
13259 uint64_t u64EffAddr;
13260
13261 /* Handle the rip+disp32 form with no registers first. */
13262 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13263 {
13264 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
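                /* The disp32 just fetched is relative to the next instruction, i.e. the
                   current RIP plus the opcode bytes decoded so far plus the trailing
                   immediate (cbImm) that has not been fetched yet. */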
13265 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13266 }
13267 else
13268 {
13269 /* Get the register (or SIB) value. */
13270 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13271 {
13272 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13273 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13274 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13275 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13276 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13277 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13278 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13279 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13280 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13281 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13282 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13283 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13284 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13285 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13286 /* SIB */
13287 case 4:
13288 case 12:
13289 {
13290 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13291
13292 /* Get the index and scale it. */
13293 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13294 {
13295 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13296 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13297 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13298 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13299 case 4: u64EffAddr = 0; /*none */ break;
13300 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13301 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13302 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13303 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13304 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13305 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13306 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13307 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13308 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13309 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13310 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13311 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13312 }
13313 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13314
13315 /* add base */
13316 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13317 {
13318 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13319 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13320 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13321 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13322 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13323 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13324 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13325 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13326 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13327 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13328 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13329 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13330 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13331 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13332 /* complicated encodings */
13333 case 5:
13334 case 13:
13335 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13336 {
13337 if (!pVCpu->iem.s.uRexB)
13338 {
13339 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13340 SET_SS_DEF();
13341 }
13342 else
13343 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13344 }
13345 else
13346 {
13347 uint32_t u32Disp;
13348 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13349 u64EffAddr += (int32_t)u32Disp;
13350 }
13351 break;
13352 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13353 }
13354 break;
13355 }
13356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13357 }
13358
13359 /* Get and add the displacement. */
13360 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13361 {
13362 case 0:
13363 break;
13364 case 1:
13365 {
13366 int8_t i8Disp;
13367 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13368 u64EffAddr += i8Disp;
13369 break;
13370 }
13371 case 2:
13372 {
13373 uint32_t u32Disp;
13374 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13375 u64EffAddr += (int32_t)u32Disp;
13376 break;
13377 }
13378 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13379 }
13380
13381 }
13382
13383 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13384 *pGCPtrEff = u64EffAddr;
13385 else
13386 {
13387 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13388 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13389 }
13390 }
13391
13392     Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13393 return VINF_SUCCESS;
13394}
13395
13396
13397#ifdef IEM_WITH_SETJMP
13398/**
13399 * Calculates the effective address of a ModR/M memory operand.
13400 *
13401 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13402 *
13403 * May longjmp on internal error.
13404 *
13405 * @return The effective address.
13406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13407 * @param bRm The ModRM byte.
13408 * @param cbImm The size of any immediate following the
13409 * effective address opcode bytes. Important for
13410 * RIP relative addressing.
13411 */
13412IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13413{
13414 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13415# define SET_SS_DEF() \
13416 do \
13417 { \
13418 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13419 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13420 } while (0)
13421
13422 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13423 {
13424/** @todo Check the effective address size crap! */
13425 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13426 {
13427 uint16_t u16EffAddr;
13428
13429 /* Handle the disp16 form with no registers first. */
13430 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13431 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13432 else
13433 {
13434             /* Get the displacement. */
13435 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13436 {
13437 case 0: u16EffAddr = 0; break;
13438 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13439 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13440 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13441 }
13442
13443 /* Add the base and index registers to the disp. */
13444 switch (bRm & X86_MODRM_RM_MASK)
13445 {
13446 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13447 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13448 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13449 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13450 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13451 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13452 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13453 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13454 }
13455 }
13456
13457 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13458 return u16EffAddr;
13459 }
13460
13461 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13462 uint32_t u32EffAddr;
13463
13464 /* Handle the disp32 form with no registers first. */
13465 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13466 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13467 else
13468 {
13469 /* Get the register (or SIB) value. */
13470 switch ((bRm & X86_MODRM_RM_MASK))
13471 {
13472 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13473 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13474 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13475 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13476 case 4: /* SIB */
13477 {
13478 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13479
13480 /* Get the index and scale it. */
13481 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13482 {
13483 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13484 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13485 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13486 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13487 case 4: u32EffAddr = 0; /*none */ break;
13488 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13489 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13490 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13491 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13492 }
13493 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13494
13495 /* add base */
13496 switch (bSib & X86_SIB_BASE_MASK)
13497 {
13498 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13499 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13500 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13501 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13502 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13503 case 5:
13504 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13505 {
13506 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13507 SET_SS_DEF();
13508 }
13509 else
13510 {
13511 uint32_t u32Disp;
13512 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13513 u32EffAddr += u32Disp;
13514 }
13515 break;
13516 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13517 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13518 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13519 }
13520 break;
13521 }
13522 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13523 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13524 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13525 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13526 }
13527
13528 /* Get and add the displacement. */
13529 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13530 {
13531 case 0:
13532 break;
13533 case 1:
13534 {
13535 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13536 u32EffAddr += i8Disp;
13537 break;
13538 }
13539 case 2:
13540 {
13541 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13542 u32EffAddr += u32Disp;
13543 break;
13544 }
13545 default:
13546 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13547 }
13548 }
13549
13550 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13551 {
13552 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13553 return u32EffAddr;
13554 }
13555 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13556 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13557 return u32EffAddr & UINT16_MAX;
13558 }
13559
13560 uint64_t u64EffAddr;
13561
13562 /* Handle the rip+disp32 form with no registers first. */
13563 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13564 {
13565 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13566 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13567 }
13568 else
13569 {
13570 /* Get the register (or SIB) value. */
13571 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13572 {
13573 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13574 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13575 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13576 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13577 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13578 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13579 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13580 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13581 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13582 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13583 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13584 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13585 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13586 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13587 /* SIB */
13588 case 4:
13589 case 12:
13590 {
13591 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13592
13593 /* Get the index and scale it. */
13594 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13595 {
13596 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13597 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13598 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13599 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13600 case 4: u64EffAddr = 0; /*none */ break;
13601 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13602 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13603 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13604 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13605 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13606 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13607 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13608 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13609 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13610 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13611 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13612 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13613 }
13614 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13615
13616 /* add base */
13617 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13618 {
13619 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13620 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13621 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13622 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13623 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13624 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13625 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13626 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13627 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13628 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13629 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13630 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13631 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13632 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13633 /* complicated encodings */
13634 case 5:
13635 case 13:
13636 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13637 {
13638 if (!pVCpu->iem.s.uRexB)
13639 {
13640 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13641 SET_SS_DEF();
13642 }
13643 else
13644 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13645 }
13646 else
13647 {
13648 uint32_t u32Disp;
13649 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13650 u64EffAddr += (int32_t)u32Disp;
13651 }
13652 break;
13653 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13654 }
13655 break;
13656 }
13657 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13658 }
13659
13660 /* Get and add the displacement. */
13661 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13662 {
13663 case 0:
13664 break;
13665 case 1:
13666 {
13667 int8_t i8Disp;
13668 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13669 u64EffAddr += i8Disp;
13670 break;
13671 }
13672 case 2:
13673 {
13674 uint32_t u32Disp;
13675 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13676 u64EffAddr += (int32_t)u32Disp;
13677 break;
13678 }
13679 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13680 }
13681
13682 }
13683
13684 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13685 {
13686 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13687 return u64EffAddr;
13688 }
13689 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13690 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13691 return u64EffAddr & UINT32_MAX;
13692}
13693#endif /* IEM_WITH_SETJMP */
13694
13695/** @} */
13696
13697
13698
13699/*
13700 * Include the instructions
13701 */
13702#include "IEMAllInstructions.cpp.h"
13703
13704
13705
13706#ifdef LOG_ENABLED
13707/**
13708 * Logs the current instruction.
13709 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13710 * @param fSameCtx Set if we have the same context information as the VMM,
13711 * clear if we may have already executed an instruction in
13712 * our debug context. When clear, we assume IEMCPU holds
13713 * valid CPU mode info.
13714  *                      Note: this parameter is now misleading and obsolete.
13715  *
13716 * @param pszFunction The IEM function doing the execution.
13717 */
13718IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13719{
13720# ifdef IN_RING3
13721 if (LogIs2Enabled())
13722 {
13723 char szInstr[256];
13724 uint32_t cbInstr = 0;
13725 if (fSameCtx)
13726 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13727 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13728 szInstr, sizeof(szInstr), &cbInstr);
13729 else
13730 {
13731 uint32_t fFlags = 0;
13732 switch (pVCpu->iem.s.enmCpuMode)
13733 {
13734 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13735 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13736 case IEMMODE_16BIT:
13737 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13738 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13739 else
13740 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13741 break;
13742 }
13743 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13744 szInstr, sizeof(szInstr), &cbInstr);
13745 }
13746
13747 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13748 Log2(("**** %s\n"
13749 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13750 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13751 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13752 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13753 " %s\n"
13754 , pszFunction,
13755 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13756 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13757 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13758 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13759 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13760 szInstr));
13761
13762 if (LogIs3Enabled())
13763 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13764 }
13765 else
13766# endif
13767 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13768 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13769 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13770}
13771#endif /* LOG_ENABLED */
13772
13773
13774/**
13775  * Makes status code adjustments (pass up from I/O and access handlers)
13776 * as well as maintaining statistics.
13777 *
13778 * @returns Strict VBox status code to pass up.
13779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13780 * @param rcStrict The status from executing an instruction.
13781 */
13782DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13783{
13784 if (rcStrict != VINF_SUCCESS)
13785 {
13786 if (RT_SUCCESS(rcStrict))
13787 {
13788 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13789 || rcStrict == VINF_IOM_R3_IOPORT_READ
13790 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13791 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13792 || rcStrict == VINF_IOM_R3_MMIO_READ
13793 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13794 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13795 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13796 || rcStrict == VINF_CPUM_R3_MSR_READ
13797 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13798 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13799 || rcStrict == VINF_EM_RAW_TO_R3
13800 || rcStrict == VINF_EM_TRIPLE_FAULT
13801 || rcStrict == VINF_GIM_R3_HYPERCALL
13802 /* raw-mode / virt handlers only: */
13803 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13804 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13805 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13806 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13807 || rcStrict == VINF_SELM_SYNC_GDT
13808 || rcStrict == VINF_CSAM_PENDING_ACTION
13809 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13810 /* nested hw.virt codes: */
13811 || rcStrict == VINF_VMX_VMEXIT
13812 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13813 || rcStrict == VINF_SVM_VMEXIT
13814 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13815/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13816 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13817#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13818 if ( rcStrict == VINF_VMX_VMEXIT
13819 && rcPassUp == VINF_SUCCESS)
13820 rcStrict = VINF_SUCCESS;
13821 else
13822#endif
13823#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13824 if ( rcStrict == VINF_SVM_VMEXIT
13825 && rcPassUp == VINF_SUCCESS)
13826 rcStrict = VINF_SUCCESS;
13827 else
13828#endif
13829 if (rcPassUp == VINF_SUCCESS)
13830 pVCpu->iem.s.cRetInfStatuses++;
13831 else if ( rcPassUp < VINF_EM_FIRST
13832 || rcPassUp > VINF_EM_LAST
13833 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13834 {
13835 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13836 pVCpu->iem.s.cRetPassUpStatus++;
13837 rcStrict = rcPassUp;
13838 }
13839 else
13840 {
13841 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13842 pVCpu->iem.s.cRetInfStatuses++;
13843 }
13844 }
13845 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13846 pVCpu->iem.s.cRetAspectNotImplemented++;
13847 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13848 pVCpu->iem.s.cRetInstrNotImplemented++;
13849 else
13850 pVCpu->iem.s.cRetErrStatuses++;
13851 }
13852 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13853 {
13854 pVCpu->iem.s.cRetPassUpStatus++;
13855 rcStrict = pVCpu->iem.s.rcPassUp;
13856 }
13857
13858 return rcStrict;
13859}
13860
13861
13862/**
13863 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13864 * IEMExecOneWithPrefetchedByPC.
13865 *
13866 * Similar code is found in IEMExecLots.
13867 *
13868 * @return Strict VBox status code.
13869 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13870 * @param fExecuteInhibit If set, execute the instruction following CLI,
13871 * POP SS and MOV SS,GR.
13872 * @param pszFunction The calling function name.
13873 */
13874DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13875{
13876 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13877 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13878 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13879 RT_NOREF_PV(pszFunction);
13880
13881#ifdef IEM_WITH_SETJMP
13882 VBOXSTRICTRC rcStrict;
13883 jmp_buf JmpBuf;
13884 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13885 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13886 if ((rcStrict = setjmp(JmpBuf)) == 0)
13887 {
13888 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13889 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13890 }
13891 else
13892 pVCpu->iem.s.cLongJumps++;
13893 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13894#else
13895 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13896 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13897#endif
13898 if (rcStrict == VINF_SUCCESS)
13899 pVCpu->iem.s.cInstructions++;
13900 if (pVCpu->iem.s.cActiveMappings > 0)
13901 {
13902 Assert(rcStrict != VINF_SUCCESS);
13903 iemMemRollback(pVCpu);
13904 }
13905 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13906 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13907 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13908
13909//#ifdef DEBUG
13910// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13911//#endif
13912
13913#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13914 /*
13915 * Perform any VMX nested-guest instruction boundary actions.
13916 *
13917 * If any of these causes a VM-exit, we must skip executing the next
13918 * instruction (would run into stale page tables). A VM-exit makes sure
13919 * there is no interrupt-inhibition, so that should ensure we don't go
13920 * to try execute the next instruction. Clearing fExecuteInhibit is
13921 * problematic because of the setjmp/longjmp clobbering above.
13922 */
13923 if ( rcStrict == VINF_SUCCESS
13924 && CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
13925 {
13926 bool fCheckRemainingIntercepts = true;
13927 /* TPR-below threshold/APIC write has the highest priority. */
13928 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13929 {
13930 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13931 fCheckRemainingIntercepts = false;
13932 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13933 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13934 }
13935 /* MTF takes priority over VMX-preemption timer. */
13936 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
13937 {
13938 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13939 fCheckRemainingIntercepts = false;
13940 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13941 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13942 }
13943 /* VMX preemption timer takes priority over NMI-window exits. */
13944 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13945 {
13946 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13947 if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
13948 rcStrict = VINF_SUCCESS;
13949 else
13950 {
13951 fCheckRemainingIntercepts = false;
13952 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13953 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13954 }
13955 }
13956
13957 /*
13958 * Check remaining intercepts.
13959 *
13960 * NMI-window and Interrupt-window VM-exits.
13961 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13962 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13963 *
13964 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13965 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13966 */
13967 if ( fCheckRemainingIntercepts
13968 && !TRPMHasTrap(pVCpu)
13969 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
13970 {
13971 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
13972 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13973 && CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
13974 {
13975 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13977 }
13978 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13979 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
13980 {
13981 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13982 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13983 }
13984 }
13985 }
13986#endif
13987
13988 /* Execute the next instruction as well if a cli, pop ss or
13989 mov ss, Gr has just completed successfully. */
13990 if ( fExecuteInhibit
13991 && rcStrict == VINF_SUCCESS
13992 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13993 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13994 {
13995 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13996 if (rcStrict == VINF_SUCCESS)
13997 {
13998#ifdef LOG_ENABLED
13999 iemLogCurInstr(pVCpu, false, pszFunction);
14000#endif
14001#ifdef IEM_WITH_SETJMP
14002 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14003 if ((rcStrict = setjmp(JmpBuf)) == 0)
14004 {
14005 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14006 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14007 }
14008 else
14009 pVCpu->iem.s.cLongJumps++;
14010 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14011#else
14012 IEM_OPCODE_GET_NEXT_U8(&b);
14013 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14014#endif
14015 if (rcStrict == VINF_SUCCESS)
14016 pVCpu->iem.s.cInstructions++;
14017 if (pVCpu->iem.s.cActiveMappings > 0)
14018 {
14019 Assert(rcStrict != VINF_SUCCESS);
14020 iemMemRollback(pVCpu);
14021 }
14022 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14023 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14024 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14025 }
14026 else if (pVCpu->iem.s.cActiveMappings > 0)
14027 iemMemRollback(pVCpu);
14028 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14029 }
14030
14031 /*
14032 * Return value fiddling, statistics and sanity assertions.
14033 */
14034 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14035
14036 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14038 return rcStrict;
14039}
14040
14041
14042/**
14043 * Execute one instruction.
14044 *
14045 * @return Strict VBox status code.
14046 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14047 */
14048VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14049{
14050#ifdef LOG_ENABLED
14051 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14052#endif
14053
14054 /*
14055 * Do the decoding and emulation.
14056 */
14057 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14058 if (rcStrict == VINF_SUCCESS)
14059 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14060 else if (pVCpu->iem.s.cActiveMappings > 0)
14061 iemMemRollback(pVCpu);
14062
14063 if (rcStrict != VINF_SUCCESS)
14064 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14065 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14066 return rcStrict;
14067}
14068
14069
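/**
 * Executes one instruction and returns the number of bytes written by it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure.
 * @param   pcbWritten  Where to return the number of bytes the instruction
 *                      wrote.  Optional.
 */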
14070VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14071{
14072 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14073
14074 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14075 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14076 if (rcStrict == VINF_SUCCESS)
14077 {
14078 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14079 if (pcbWritten)
14080 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14081 }
14082 else if (pVCpu->iem.s.cActiveMappings > 0)
14083 iemMemRollback(pVCpu);
14084
14085 return rcStrict;
14086}
14087
14088
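/**
 * Executes one instruction, using the caller supplied opcode bytes when they
 * cover the current RIP and prefetching them as usual otherwise.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore        The context core structure.
 * @param   OpcodeBytesPC   The PC of the prefetched opcode bytes.
 * @param   pvOpcodeBytes   Prefetched opcode bytes.
 * @param   cbOpcodeBytes   Number of prefetched bytes.
 */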
14089VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14090 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14091{
14092 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14093
14094 VBOXSTRICTRC rcStrict;
14095 if ( cbOpcodeBytes
14096 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14097 {
14098 iemInitDecoder(pVCpu, false);
14099#ifdef IEM_WITH_CODE_TLB
14100 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14101 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14102 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14103 pVCpu->iem.s.offCurInstrStart = 0;
14104 pVCpu->iem.s.offInstrNextByte = 0;
14105#else
14106 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14107 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14108#endif
14109 rcStrict = VINF_SUCCESS;
14110 }
14111 else
14112 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14113 if (rcStrict == VINF_SUCCESS)
14114 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14115 else if (pVCpu->iem.s.cActiveMappings > 0)
14116 iemMemRollback(pVCpu);
14117
14118 return rcStrict;
14119}
14120
14121
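/**
 * Executes one instruction with access handlers bypassed, returning the number
 * of bytes written by it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtxCore    The context core structure.
 * @param   pcbWritten  Where to return the number of bytes the instruction
 *                      wrote.  Optional.
 */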
14122VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14123{
14124 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14125
14126 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14127 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14128 if (rcStrict == VINF_SUCCESS)
14129 {
14130 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14131 if (pcbWritten)
14132 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14133 }
14134 else if (pVCpu->iem.s.cActiveMappings > 0)
14135 iemMemRollback(pVCpu);
14136
14137 return rcStrict;
14138}
14139
14140
14141VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14142 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14143{
14144 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14145
14146 VBOXSTRICTRC rcStrict;
14147 if ( cbOpcodeBytes
14148 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14149 {
14150 iemInitDecoder(pVCpu, true);
14151#ifdef IEM_WITH_CODE_TLB
14152 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14153 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14154 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14155 pVCpu->iem.s.offCurInstrStart = 0;
14156 pVCpu->iem.s.offInstrNextByte = 0;
14157#else
14158 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14159 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14160#endif
14161 rcStrict = VINF_SUCCESS;
14162 }
14163 else
14164 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14165 if (rcStrict == VINF_SUCCESS)
14166 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14167 else if (pVCpu->iem.s.cActiveMappings > 0)
14168 iemMemRollback(pVCpu);
14169
14170 return rcStrict;
14171}
14172
14173
14174/**
14175  * For debugging DISGetParamSize, this may come in handy.
14176 *
14177 * @returns Strict VBox status code.
14178 * @param pVCpu The cross context virtual CPU structure of the
14179 * calling EMT.
14180 * @param pCtxCore The context core structure.
14181 * @param OpcodeBytesPC The PC of the opcode bytes.
14182  * @param pvOpcodeBytes   Prefetched opcode bytes.
14183 * @param cbOpcodeBytes Number of prefetched bytes.
14184 * @param pcbWritten Where to return the number of bytes written.
14185 * Optional.
14186 */
14187VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14188 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14189 uint32_t *pcbWritten)
14190{
14191 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14192
14193 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14194 VBOXSTRICTRC rcStrict;
14195 if ( cbOpcodeBytes
14196 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14197 {
14198 iemInitDecoder(pVCpu, true);
14199#ifdef IEM_WITH_CODE_TLB
14200 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14201 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14202 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14203 pVCpu->iem.s.offCurInstrStart = 0;
14204 pVCpu->iem.s.offInstrNextByte = 0;
14205#else
14206 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14207 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14208#endif
14209 rcStrict = VINF_SUCCESS;
14210 }
14211 else
14212 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14213 if (rcStrict == VINF_SUCCESS)
14214 {
14215 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14216 if (pcbWritten)
14217 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14218 }
14219 else if (pVCpu->iem.s.cActiveMappings > 0)
14220 iemMemRollback(pVCpu);
14221
14222 return rcStrict;
14223}
14224
14225
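/**
 * Executes instructions until a pending force flag, a non-zero status, a timer
 * poll request or the given instruction limit stops it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure of the calling EMT.
 * @param   cMaxInstructions    The maximum number of instructions to execute.
 * @param   cPollRate           Timer polling interval mask; must be a power of
 *                              two minus one as it is ANDed with the
 *                              instruction countdown.
 * @param   pcInstructions      Where to return the number of instructions
 *                              executed.  Optional.
 */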
14226VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14227{
14228 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14229 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
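    /* cPollRate is used as a mask on the instruction countdown further down, hence the power-of-two-minus-one requirement. */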
14230
14231 /*
14232 * See if there is an interrupt pending in TRPM, inject it if we can.
14233 */
14234 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14235#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14236 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14237 if (fIntrEnabled)
14238 {
14239 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14240 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14241 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14242 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14243 else
14244 {
14245 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14246 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14247 }
14248 }
14249#else
14250 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14251#endif
14252
14253 /** @todo What if we are injecting an exception and not an interrupt? Is that
14254 * possible here? */
14255 if ( fIntrEnabled
14256 && TRPMHasTrap(pVCpu)
14257 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14258 {
14259 uint8_t u8TrapNo;
14260 TRPMEVENT enmType;
14261 RTGCUINT uErrCode;
14262 RTGCPTR uCr2;
14263 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14264 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14265 TRPMResetTrap(pVCpu);
14266#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14267 /* Injecting an event may cause a VM-exit. */
14268 if ( rcStrict != VINF_SUCCESS
14269 && rcStrict != VINF_IEM_RAISED_XCPT)
14270 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14271#else
14272 NOREF(rcStrict);
14273#endif
14274 }
14275
14276 /*
14277 * Initial decoder init w/ prefetch, then setup setjmp.
14278 */
14279 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14280 if (rcStrict == VINF_SUCCESS)
14281 {
14282#ifdef IEM_WITH_SETJMP
14283 jmp_buf JmpBuf;
14284 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14285 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14286 pVCpu->iem.s.cActiveMappings = 0;
14287 if ((rcStrict = setjmp(JmpBuf)) == 0)
14288#endif
14289 {
14290 /*
14291              * The run loop.  We limit ourselves to the caller specified cMaxInstructions.
14292 */
14293 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14294 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14295 for (;;)
14296 {
14297 /*
14298 * Log the state.
14299 */
14300#ifdef LOG_ENABLED
14301 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14302#endif
14303
14304 /*
14305 * Do the decoding and emulation.
14306 */
14307 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14308 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14309 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14310 {
14311 Assert(pVCpu->iem.s.cActiveMappings == 0);
14312 pVCpu->iem.s.cInstructions++;
14313 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14314 {
14315 uint64_t fCpu = pVCpu->fLocalForcedActions
14316 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14317 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14318 | VMCPU_FF_TLB_FLUSH
14319 | VMCPU_FF_INHIBIT_INTERRUPTS
14320 | VMCPU_FF_BLOCK_NMIS
14321 | VMCPU_FF_UNHALT ));
14322
14323 if (RT_LIKELY( ( !fCpu
14324 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14325 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14326 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14327 {
14328 if (cMaxInstructionsGccStupidity-- > 0)
14329 {
14330                         /* Poll timers every now and then according to the caller's specs. */
14331 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14332 || !TMTimerPollBool(pVM, pVCpu))
14333 {
14334 Assert(pVCpu->iem.s.cActiveMappings == 0);
14335 iemReInitDecoder(pVCpu);
14336 continue;
14337 }
14338 }
14339 }
14340 }
14341 Assert(pVCpu->iem.s.cActiveMappings == 0);
14342 }
14343 else if (pVCpu->iem.s.cActiveMappings > 0)
14344 iemMemRollback(pVCpu);
14345 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14346 break;
14347 }
14348 }
14349#ifdef IEM_WITH_SETJMP
14350 else
14351 {
14352 if (pVCpu->iem.s.cActiveMappings > 0)
14353 iemMemRollback(pVCpu);
14354# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14355 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14356# endif
14357 pVCpu->iem.s.cLongJumps++;
14358 }
14359 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14360#endif
14361
14362 /*
14363 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14364 */
14365 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14366 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14367 }
14368 else
14369 {
14370 if (pVCpu->iem.s.cActiveMappings > 0)
14371 iemMemRollback(pVCpu);
14372
14373#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14374 /*
14375 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14376 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14377 */
14378 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14379#endif
14380 }
14381
14382 /*
14383 * Maybe re-enter raw-mode and log.
14384 */
14385 if (rcStrict != VINF_SUCCESS)
14386 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14387 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14388 if (pcInstructions)
14389 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14390 return rcStrict;
14391}
14392
14393
14394/**
14395 * Interface used by EMExecuteExec, does exit statistics and limits.
14396 *
14397 * @returns Strict VBox status code.
14398 * @param pVCpu The cross context virtual CPU structure.
14399 * @param fWillExit To be defined.
14400 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14401 * @param cMaxInstructions Maximum number of instructions to execute.
14402 * @param cMaxInstructionsWithoutExits
14403 * The max number of instructions without exits.
14404 * @param pStats Where to return statistics.
14405 */
14406VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14407 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14408{
14409 NOREF(fWillExit); /** @todo define flexible exit crits */
14410
14411 /*
14412 * Initialize return stats.
14413 */
14414 pStats->cInstructions = 0;
14415 pStats->cExits = 0;
14416 pStats->cMaxExitDistance = 0;
14417 pStats->cReserved = 0;
14418
14419 /*
14420 * Initial decoder init w/ prefetch, then setup setjmp.
14421 */
14422 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14423 if (rcStrict == VINF_SUCCESS)
14424 {
14425#ifdef IEM_WITH_SETJMP
14426 jmp_buf JmpBuf;
14427 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14428 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14429 pVCpu->iem.s.cActiveMappings = 0;
14430 if ((rcStrict = setjmp(JmpBuf)) == 0)
14431#endif
14432 {
14433#ifdef IN_RING0
14434 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14435#endif
14436 uint32_t cInstructionSinceLastExit = 0;
14437
14438 /*
14439              * The run loop.  We limit ourselves to the caller specified cMaxInstructions.
14440 */
14441 PVM pVM = pVCpu->CTX_SUFF(pVM);
14442 for (;;)
14443 {
14444 /*
14445 * Log the state.
14446 */
14447#ifdef LOG_ENABLED
14448 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14449#endif
14450
14451 /*
14452 * Do the decoding and emulation.
14453 */
14454 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14455
14456 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14457 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14458
14459 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14460 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14461 {
14462 pStats->cExits += 1;
14463 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14464 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14465 cInstructionSinceLastExit = 0;
14466 }
14467
14468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14469 {
14470 Assert(pVCpu->iem.s.cActiveMappings == 0);
14471 pVCpu->iem.s.cInstructions++;
14472 pStats->cInstructions++;
14473 cInstructionSinceLastExit++;
14474 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14475 {
14476 uint64_t fCpu = pVCpu->fLocalForcedActions
14477 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14478 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14479 | VMCPU_FF_TLB_FLUSH
14480 | VMCPU_FF_INHIBIT_INTERRUPTS
14481 | VMCPU_FF_BLOCK_NMIS
14482 | VMCPU_FF_UNHALT ));
14483
14484 if (RT_LIKELY( ( ( !fCpu
14485 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14486 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14487 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14488 || pStats->cInstructions < cMinInstructions))
14489 {
14490 if (pStats->cInstructions < cMaxInstructions)
14491 {
14492 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14493 {
14494#ifdef IN_RING0
14495 if ( !fCheckPreemptionPending
14496 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14497#endif
14498 {
14499 Assert(pVCpu->iem.s.cActiveMappings == 0);
14500 iemReInitDecoder(pVCpu);
14501 continue;
14502 }
14503#ifdef IN_RING0
14504 rcStrict = VINF_EM_RAW_INTERRUPT;
14505 break;
14506#endif
14507 }
14508 }
14509 }
14510 Assert(!(fCpu & VMCPU_FF_IEM));
14511 }
14512 Assert(pVCpu->iem.s.cActiveMappings == 0);
14513 }
14514 else if (pVCpu->iem.s.cActiveMappings > 0)
14515 iemMemRollback(pVCpu);
14516 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14517 break;
14518 }
14519 }
14520#ifdef IEM_WITH_SETJMP
14521 else
14522 {
14523 if (pVCpu->iem.s.cActiveMappings > 0)
14524 iemMemRollback(pVCpu);
14525 pVCpu->iem.s.cLongJumps++;
14526 }
14527 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14528#endif
14529
14530 /*
14531 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14532 */
14533 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14534 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14535 }
14536 else
14537 {
14538 if (pVCpu->iem.s.cActiveMappings > 0)
14539 iemMemRollback(pVCpu);
14540
14541#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14542 /*
14543 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14544 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14545 */
14546 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14547#endif
14548 }
14549
14550 /*
14551 * Maybe re-enter raw-mode and log.
14552 */
14553 if (rcStrict != VINF_SUCCESS)
14554 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14555 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14556 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14557 return rcStrict;
14558}
14559
14560
14561/**
14562 * Injects a trap, fault, abort, software interrupt or external interrupt.
14563 *
14564 * The parameter list matches TRPMQueryTrapAll pretty closely.
14565 *
14566 * @returns Strict VBox status code.
14567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14568 * @param u8TrapNo The trap number.
14569 * @param enmType What type is it (trap/fault/abort), software
14570 * interrupt or hardware interrupt.
14571 * @param uErrCode The error code if applicable.
14572 * @param uCr2 The CR2 value if applicable.
14573 * @param cbInstr The instruction length (only relevant for
14574 * software interrupts).
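 *
 * @remarks A minimal usage sketch (the interrupt vector below is made up for
 *          illustration):
 * @code
 *          VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, 0x20 /*u8TrapNo*/, TRPM_HARDWARE_INT,
 *                                                0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
 * @endcode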
14575 */
14576VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14577 uint8_t cbInstr)
14578{
14579 iemInitDecoder(pVCpu, false);
14580#ifdef DBGFTRACE_ENABLED
14581 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14582 u8TrapNo, enmType, uErrCode, uCr2);
14583#endif
14584
14585 uint32_t fFlags;
14586 switch (enmType)
14587 {
14588 case TRPM_HARDWARE_INT:
14589 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14590 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14591 uErrCode = uCr2 = 0;
14592 break;
14593
14594 case TRPM_SOFTWARE_INT:
14595 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14596 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14597 uErrCode = uCr2 = 0;
14598 break;
14599
14600 case TRPM_TRAP:
14601 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14602 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14603 if (u8TrapNo == X86_XCPT_PF)
14604 fFlags |= IEM_XCPT_FLAGS_CR2;
14605 switch (u8TrapNo)
14606 {
14607 case X86_XCPT_DF:
14608 case X86_XCPT_TS:
14609 case X86_XCPT_NP:
14610 case X86_XCPT_SS:
14611 case X86_XCPT_PF:
14612 case X86_XCPT_AC:
14613 fFlags |= IEM_XCPT_FLAGS_ERR;
14614 break;
14615 }
14616 break;
14617
14618 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14619 }
14620
14621 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14622
14623 if (pVCpu->iem.s.cActiveMappings > 0)
14624 iemMemRollback(pVCpu);
14625
14626 return rcStrict;
14627}
14628
14629
14630/**
14631 * Injects the active TRPM event.
14632 *
14633 * @returns Strict VBox status code.
14634 * @param pVCpu The cross context virtual CPU structure.
14635 */
14636VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14637{
14638#ifndef IEM_IMPLEMENTS_TASKSWITCH
14639 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14640#else
14641 uint8_t u8TrapNo;
14642 TRPMEVENT enmType;
14643 RTGCUINT uErrCode;
14644 RTGCUINTPTR uCr2;
14645 uint8_t cbInstr;
14646 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14647 if (RT_FAILURE(rc))
14648 return rc;
14649
14650 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14651#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14652 if (rcStrict == VINF_SVM_VMEXIT)
14653 rcStrict = VINF_SUCCESS;
14654#endif
14655#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14656 if (rcStrict == VINF_VMX_VMEXIT)
14657 rcStrict = VINF_SUCCESS;
14658#endif
14659 /** @todo Are there any other codes that imply the event was successfully
14660 * delivered to the guest? See @bugref{6607}. */
14661 if ( rcStrict == VINF_SUCCESS
14662 || rcStrict == VINF_IEM_RAISED_XCPT)
14663 TRPMResetTrap(pVCpu);
14664
14665 return rcStrict;
14666#endif
14667}
14668
14669
14670VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14671{
14672 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14673 return VERR_NOT_IMPLEMENTED;
14674}
14675
14676
14677VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14678{
14679 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14680 return VERR_NOT_IMPLEMENTED;
14681}
14682
14683
14684#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14685/**
14686 * Executes an IRET instruction with default operand size.
14687 *
14688 * This is for PATM.
14689 *
14690 * @returns VBox status code.
14691 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14692 * @param pCtxCore The register frame.
14693 */
14694VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14695{
14696 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14697
14698 iemCtxCoreToCtx(pCtx, pCtxCore);
14699 iemInitDecoder(pVCpu);
14700 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14701 if (rcStrict == VINF_SUCCESS)
14702 iemCtxToCtxCore(pCtxCore, pCtx);
14703 else
14704 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14705                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14706 return rcStrict;
14707}
14708#endif
14709
14710
14711/**
14712 * Macro used by the IEMExec* method to check the given instruction length.
14713 *
14714 * Will return on failure!
14715 *
14716 * @param a_cbInstr The given instruction length.
14717 * @param a_cbMin The minimum length.
14718 */
14719#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14720 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14721 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
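
/*
 * Note on the check above: doing the compare in unsigned arithmetic covers
 * both bounds at once.  For example, with a_cbMin = 2 the right-hand side is
 * 15 - 2 = 13; a too-short cbInstr of 1 yields 1 - 2 = 0xffffffff > 13 and a
 * too-long cbInstr of 16 yields 16 - 2 = 14 > 13, so both are rejected,
 * while any length in the valid 2..15 range passes.
 */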
14722
14723
14724/**
14725 * Calls iemUninitExec and then iemExecStatusCodeFiddling on the result.
14728 *
14729 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14731 * @param rcStrict The status code to fiddle.
14732 */
14733DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14734{
14735 iemUninitExec(pVCpu);
14736 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14737}
14738
14739
14740/**
14741 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14742 *
14743 * This API ASSUMES that the caller has already verified that the guest code is
14744 * allowed to access the I/O port. (The I/O port is in the DX register in the
14745 * guest state.)
14746 *
14747 * @returns Strict VBox status code.
14748 * @param pVCpu The cross context virtual CPU structure.
14749 * @param cbValue The size of the I/O port access (1, 2, or 4).
14750 * @param enmAddrMode The addressing mode.
14751 * @param fRepPrefix Indicates whether a repeat prefix is used
14752 * (doesn't matter which for this instruction).
14753 * @param cbInstr The instruction length in bytes.
14754 * @param iEffSeg The effective segment address.
14755 * @param fIoChecked Whether the access to the I/O port has been
14756 * checked or not. It's typically checked in the
14757 * HM scenario.
14758 */
14759VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14760 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14761{
14762 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14763 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14764
14765 /*
14766 * State init.
14767 */
14768 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14769
14770 /*
14771 * Switch orgy for getting to the right handler.
14772 */
14773 VBOXSTRICTRC rcStrict;
14774 if (fRepPrefix)
14775 {
14776 switch (enmAddrMode)
14777 {
14778 case IEMMODE_16BIT:
14779 switch (cbValue)
14780 {
14781 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14782 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14783 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14784 default:
14785 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14786 }
14787 break;
14788
14789 case IEMMODE_32BIT:
14790 switch (cbValue)
14791 {
14792 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14793 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14794 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14795 default:
14796 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14797 }
14798 break;
14799
14800 case IEMMODE_64BIT:
14801 switch (cbValue)
14802 {
14803 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14804 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14805 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14806 default:
14807 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14808 }
14809 break;
14810
14811 default:
14812 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14813 }
14814 }
14815 else
14816 {
14817 switch (enmAddrMode)
14818 {
14819 case IEMMODE_16BIT:
14820 switch (cbValue)
14821 {
14822 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14823 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14824 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14825 default:
14826 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14827 }
14828 break;
14829
14830 case IEMMODE_32BIT:
14831 switch (cbValue)
14832 {
14833 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14834 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14835 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14836 default:
14837 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14838 }
14839 break;
14840
14841 case IEMMODE_64BIT:
14842 switch (cbValue)
14843 {
14844 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14845 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14846 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14847 default:
14848 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14849 }
14850 break;
14851
14852 default:
14853 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14854 }
14855 }
14856
14857 if (pVCpu->iem.s.cActiveMappings)
14858 iemMemRollback(pVCpu);
14859
14860 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14861}
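
/*
 * Illustrative sketch only: how a hypothetical HM exit handler might hand a
 * "rep outsb" off to this API after it has validated the I/O port access
 * itself.  cbExitInstr and iEffSeg are assumed to come from the hardware
 * provided exit information.
 */
#if 0
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbExitInstr, uint8_t iEffSeg)
{
    /* 8-bit accesses, 64-bit address size, REP prefix, I/O access already checked. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_64BIT, true /*fRepPrefix*/,
                                cbExitInstr, iEffSeg, true /*fIoChecked*/);
}
#endif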
14862
14863
14864/**
14865 * Interface for HM and EM for executing string I/O IN (read) instructions.
14866 *
14867 * This API ASSUMES that the caller has already verified that the guest code is
14868 * allowed to access the I/O port. (The I/O port is in the DX register in the
14869 * guest state.)
14870 *
14871 * @returns Strict VBox status code.
14872 * @param pVCpu The cross context virtual CPU structure.
14873 * @param cbValue The size of the I/O port access (1, 2, or 4).
14874 * @param enmAddrMode The addressing mode.
14875 * @param fRepPrefix Indicates whether a repeat prefix is used
14876 * (doesn't matter which for this instruction).
14877 * @param cbInstr The instruction length in bytes.
14878 * @param fIoChecked Whether the access to the I/O port has been
14879 * checked or not. It's typically checked in the
14880 * HM scenario.
14881 */
14882VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14883 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14884{
14885 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14886
14887 /*
14888 * State init.
14889 */
14890 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14891
14892 /*
14893 * Switch orgy for getting to the right handler.
14894 */
14895 VBOXSTRICTRC rcStrict;
14896 if (fRepPrefix)
14897 {
14898 switch (enmAddrMode)
14899 {
14900 case IEMMODE_16BIT:
14901 switch (cbValue)
14902 {
14903 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14904 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14905 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14906 default:
14907 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14908 }
14909 break;
14910
14911 case IEMMODE_32BIT:
14912 switch (cbValue)
14913 {
14914 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14915 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14916 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14917 default:
14918 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14919 }
14920 break;
14921
14922 case IEMMODE_64BIT:
14923 switch (cbValue)
14924 {
14925 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14926 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14927 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14928 default:
14929 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14930 }
14931 break;
14932
14933 default:
14934 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14935 }
14936 }
14937 else
14938 {
14939 switch (enmAddrMode)
14940 {
14941 case IEMMODE_16BIT:
14942 switch (cbValue)
14943 {
14944 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14945 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14946 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14947 default:
14948 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14949 }
14950 break;
14951
14952 case IEMMODE_32BIT:
14953 switch (cbValue)
14954 {
14955 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14956 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14957 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14958 default:
14959 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14960 }
14961 break;
14962
14963 case IEMMODE_64BIT:
14964 switch (cbValue)
14965 {
14966 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14967 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14968 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14969 default:
14970 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14971 }
14972 break;
14973
14974 default:
14975 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14976 }
14977 }
14978
14979 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14980 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14981}
14982
14983
14984/**
14985 * Interface for rawmode to execute an OUT (write) instruction.
14986 *
14987 * @returns Strict VBox status code.
14988 * @param pVCpu The cross context virtual CPU structure.
14989 * @param cbInstr The instruction length in bytes.
14990 * @param u16Port The port to write to.
14991 * @param fImm Whether the port is specified using an immediate operand or
14992 * using the implicit DX register.
14993 * @param cbReg The register size.
14994 *
14995 * @remarks In ring-0 not all of the state needs to be synced in.
14996 */
14997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
14998{
14999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15000 Assert(cbReg <= 4 && cbReg != 3);
15001
15002 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15003 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15004 Assert(!pVCpu->iem.s.cActiveMappings);
15005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15006}
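
/*
 * Illustrative sketch only: forwarding a decoded "out dx, al" to the API
 * above.  The port is taken from DX (fImm=false) and the access is a single
 * byte (cbReg=1); the helper name and the u16Port/cbExitInstr inputs are
 * hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleHandleOutDxAl(PVMCPUCC pVCpu, uint8_t cbExitInstr, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, cbExitInstr, u16Port, false /*fImm*/, 1 /*cbReg*/);
}
#endif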
15007
15008
15009/**
15010 * Interface for rawmode to execute an IN (read) instruction.
15011 *
15012 * @returns Strict VBox status code.
15013 * @param pVCpu The cross context virtual CPU structure.
15014 * @param cbInstr The instruction length in bytes.
15015 * @param u16Port The port to read.
15016 * @param fImm Whether the port is specified using an immediate operand or
15017 * using the implicit DX.
15018 * @param cbReg The register size.
15019 */
15020VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15021{
15022 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15023 Assert(cbReg <= 4 && cbReg != 3);
15024
15025 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15026 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15027 Assert(!pVCpu->iem.s.cActiveMappings);
15028 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15029}
15030
15031
15032/**
15033 * Interface for HM and EM to write to a CRx register.
15034 *
15035 * @returns Strict VBox status code.
15036 * @param pVCpu The cross context virtual CPU structure.
15037 * @param cbInstr The instruction length in bytes.
15038 * @param iCrReg The control register number (destination).
15039 * @param iGReg The general purpose register number (source).
15040 *
15041 * @remarks In ring-0 not all of the state needs to be synced in.
15042 */
15043VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15044{
15045 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15046 Assert(iCrReg < 16);
15047 Assert(iGReg < 16);
15048
15049 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15050 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15051 Assert(!pVCpu->iem.s.cActiveMappings);
15052 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15053}
15054
15055
15056/**
15057 * Interface for HM and EM to read from a CRx register.
15058 *
15059 * @returns Strict VBox status code.
15060 * @param pVCpu The cross context virtual CPU structure.
15061 * @param cbInstr The instruction length in bytes.
15062 * @param iGReg The general purpose register number (destination).
15063 * @param iCrReg The control register number (source).
15064 *
15065 * @remarks In ring-0 not all of the state needs to be synced in.
15066 */
15067VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15068{
15069 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15070 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15071 | CPUMCTX_EXTRN_APIC_TPR);
15072 Assert(iCrReg < 16);
15073 Assert(iGReg < 16);
15074
15075 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15076 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15077 Assert(!pVCpu->iem.s.cActiveMappings);
15078 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15079}
15080
15081
15082/**
15083 * Interface for HM and EM to clear the CR0[TS] bit.
15084 *
15085 * @returns Strict VBox status code.
15086 * @param pVCpu The cross context virtual CPU structure.
15087 * @param cbInstr The instruction length in bytes.
15088 *
15089 * @remarks In ring-0 not all of the state needs to be synced in.
15090 */
15091VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15092{
15093 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15094
15095 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15096 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15097 Assert(!pVCpu->iem.s.cActiveMappings);
15098 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15099}
15100
15101
15102/**
15103 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15104 *
15105 * @returns Strict VBox status code.
15106 * @param pVCpu The cross context virtual CPU structure.
15107 * @param cbInstr The instruction length in bytes.
15108 * @param uValue The value to load into CR0.
15109 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15110 * memory operand. Otherwise pass NIL_RTGCPTR.
15111 *
15112 * @remarks In ring-0 not all of the state needs to be synced in.
15113 */
15114VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15115{
15116 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15117
15118 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15119 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15120 Assert(!pVCpu->iem.s.cActiveMappings);
15121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15122}
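
/*
 * Illustrative sketch only: forwarding an intercepted register-form LMSW
 * (e.g. "lmsw ax") to the API above.  Since there is no memory operand,
 * GCPtrEffDst is NIL_RTGCPTR as documented; uNewMsw and cbExitInstr are
 * assumed to come from exit decoding.
 */
#if 0
static VBOXSTRICTRC exampleHandleLmswReg(PVMCPUCC pVCpu, uint8_t cbExitInstr, uint16_t uNewMsw)
{
    return IEMExecDecodedLmsw(pVCpu, cbExitInstr, uNewMsw, NIL_RTGCPTR);
}
#endif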
15123
15124
15125/**
15126 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15127 *
15128 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15129 *
15130 * @returns Strict VBox status code.
15131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15132 * @param cbInstr The instruction length in bytes.
15133 * @remarks In ring-0 not all of the state needs to be synced in.
15134 * @thread EMT(pVCpu)
15135 */
15136VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15137{
15138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15139
15140 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15141 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15142 Assert(!pVCpu->iem.s.cActiveMappings);
15143 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15144}
15145
15146
15147/**
15148 * Interface for HM and EM to emulate the WBINVD instruction.
15149 *
15150 * @returns Strict VBox status code.
15151 * @param pVCpu The cross context virtual CPU structure.
15152 * @param cbInstr The instruction length in bytes.
15153 *
15154 * @remarks In ring-0 not all of the state needs to be synced in.
15155 */
15156VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15157{
15158 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15159
15160 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15161 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15162 Assert(!pVCpu->iem.s.cActiveMappings);
15163 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15164}
15165
15166
15167/**
15168 * Interface for HM and EM to emulate the INVD instruction.
15169 *
15170 * @returns Strict VBox status code.
15171 * @param pVCpu The cross context virtual CPU structure.
15172 * @param cbInstr The instruction length in bytes.
15173 *
15174 * @remarks In ring-0 not all of the state needs to be synced in.
15175 */
15176VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15177{
15178 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15179
15180 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15181 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15182 Assert(!pVCpu->iem.s.cActiveMappings);
15183 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15184}
15185
15186
15187/**
15188 * Interface for HM and EM to emulate the INVLPG instruction.
15189 *
15190 * @returns Strict VBox status code.
15191 * @retval VINF_PGM_SYNC_CR3
15192 *
15193 * @param pVCpu The cross context virtual CPU structure.
15194 * @param cbInstr The instruction length in bytes.
15195 * @param GCPtrPage The effective address of the page to invalidate.
15196 *
15197 * @remarks In ring-0 not all of the state needs to be synced in.
15198 */
15199VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15200{
15201 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15202
15203 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15204 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15205 Assert(!pVCpu->iem.s.cActiveMappings);
15206 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15207}
15208
15209
15210/**
15211 * Interface for HM and EM to emulate the CPUID instruction.
15212 *
15213 * @returns Strict VBox status code.
15214 *
15215 * @param pVCpu The cross context virtual CPU structure.
15216 * @param cbInstr The instruction length in bytes.
15217 *
15218 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15219 */
15220VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15221{
15222 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15223 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15224
15225 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15226 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15227 Assert(!pVCpu->iem.s.cActiveMappings);
15228 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15229}
15230
15231
15232/**
15233 * Interface for HM and EM to emulate the RDPMC instruction.
15234 *
15235 * @returns Strict VBox status code.
15236 *
15237 * @param pVCpu The cross context virtual CPU structure.
15238 * @param cbInstr The instruction length in bytes.
15239 *
15240 * @remarks Not all of the state needs to be synced in.
15241 */
15242VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15243{
15244 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15245 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15246
15247 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15248 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15249 Assert(!pVCpu->iem.s.cActiveMappings);
15250 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15251}
15252
15253
15254/**
15255 * Interface for HM and EM to emulate the RDTSC instruction.
15256 *
15257 * @returns Strict VBox status code.
15258 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15259 *
15260 * @param pVCpu The cross context virtual CPU structure.
15261 * @param cbInstr The instruction length in bytes.
15262 *
15263 * @remarks Not all of the state needs to be synced in.
15264 */
15265VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15266{
15267 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15268 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15269
15270 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15271 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15272 Assert(!pVCpu->iem.s.cActiveMappings);
15273 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15274}
15275
15276
15277/**
15278 * Interface for HM and EM to emulate the RDTSCP instruction.
15279 *
15280 * @returns Strict VBox status code.
15281 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15282 *
15283 * @param pVCpu The cross context virtual CPU structure.
15284 * @param cbInstr The instruction length in bytes.
15285 *
15286 * @remarks Not all of the state needs to be synced in. Recommended
15287 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15288 */
15289VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15290{
15291 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15292 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15293
15294 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15295 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15296 Assert(!pVCpu->iem.s.cActiveMappings);
15297 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15298}
15299
15300
15301/**
15302 * Interface for HM and EM to emulate the RDMSR instruction.
15303 *
15304 * @returns Strict VBox status code.
15305 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15306 *
15307 * @param pVCpu The cross context virtual CPU structure.
15308 * @param cbInstr The instruction length in bytes.
15309 *
15310 * @remarks Not all of the state needs to be synced in. Requires RCX and
15311 * (currently) all MSRs.
15312 */
15313VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15314{
15315 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15316 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15317
15318 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15319 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15320 Assert(!pVCpu->iem.s.cActiveMappings);
15321 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15322}
15323
15324
15325/**
15326 * Interface for HM and EM to emulate the WRMSR instruction.
15327 *
15328 * @returns Strict VBox status code.
15329 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15330 *
15331 * @param pVCpu The cross context virtual CPU structure.
15332 * @param cbInstr The instruction length in bytes.
15333 *
15334 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15335 * and (currently) all MSRs.
15336 */
15337VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15338{
15339 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15340 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15341 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15342
15343 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15344 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15345 Assert(!pVCpu->iem.s.cActiveMappings);
15346 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15347}
15348
15349
15350/**
15351 * Interface for HM and EM to emulate the MONITOR instruction.
15352 *
15353 * @returns Strict VBox status code.
15354 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15355 *
15356 * @param pVCpu The cross context virtual CPU structure.
15357 * @param cbInstr The instruction length in bytes.
15358 *
15359 * @remarks Not all of the state needs to be synced in.
15360 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15361 * are used.
15362 */
15363VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15364{
15365 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15366 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15367
15368 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15369 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15370 Assert(!pVCpu->iem.s.cActiveMappings);
15371 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15372}
15373
15374
15375/**
15376 * Interface for HM and EM to emulate the MWAIT instruction.
15377 *
15378 * @returns Strict VBox status code.
15379 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15380 *
15381 * @param pVCpu The cross context virtual CPU structure.
15382 * @param cbInstr The instruction length in bytes.
15383 *
15384 * @remarks Not all of the state needs to be synced in.
15385 */
15386VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15387{
15388 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15389 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15390
15391 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15392 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15393 Assert(!pVCpu->iem.s.cActiveMappings);
15394 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15395}
15396
15397
15398/**
15399 * Interface for HM and EM to emulate the HLT instruction.
15400 *
15401 * @returns Strict VBox status code.
15402 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15403 *
15404 * @param pVCpu The cross context virtual CPU structure.
15405 * @param cbInstr The instruction length in bytes.
15406 *
15407 * @remarks Not all of the state needs to be synced in.
15408 */
15409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15410{
15411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15412
15413 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15415 Assert(!pVCpu->iem.s.cActiveMappings);
15416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15417}
15418
15419
15420/**
15421 * Checks if IEM is in the process of delivering an event (interrupt or
15422 * exception).
15423 *
15424 * @returns true if we're in the process of raising an interrupt or exception,
15425 * false otherwise.
15426 * @param pVCpu The cross context virtual CPU structure.
15427 * @param puVector Where to store the vector associated with the
15428 * currently delivered event, optional.
15429 * @param pfFlags Where to store the event delivery flags (see
15430 * IEM_XCPT_FLAGS_XXX), optional.
15431 * @param puErr Where to store the error code associated with the
15432 * event, optional.
15433 * @param puCr2 Where to store the CR2 associated with the event,
15434 * optional.
15435 * @remarks The caller should check the flags to determine if the error code and
15436 * CR2 are valid for the event.
15437 */
15438VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15439{
15440 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15441 if (fRaisingXcpt)
15442 {
15443 if (puVector)
15444 *puVector = pVCpu->iem.s.uCurXcpt;
15445 if (pfFlags)
15446 *pfFlags = pVCpu->iem.s.fCurXcpt;
15447 if (puErr)
15448 *puErr = pVCpu->iem.s.uCurXcptErr;
15449 if (puCr2)
15450 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15451 }
15452 return fRaisingXcpt;
15453}
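
/*
 * Illustrative sketch only: querying the event currently being delivered and
 * using the returned flags to decide which of the optional outputs are
 * meaningful, as recommended in the remarks above.  The helper name is
 * hypothetical.
 */
#if 0
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("delivering vector %#x err=%#x\n", uVector, uErr));
        else
            Log(("delivering vector %#x\n", uVector));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  cr2=%#RX64\n", uCr2));
    }
}
#endif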
15454
15455#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15456
15457/**
15458 * Interface for HM and EM to emulate the CLGI instruction.
15459 *
15460 * @returns Strict VBox status code.
15461 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15462 * @param cbInstr The instruction length in bytes.
15463 * @thread EMT(pVCpu)
15464 */
15465VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15466{
15467 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15468
15469 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15470 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15471 Assert(!pVCpu->iem.s.cActiveMappings);
15472 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15473}
15474
15475
15476/**
15477 * Interface for HM and EM to emulate the STGI instruction.
15478 *
15479 * @returns Strict VBox status code.
15480 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15481 * @param cbInstr The instruction length in bytes.
15482 * @thread EMT(pVCpu)
15483 */
15484VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15485{
15486 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15487
15488 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15489 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15490 Assert(!pVCpu->iem.s.cActiveMappings);
15491 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15492}
15493
15494
15495/**
15496 * Interface for HM and EM to emulate the VMLOAD instruction.
15497 *
15498 * @returns Strict VBox status code.
15499 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15500 * @param cbInstr The instruction length in bytes.
15501 * @thread EMT(pVCpu)
15502 */
15503VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15504{
15505 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15506
15507 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15508 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15509 Assert(!pVCpu->iem.s.cActiveMappings);
15510 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15511}
15512
15513
15514/**
15515 * Interface for HM and EM to emulate the VMSAVE instruction.
15516 *
15517 * @returns Strict VBox status code.
15518 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15519 * @param cbInstr The instruction length in bytes.
15520 * @thread EMT(pVCpu)
15521 */
15522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15523{
15524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15525
15526 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15527 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15528 Assert(!pVCpu->iem.s.cActiveMappings);
15529 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15530}
15531
15532
15533/**
15534 * Interface for HM and EM to emulate the INVLPGA instruction.
15535 *
15536 * @returns Strict VBox status code.
15537 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15538 * @param cbInstr The instruction length in bytes.
15539 * @thread EMT(pVCpu)
15540 */
15541VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15542{
15543 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15544
15545 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15546 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15547 Assert(!pVCpu->iem.s.cActiveMappings);
15548 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15549}
15550
15551
15552/**
15553 * Interface for HM and EM to emulate the VMRUN instruction.
15554 *
15555 * @returns Strict VBox status code.
15556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15557 * @param cbInstr The instruction length in bytes.
15558 * @thread EMT(pVCpu)
15559 */
15560VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15561{
15562 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15563 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15564
15565 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15566 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15567 Assert(!pVCpu->iem.s.cActiveMappings);
15568 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15569}
15570
15571
15572/**
15573 * Interface for HM and EM to emulate \#VMEXIT.
15574 *
15575 * @returns Strict VBox status code.
15576 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15577 * @param uExitCode The exit code.
15578 * @param uExitInfo1 The exit info. 1 field.
15579 * @param uExitInfo2 The exit info. 2 field.
15580 * @thread EMT(pVCpu)
15581 */
15582VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15583{
15584 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15585 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15586 if (pVCpu->iem.s.cActiveMappings)
15587 iemMemRollback(pVCpu);
15588 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15589}
15590
15591#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15592
15593#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15594
15595/**
15596 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15597 *
15598 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15599 * are performed. Bounds checks are done in strict builds only.
15600 *
15601 * @param pVmcs Pointer to the virtual VMCS.
15602 * @param u64VmcsField The VMCS field.
15603 * @param pu64Dst Where to store the VMCS value.
15604 *
15605 * @remarks May be called with interrupts disabled.
15606 * @todo This should probably be moved to CPUM someday.
15607 */
15608VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15609{
15610 AssertPtr(pVmcs);
15611 AssertPtr(pu64Dst);
15612 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15613}
15614
15615
15616/**
15617 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15618 *
15619 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15620 * are performed. Bounds checks are done in strict builds only.
15621 *
15622 * @param pVmcs Pointer to the virtual VMCS.
15623 * @param u64VmcsField The VMCS field.
15624 * @param u64Val The value to write.
15625 *
15626 * @remarks May be called with interrupts disabled.
15627 * @todo This should probably be moved to CPUM someday.
15628 */
15629VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15630{
15631 AssertPtr(pVmcs);
15632 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15633}
15634
15635
15636/**
15637 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15638 *
15639 * @returns Strict VBox status code.
15640 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15641 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15642 * the x2APIC device.
15643 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15644 *
15645 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15646 * @param idMsr The MSR being read.
15647 * @param pu64Value Pointer to the value being written or where to store the
15648 * value being read.
15649 * @param fWrite Whether this is an MSR write or read access.
15650 * @thread EMT(pVCpu)
15651 */
15652VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15653{
15654 Assert(pu64Value);
15655
15656 VBOXSTRICTRC rcStrict;
15657 if (fWrite)
15658 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15659 else
15660 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15661 Assert(!pVCpu->iem.s.cActiveMappings);
15662 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15664}
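
/*
 * Illustrative sketch only: a hypothetical read path dispatching on the
 * status codes documented above.  IEM fills *pu64Value itself when it
 * virtualizes the access; the other codes are left for the caller to map
 * onto its own x2APIC / \#GP(0) handling.
 */
#if 0
static VBOXSTRICTRC exampleVirtX2ApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;    /* *pu64Value already contains the virtualized read. */
    /* VINF_VMX_INTERCEPT_NOT_ACTIVE: defer to the x2APIC device;
       VERR_OUT_OF_RANGE:             raise #GP(0) on the guest. */
    return rcStrict;
}
#endif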
15665
15666
15667/**
15668 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15669 *
15670 * @returns Strict VBox status code.
15671 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15672 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15673 *
15674 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15675 * @param pExitInfo Pointer to the VM-exit information.
15676 * @param pExitEventInfo Pointer to the VM-exit event information.
15677 * @thread EMT(pVCpu)
15678 */
15679VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15680{
15681 Assert(pExitInfo);
15682 Assert(pExitEventInfo);
15683 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15684 Assert(!pVCpu->iem.s.cActiveMappings);
15685 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15687}
15688
15689
15690/**
15691 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15692 * VM-exit.
15693 *
15694 * @returns Strict VBox status code.
15695 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15696 * @thread EMT(pVCpu)
15697 */
15698VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15699{
15700 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15701 Assert(!pVCpu->iem.s.cActiveMappings);
15702 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15703}
15704
15705
15706/**
15707 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15708 *
15709 * @returns Strict VBox status code.
15710 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15711 * @thread EMT(pVCpu)
15712 */
15713VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15714{
15715 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15716 Assert(!pVCpu->iem.s.cActiveMappings);
15717 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15718}
15719
15720
15721/**
15722 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15723 *
15724 * @returns Strict VBox status code.
15725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15726 * @param uVector The external interrupt vector (pass 0 if the external
15727 * interrupt is still pending).
15728 * @param fIntPending Whether the external interrupt is pending or
15729 * acknowledged in the interrupt controller.
15730 * @thread EMT(pVCpu)
15731 */
15732VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15733{
15734 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15735 Assert(!pVCpu->iem.s.cActiveMappings);
15736 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15737}
15738
15739
15740/**
15741 * Interface for HM and EM to emulate VM-exit due to exceptions.
15742 *
15743 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15744 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15745 *
15746 * @returns Strict VBox status code.
15747 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15748 * @param pExitInfo Pointer to the VM-exit information.
15749 * @param pExitEventInfo Pointer to the VM-exit event information.
15750 * @thread EMT(pVCpu)
15751 */
15752VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15753{
15754 Assert(pExitInfo);
15755 Assert(pExitEventInfo);
15756 Assert(pExitInfo->uReason == VMX_EXIT_XCPT_OR_NMI);
15757 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15758 Assert(!pVCpu->iem.s.cActiveMappings);
15759 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15760}
15761
15762
15763/**
15764 * Interface for HM and EM to emulate VM-exit due to NMIs.
15765 *
15766 * @returns Strict VBox status code.
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @thread EMT(pVCpu)
15769 */
15770VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15771{
15772 VMXVEXITINFO ExitInfo;
15773 RT_ZERO(ExitInfo);
15774 VMXVEXITEVENTINFO ExitEventInfo;
15775    RT_ZERO(ExitEventInfo);
15776 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15777 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15778 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15779
15780 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15781 Assert(!pVCpu->iem.s.cActiveMappings);
15782 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15783}
15784
15785
15786/**
15787 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15788 *
15789 * @returns Strict VBox status code.
15790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15791 * @thread EMT(pVCpu)
15792 */
15793VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15794{
15795 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15796 Assert(!pVCpu->iem.s.cActiveMappings);
15797 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15798}
15799
15800
15801/**
15802 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15803 *
15804 * @returns Strict VBox status code.
15805 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15806 * @param uVector The SIPI vector.
15807 * @thread EMT(pVCpu)
15808 */
15809VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15810{
15811 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15812 Assert(!pVCpu->iem.s.cActiveMappings);
15813 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15814}
15815
15816
15817/**
15818 * Interface for HM and EM to emulate a VM-exit.
15819 *
15820 * If a specialized version of a VM-exit handler exists, that must be used instead.
15821 *
15822 * @returns Strict VBox status code.
15823 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15824 * @param uExitReason The VM-exit reason.
15825 * @param u64ExitQual The Exit qualification.
15826 * @thread EMT(pVCpu)
15827 */
15828VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15829{
15830 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15831 Assert(!pVCpu->iem.s.cActiveMappings);
15832 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15833}
15834
15835
15836/**
15837 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15838 *
15839 * This is meant to be used for those instructions that VMX provides additional
15840 * decoding information beyond just the instruction length!
15841 *
15842 * @returns Strict VBox status code.
15843 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15844 * @param pExitInfo Pointer to the VM-exit information.
15845 * @thread EMT(pVCpu)
15846 */
15847VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15848{
15849 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15850 Assert(!pVCpu->iem.s.cActiveMappings);
15851 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15852}
15853
15854
15855/**
15856 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15857 *
15858 * This is meant to be used for those instructions that VMX provides only the
15859 * instruction length.
15860 *
15861 * @returns Strict VBox status code.
15862 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15863 * @param uExitReason The VM-exit reason.
15864 * @param cbInstr The instruction length in bytes.
15865 * @thread EMT(pVCpu)
15866 */
15867VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15868{
15869 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15870 Assert(!pVCpu->iem.s.cActiveMappings);
15871 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15872}
15873
15874
15875/**
15876 * Interface for HM and EM to emulate a VM-exit due to a task switch.
15877 *
15878 * @returns Strict VBox status code.
15879 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15880 * @param pExitInfo Pointer to the VM-exit information.
15881 * @param pExitEventInfo Pointer to the VM-exit event information.
15882 * @thread EMT(pVCpu)
15883 */
15884VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15885{
15886 Assert(pExitInfo);
15887 Assert(pExitEventInfo);
15888 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
15889 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15890 Assert(!pVCpu->iem.s.cActiveMappings);
15891 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15892}
15893
15894
15895/**
15896 * Interface for HM and EM to emulate the VMREAD instruction.
15897 *
15898 * @returns Strict VBox status code.
15899 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15900 * @param pExitInfo Pointer to the VM-exit information.
15901 * @thread EMT(pVCpu)
15902 */
15903VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15904{
15905 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15906 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15907 Assert(pExitInfo);
15908
15909 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15910
15911 VBOXSTRICTRC rcStrict;
15912 uint8_t const cbInstr = pExitInfo->cbInstr;
15913 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
15914 uint64_t const u64FieldEnc = fIs64BitMode
15915 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
15916 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15917 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15918 {
15919 if (fIs64BitMode)
15920 {
15921 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15922 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
15923 }
15924 else
15925 {
15926 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15927 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
15928 }
15929 }
15930 else
15931 {
15932 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
15933 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15934 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
15935 }
15936 Assert(!pVCpu->iem.s.cActiveMappings);
15937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15938}
15939
15940
15941/**
15942 * Interface for HM and EM to emulate the VMWRITE instruction.
15943 *
15944 * @returns Strict VBox status code.
15945 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15946 * @param pExitInfo Pointer to the VM-exit information.
15947 * @thread EMT(pVCpu)
15948 */
15949VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15950{
15951 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15952 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15953 Assert(pExitInfo);
15954
15955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15956
15957 uint64_t u64Val;
15958 uint8_t iEffSeg;
15959 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15960 {
15961 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15962 iEffSeg = UINT8_MAX;
15963 }
15964 else
15965 {
15966 u64Val = pExitInfo->GCPtrEffAddr;
15967 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15968 }
15969 uint8_t const cbInstr = pExitInfo->cbInstr;
15970 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
15971 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
15972 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15973 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
15974 Assert(!pVCpu->iem.s.cActiveMappings);
15975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15976}
15977
15978
15979/**
15980 * Interface for HM and EM to emulate the VMPTRLD instruction.
15981 *
15982 * @returns Strict VBox status code.
15983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15984 * @param pExitInfo Pointer to the VM-exit information.
15985 * @thread EMT(pVCpu)
15986 */
15987VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15988{
15989 Assert(pExitInfo);
15990 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15991 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15992
15993 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15994
15995 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15996 uint8_t const cbInstr = pExitInfo->cbInstr;
15997 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15998 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15999 Assert(!pVCpu->iem.s.cActiveMappings);
16000 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16001}
16002
16003
16004/**
16005 * Interface for HM and EM to emulate the VMPTRST instruction.
16006 *
16007 * @returns Strict VBox status code.
16008 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16009 * @param pExitInfo Pointer to the VM-exit information.
16010 * @thread EMT(pVCpu)
16011 */
16012VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16013{
16014 Assert(pExitInfo);
16015 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16016 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16017
16018 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16019
16020 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16021 uint8_t const cbInstr = pExitInfo->cbInstr;
16022 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16023 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16024 Assert(!pVCpu->iem.s.cActiveMappings);
16025 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16026}
16027
16028
16029/**
16030 * Interface for HM and EM to emulate the VMCLEAR instruction.
16031 *
16032 * @returns Strict VBox status code.
16033 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16034 * @param pExitInfo Pointer to the VM-exit information.
16035 * @thread EMT(pVCpu)
16036 */
16037VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16038{
16039 Assert(pExitInfo);
16040 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16041 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16042
16043 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16044
16045 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16046 uint8_t const cbInstr = pExitInfo->cbInstr;
16047 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16048 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16049 Assert(!pVCpu->iem.s.cActiveMappings);
16050 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16051}
16052
16053
16054/**
16055 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16056 *
16057 * @returns Strict VBox status code.
16058 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16059 * @param cbInstr The instruction length in bytes.
16060 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16061 * VMXINSTRID_VMRESUME).
16062 * @thread EMT(pVCpu)
16063 */
16064VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16065{
16066 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16067 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16068
16069 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16070 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16071 Assert(!pVCpu->iem.s.cActiveMappings);
16072 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16073}
16074
16075
16076/**
16077 * Interface for HM and EM to emulate the VMXON instruction.
16078 *
16079 * @returns Strict VBox status code.
16080 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16081 * @param pExitInfo Pointer to the VM-exit information.
16082 * @thread EMT(pVCpu)
16083 */
16084VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16085{
16086 Assert(pExitInfo);
16087 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16088 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16089
16090 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16091
16092 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16093 uint8_t const cbInstr = pExitInfo->cbInstr;
16094 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16095 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16096 Assert(!pVCpu->iem.s.cActiveMappings);
16097 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16098}
16099
16100
16101/**
16102 * Interface for HM and EM to emulate the VMXOFF instruction.
16103 *
16104 * @returns Strict VBox status code.
16105 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16106 * @param cbInstr The instruction length in bytes.
16107 * @thread EMT(pVCpu)
16108 */
16109VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16110{
16111 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16112 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16113
16114 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16115 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16116 Assert(!pVCpu->iem.s.cActiveMappings);
16117 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16118}
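
/*
 * Illustrative sketch only (editor's addition, not part of the upstream file):
 * VMXOFF takes no memory operand, so the caller just passes the VM-exit
 * instruction length, which IEM uses to advance RIP past the instruction.
 * cbExitInstr is a placeholder name.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmxoff(pVCpu, cbExitInstr);
 */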
16119
16120
16121/**
16122 * Interface for HM and EM to emulate the INVVPID instruction.
16123 *
16124 * @returns Strict VBox status code.
16125 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16126 * @param pExitInfo Pointer to the VM-exit information.
16127 * @thread EMT(pVCpu)
16128 */
16129VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16130{
16131 Assert(pExitInfo);
16132 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16133 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16134
16135 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16136
16137 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16138 uint8_t const cbInstr = pExitInfo->cbInstr;
16139 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16140 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16141 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16142 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16143 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16144 Assert(!pVCpu->iem.s.cActiveMappings);
16145 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16146}
16147
16148
16149/**
16150 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16151 *
16152 * @remarks The @a pvUser argument is currently unused.
16153 */
16154PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVM pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16155 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16156 PGMACCESSORIGIN enmOrigin, void *pvUser)
16157{
16158 RT_NOREF4(pVM, pvPhys, enmOrigin, pvUser);
16159
16160 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16161 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16162 {
16163 Assert(CPUMIsGuestVmxProcCtls2Set(pVCpu, IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16164 Assert(CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16165
16166 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16167 * Currently they will go through as read accesses. */
16168 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16169 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16170 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16171 if (RT_FAILURE(rcStrict))
16172 return rcStrict;
16173
16174 /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
16175 return VINF_SUCCESS;
16176 }
16177
16178 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16179 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16180 if (RT_FAILURE(rc))
16181 return rc;
16182
16183 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16184 return VINF_PGM_HANDLER_DO_DEFAULT;
16185}
16186
16187#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16188
16189#ifdef IN_RING3
16190
16191/**
16192 * Handles the unlikely and probably fatal merge cases.
16193 *
16194 * @returns Merged status code.
16195 * @param rcStrict Current EM status code.
16196 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16197 * with @a rcStrict.
16198 * @param iMemMap The memory mapping index. For error reporting only.
16199 * @param pVCpu The cross context virtual CPU structure of the calling
16200 * thread, for error reporting only.
16201 */
16202DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16203 unsigned iMemMap, PVMCPUCC pVCpu)
16204{
16205 if (RT_FAILURE_NP(rcStrict))
16206 return rcStrict;
16207
16208 if (RT_FAILURE_NP(rcStrictCommit))
16209 return rcStrictCommit;
16210
16211 if (rcStrict == rcStrictCommit)
16212 return rcStrictCommit;
16213
16214 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16215 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16216 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16217 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16218 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16219 return VERR_IOM_FF_STATUS_IPE;
16220}
16221
16222
16223/**
16224 * Helper for IEMR3ProcessForceFlag.
16225 *
16226 * @returns Merged status code.
16227 * @param rcStrict Current EM status code.
16228 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16229 * with @a rcStrict.
16230 * @param iMemMap The memory mapping index. For error reporting only.
16231 * @param pVCpu The cross context virtual CPU structure of the calling
16232 * thread, for error reporting only.
16233 */
16234DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16235{
16236 /* Simple. */
16237 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16238 return rcStrictCommit;
16239
16240 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16241 return rcStrict;
16242
16243 /* EM scheduling status codes. */
16244 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16245 && rcStrict <= VINF_EM_LAST))
16246 {
16247 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16248 && rcStrictCommit <= VINF_EM_LAST))
16249 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16250 }
16251
16252 /* Unlikely */
16253 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16254}
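
/*
 * Worked examples of the fast paths above (editor's illustration, derived from
 * the code itself):
 *   - iemR3MergeStatus(VINF_SUCCESS,      rcCommit, ...) returns rcCommit.
 *   - iemR3MergeStatus(VINF_EM_RAW_TO_R3, rcCommit, ...) also returns rcCommit;
 *     the "go to ring-3" request is already satisfied since this code only
 *     runs in ring-3.
 *   - If both inputs are EM scheduling codes, the numerically smaller of the
 *     two is returned.
 */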
16255
16256
16257/**
16258 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16259 *
16260 * @returns Merge between @a rcStrict and what the commit operation returned.
16261 * @param pVM The cross context VM structure.
16262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16263 * @param rcStrict The status code returned by ring-0 or raw-mode.
16264 */
16265VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16266{
16267 /*
16268 * Reset the pending commit.
16269 */
16270 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16271 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16272 ("%#x %#x %#x\n",
16273 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16274 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16275
16276 /*
16277 * Commit the pending bounce buffers (usually just one).
16278 */
16279 unsigned cBufs = 0;
16280 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16281 while (iMemMap-- > 0)
16282 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16283 {
16284 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16285 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16286 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16287
16288 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16289 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16290 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16291
16292 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16293 {
16294 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16295 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16296 pbBuf,
16297 cbFirst,
16298 PGMACCESSORIGIN_IEM);
16299 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16300 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16301 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16302 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16303 }
16304
16305 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16306 {
16307 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16308 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16309 pbBuf + cbFirst,
16310 cbSecond,
16311 PGMACCESSORIGIN_IEM);
16312 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16313 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16314 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16315 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16316 }
16317 cBufs++;
16318 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16319 }
16320
16321 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16322 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16323 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16324 pVCpu->iem.s.cActiveMappings = 0;
16325 return rcStrict;
16326}
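
/*
 * Illustrative sketch only (editor's addition, not part of the upstream file):
 * the ring-3 caller is expected to invoke this when VMCPU_FF_IEM is pending
 * after returning from ring-0 or raw-mode, roughly along these lines:
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */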
16327
16328#endif /* IN_RING3 */
16329