VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 103847

Last change on this file since 103847 was 103847, checked in by vboxsync, 11 months ago

Move iemTbFlagsToString() to be accessible to both callers, bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 310.4 KB
1/* $Id: IEMInternal.h 103847 2024-03-14 11:29:54Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41#include <iprt/list.h>
42
43
44RT_C_DECLS_BEGIN
45
46
47/** @defgroup grp_iem_int Internals
48 * @ingroup grp_iem
49 * @internal
50 * @{
51 */
52
53/** For expanding symbols in SlickEdit and other products that tag and
54 * cross-reference IEM symbols. */
55#ifndef IEM_STATIC
56# define IEM_STATIC static
57#endif
58
59/** @def IEM_WITH_SETJMP
60 * Enables alternative status code handling using setjmps.
61 *
62 * This adds a bit of expense via the setjmp() call since it saves all the
63 * non-volatile registers. However, it eliminates return code checks and allows
64 * for more optimal return value passing (return regs instead of stack buffer).
65 */
66#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
67# define IEM_WITH_SETJMP
68#endif
69
70/** @def IEM_WITH_THROW_CATCH
71 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
72 * mode code when IEM_WITH_SETJMP is in effect.
73 *
74 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
75 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one test
76 * result value improving by more than 1%. (Best out of three.)
77 *
78 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
79 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster, and all but some of
80 * the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
81 * Linux, but it should be quite a bit faster for normal code.
82 */
83#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
84 || defined(DOXYGEN_RUNNING)
85# define IEM_WITH_THROW_CATCH
86#endif
87
88/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING
89 * Enables the delayed PC updating optimization (see @bugref{10373}).
90 */
91#if defined(DOXYGEN_RUNNING) || 1
92# define IEMNATIVE_WITH_DELAYED_PC_UPDATING
93#endif
94
95/** Enables the SIMD register allocator @bugref{10614}. */
96#if defined(DOXYGEN_RUNNING) || 1
97# define IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
98#endif
99/** Enables access to even callee saved registers. */
100//# define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS
101
102/** @def VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
103 * Enables a quicker alternative to throw/longjmp for IEM_DO_LONGJMP when
104 * executing native translation blocks.
105 *
106 * This exploits the fact that we save all non-volatile registers in the TB
107 * prologue and thus just need to do the same as the TB epilogue to get the
108 * effect of a longjmp/throw. Since MSC marks XMM6 thru XMM15 as
109 * non-volatile (and does something even more crazy for ARM), this probably
110 * won't work reliably on Windows. */
111#if defined(DOXYGEN_RUNNING) || (!defined(RT_OS_WINDOWS) && (defined(RT_ARCH_ARM64) /*|| defined(_RT_ARCH_AMD64)*/))
112# define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
113#endif
114#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
115# if !defined(IN_RING3) \
116 || !defined(VBOX_WITH_IEM_RECOMPILER) \
117 || !defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
118# undef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
119# elif defined(RT_OS_WINDOWS)
120# pragma message("VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is not safe to use on windows")
121# endif
122#endif
123
124
125/** @def IEM_DO_LONGJMP
126 *
127 * Wrapper around longjmp / throw.
128 *
129 * @param a_pVCpu The CPU handle.
130 * @param a_rc The status code to jump back with / throw.
131 */
132#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
133# ifdef IEM_WITH_THROW_CATCH
134# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
135# define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
136 if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
137 iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
138 throw int(a_rc); \
139 } while (0)
140# else
141# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
142# endif
143# else
144# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
145# endif
146#endif
147
148/** For use with IEM functions that may do a longjmp (when enabled).
149 *
150 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
151 * attribute. So, we indicate that functions that may be part of a longjmp may
152 * throw "exceptions" and that the compiler should definitely not generate any
153 * std::terminate calling unwind code.
154 *
155 * Here is one example of this ending in std::terminate:
156 * @code{.txt}
15700 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
15801 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
15902 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
16003 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
16104 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
16205 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
16306 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
16407 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
16508 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
16609 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
1670a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
1680b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
1690c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
1700d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
1710e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
1720f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
17310 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
174 @endcode
175 *
176 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
177 */
178#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
179# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
180#else
181# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
182#endif
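
/* Illustrative sketch only (iemExampleTryFetchU8 is a made-up helper, not a
 * function from this file): with IEM_WITH_SETJMP enabled, a worker that cannot
 * conveniently return a status code bails out through IEM_DO_LONGJMP, and the
 * setjmp/catch site in the dispatcher receives the code instead:
 * @code{.cpp}
    DECLINLINE(uint8_t) iemExampleFetchU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
    {
        uint8_t      bValue   = 0;
        VBOXSTRICTRC rcStrict = iemExampleTryFetchU8(pVCpu, &bValue); // hypothetical helper
        if (rcStrict == VINF_SUCCESS)
            return bValue;
        IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));            // does not return
    }
 * @endcode
 */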
183
184#define IEM_IMPLEMENTS_TASKSWITCH
185
186/** @def IEM_WITH_3DNOW
187 * Includes the 3DNow decoding. */
188#if (!defined(IEM_WITH_3DNOW) && !defined(IEM_WITHOUT_3DNOW)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
189# define IEM_WITH_3DNOW
190#endif
191
192/** @def IEM_WITH_THREE_0F_38
193 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
194#if (!defined(IEM_WITH_THREE_0F_38) && !defined(IEM_WITHOUT_THREE_0F_38)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
195# define IEM_WITH_THREE_0F_38
196#endif
197
198/** @def IEM_WITH_THREE_0F_3A
199 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
200#if (!defined(IEM_WITH_THREE_0F_3A) && !defined(IEM_WITHOUT_THREE_0F_3A)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
201# define IEM_WITH_THREE_0F_3A
202#endif
203
204/** @def IEM_WITH_VEX
205 * Includes the VEX decoding. */
206#if (!defined(IEM_WITH_VEX) && !defined(IEM_WITHOUT_VEX)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
207# define IEM_WITH_VEX
208#endif
209
210/** @def IEM_CFG_TARGET_CPU
211 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
212 *
213 * By default we allow this to be configured by the user via the
214 * CPUM/GuestCpuName config string, but this comes at a slight cost during
215 * decoding. So, for applications of this code where there is no need to
216 * be dynamic wrt target CPU, just modify this define.
217 */
218#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
219# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
220#endif
221
222//#define IEM_WITH_CODE_TLB // - work in progress
223//#define IEM_WITH_DATA_TLB // - work in progress
224
225
226/** @def IEM_USE_UNALIGNED_DATA_ACCESS
227 * Use unaligned accesses instead of elaborate byte assembly. */
228#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
229# define IEM_USE_UNALIGNED_DATA_ACCESS
230#endif
231
232//#define IEM_LOG_MEMORY_WRITES
233
234#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
235/** Instruction statistics. */
236typedef struct IEMINSTRSTATS
237{
238# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
239# include "IEMInstructionStatisticsTmpl.h"
240# undef IEM_DO_INSTR_STAT
241} IEMINSTRSTATS;
242#else
243struct IEMINSTRSTATS;
244typedef struct IEMINSTRSTATS IEMINSTRSTATS;
245#endif
246/** Pointer to IEM instruction statistics. */
247typedef IEMINSTRSTATS *PIEMINSTRSTATS;
248
249
250/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
251 * @{ */
252#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
253#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
254#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result */
255#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
256#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
257/** Selects the right variant from a_aArray.
258 * pVCpu is implicit in the caller context. */
259#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
260 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
261/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
262 * be used because the host CPU does not support the operation. */
263#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
264 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
265/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
266 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
267 * into the two.
268 * @sa IEM_SELECT_HOST_OR_FALLBACK */
269#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
270# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
271 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
272#else
273# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
274 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
275#endif
276/** @} */
277
278/**
279 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
280 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
281 *
282 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
283 * indicator.
284 *
285 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
286 */
287#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
288# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
289 (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
290#else
291# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
292#endif
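
/* Illustrative sketch only (the worker names and function pointer type are
 * hypothetical; fSse42 merely stands in for whichever CPUMFEATURES member is
 * relevant): picking a host-accelerated worker when available, otherwise the
 * portable C fallback:
 * @code{.cpp}
    typedef void (*PFNEXAMPLEWORKER)(uint64_t *puDst, uint64_t uSrc);
    PFNEXAMPLEWORKER const pfnWorker = IEM_SELECT_HOST_OR_FALLBACK(fSse42,
                                                                   iemAImpl_example_sse42,
                                                                   iemAImpl_example_fallback);
    pfnWorker(&uDst, uSrc);
 * @endcode
 */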
293
294
295/**
296 * Extended operand mode that includes a representation of 8-bit.
297 *
298 * This is used for packing down modes when invoking some C instruction
299 * implementations.
300 */
301typedef enum IEMMODEX
302{
303 IEMMODEX_16BIT = IEMMODE_16BIT,
304 IEMMODEX_32BIT = IEMMODE_32BIT,
305 IEMMODEX_64BIT = IEMMODE_64BIT,
306 IEMMODEX_8BIT
307} IEMMODEX;
308AssertCompileSize(IEMMODEX, 4);
309
310
311/**
312 * Branch types.
313 */
314typedef enum IEMBRANCH
315{
316 IEMBRANCH_JUMP = 1,
317 IEMBRANCH_CALL,
318 IEMBRANCH_TRAP,
319 IEMBRANCH_SOFTWARE_INT,
320 IEMBRANCH_HARDWARE_INT
321} IEMBRANCH;
322AssertCompileSize(IEMBRANCH, 4);
323
324
325/**
326 * INT instruction types.
327 */
328typedef enum IEMINT
329{
330 /** INT n instruction (opcode 0xcd imm). */
331 IEMINT_INTN = 0,
332 /** Single byte INT3 instruction (opcode 0xcc). */
333 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
334 /** Single byte INTO instruction (opcode 0xce). */
335 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
336 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
337 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
338} IEMINT;
339AssertCompileSize(IEMINT, 4);
340
341
342/**
343 * A FPU result.
344 */
345typedef struct IEMFPURESULT
346{
347 /** The output value. */
348 RTFLOAT80U r80Result;
349 /** The output status. */
350 uint16_t FSW;
351} IEMFPURESULT;
352AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
353/** Pointer to a FPU result. */
354typedef IEMFPURESULT *PIEMFPURESULT;
355/** Pointer to a const FPU result. */
356typedef IEMFPURESULT const *PCIEMFPURESULT;
357
358
359/**
360 * A FPU result consisting of two output values and FSW.
361 */
362typedef struct IEMFPURESULTTWO
363{
364 /** The first output value. */
365 RTFLOAT80U r80Result1;
366 /** The output status. */
367 uint16_t FSW;
368 /** The second output value. */
369 RTFLOAT80U r80Result2;
370} IEMFPURESULTTWO;
371AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
372AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
373/** Pointer to a FPU result consisting of two output values and FSW. */
374typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
375/** Pointer to a const FPU result consisting of two output values and FSW. */
376typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
377
378
379/**
380 * IEM TLB entry.
381 *
382 * Lookup assembly:
383 * @code{.asm}
384 ; Calculate tag.
385 mov rax, [VA]
386 shl rax, 16
387 shr rax, 16 + X86_PAGE_SHIFT
388 or rax, [uTlbRevision]
389
390 ; Do indexing.
391 movzx ecx, al
392 lea rcx, [pTlbEntries + rcx]
393
394 ; Check tag.
395 cmp [rcx + IEMTLBENTRY.uTag], rax
396 jne .TlbMiss
397
398 ; Check access.
399 mov rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
400 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
401 cmp rax, [uTlbPhysRev]
402 jne .TlbMiss
403
404 ; Calc address and we're done.
405 mov eax, X86_PAGE_OFFSET_MASK
406 and eax, [VA]
407 or rax, [rcx + IEMTLBENTRY.pMappingR3]
408 %ifdef VBOX_WITH_STATISTICS
409 inc qword [cTlbHits]
410 %endif
411 jmp .Done
412
413 .TlbMiss:
414 mov r8d, ACCESS_FLAGS
415 mov rdx, [VA]
416 mov rcx, [pVCpu]
417 call iemTlbTypeMiss
418 .Done:
419
420 @endcode
421 *
422 */
423typedef struct IEMTLBENTRY
424{
425 /** The TLB entry tag.
426 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits; this
427 * is ASSUMING a virtual address width of 48 bits.
428 *
429 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
430 *
431 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
432 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
433 * revision wraps around though, the tags need to be zeroed.
434 *
435 * @note Try using the SHRD instruction? After seeing
436 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
437 *
438 * @todo This will need to be reorganized for 57-bit wide virtual address and
439 * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
440 * have to move the TLB entry versioning entirely to the
441 * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
442 * 19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
443 * consumed by PCID and ASID (12 + 6 = 18).
444 */
445 uint64_t uTag;
446 /** Access flags and physical TLB revision.
447 *
448 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
449 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
450 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
451 * - Bit 3 - pgm phys/virt - not directly writable.
452 * - Bit 4 - pgm phys page - not directly readable.
453 * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
454 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
455 * - Bit 7 - tlb entry - pbMappingR3 member not valid.
456 * - Bits 8 and 9 - pgm phys page flags (IEMTLBE_F_PG_XXX); bits 63 thru 10 hold the physical TLB revision number.
457 *
458 * We're using complemented bit meanings here because it makes it easy to check
459 * whether special action is required. For instance a user mode write access
460 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
461 * non-zero result would mean special handling needed because either it wasn't
462 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
463 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
464 * need to check any PTE flag.
465 */
466 uint64_t fFlagsAndPhysRev;
467 /** The guest physical page address. */
468 uint64_t GCPhys;
469 /** Pointer to the ring-3 mapping. */
470 R3PTRTYPE(uint8_t *) pbMappingR3;
471#if HC_ARCH_BITS == 32
472 uint32_t u32Padding1;
473#endif
474} IEMTLBENTRY;
475AssertCompileSize(IEMTLBENTRY, 32);
476/** Pointer to an IEM TLB entry. */
477typedef IEMTLBENTRY *PIEMTLBENTRY;
478
479/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
480 * @{ */
481#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
482#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
483#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
484#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
485#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
486#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Page tables: Not accessed (need to be marked accessed). */
487#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
488#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
489#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
490#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(9) /**< Phys page: Code page. */
491#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
492/** @} */
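
/* Illustrative sketch (simplified; pTlb, pTlbe and GCPtrMem are assumed locals;
 * not the actual lookup code): thanks to the complemented bit meanings above,
 * validating e.g. a ring-3 write access only takes one AND and a compare
 * against IEMTLB::uTlbPhysRev, mirroring the assembly outline in the
 * IEMTLBENTRY docs:
 * @code{.cpp}
    if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
                                       | IEMTLBE_F_PT_NO_WRITE    | IEMTLBE_F_PT_NO_USER
                                       | IEMTLBE_F_PG_NO_WRITE    | IEMTLBE_F_PT_NO_DIRTY
                                       | IEMTLBE_F_PT_NO_ACCESSED))
        == pTlb->uTlbPhysRev)
    {
        // Fast path: page is present, writable, dirty, accessed and user accessible.
        uint8_t *pbPage = pTlbe->pbMappingR3;
        // ... access the page via pbPage + (GCPtrMem & X86_PAGE_OFFSET_MASK) ...
    }
 * @endcode
 */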
493
494
495/**
496 * An IEM TLB.
497 *
498 * We've got two of these, one for data and one for instructions.
499 */
500typedef struct IEMTLB
501{
502 /** The TLB entries.
503 * We've chosen 256 because that way we can obtain the result directly from an
504 * 8-bit register without an additional AND instruction. */
505 IEMTLBENTRY aEntries[256];
506 /** The TLB revision.
507 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
508 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
509 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
510 * (The revision zero indicates an invalid TLB entry.)
511 *
512 * The initial value is chosen to cause an early wraparound. */
513 uint64_t uTlbRevision;
514 /** The TLB physical address revision - shadow of PGM variable.
515 *
516 * This is actually only 54 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
517 * incremented by adding RT_BIT_64(10). When it wraps around and becomes zero,
518 * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pbMappingR3 as well
519 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
520 *
521 * The initial value is chosen to cause an early wraparound. */
522 uint64_t volatile uTlbPhysRev;
523
524 /* Statistics: */
525
526 /** TLB hits (VBOX_WITH_STATISTICS only). */
527 uint64_t cTlbHits;
528 /** TLB misses. */
529 uint32_t cTlbMisses;
530 /** Slow read path. */
531 uint32_t cTlbSlowReadPath;
532 /** Safe read path. */
533 uint32_t cTlbSafeReadPath;
534 /** Safe write path. */
535 uint32_t cTlbSafeWritePath;
536#if 0
537 /** TLB misses because of tag mismatch. */
538 uint32_t cTlbMissesTag;
539 /** TLB misses because of virtual access violation. */
540 uint32_t cTlbMissesVirtAccess;
541 /** TLB misses because of dirty bit. */
542 uint32_t cTlbMissesDirty;
543 /** TLB misses because of MMIO */
544 uint32_t cTlbMissesMmio;
545 /** TLB misses because of write access handlers. */
546 uint32_t cTlbMissesWriteHandler;
547 /** TLB misses because no r3(/r0) mapping. */
548 uint32_t cTlbMissesMapping;
549#endif
550 /** Alignment padding. */
551 uint32_t au32Padding[6];
552} IEMTLB;
553AssertCompileSizeAlignment(IEMTLB, 64);
554/** IEMTLB::uTlbRevision increment. */
555#define IEMTLB_REVISION_INCR RT_BIT_64(36)
556/** IEMTLB::uTlbRevision mask. */
557#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
558/** IEMTLB::uTlbPhysRev increment.
559 * @sa IEMTLBE_F_PHYS_REV */
560#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
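
/* Illustrative sketch (simplified): a full TLB flush normally only costs a
 * revision bump; the expensive tag zeroing described for IEMTLB::uTlbRevision
 * is only needed on the rare wraparound:
 * @code{.cpp}
    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    if (RT_LIKELY(pTlb->uTlbRevision != 0))
    {   // Likely: all existing tags now carry a stale revision and will miss.
    }
    else
    {   // Rare wraparound: zero every tag and restart the revision counter.
        for (uint32_t i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
            pTlb->aEntries[i].uTag = 0;
        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
    }
 * @endcode
 */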
561/**
562 * Calculates the TLB tag for a virtual address.
563 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
564 * @param a_pTlb The TLB.
565 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
566 * the clearing of the top 16 bits won't work (if 32-bit
567 * we'll end up with mostly zeros).
568 */
569#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
570/**
571 * Calculates the TLB tag for a virtual address but without TLB revision.
572 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
573 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
574 * the clearing of the top 16 bits won't work (if 32-bit
575 * we'll end up with mostly zeros).
576 */
577#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
578/**
579 * Converts a TLB tag value into a TLB index.
580 * @returns Index into IEMTLB::aEntries.
581 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
582 */
583#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
584/**
585 * Converts a TLB tag value into a pointer to the corresponding TLB entry.
586 * @returns Pointer into IEMTLB::aEntries.
587 * @param a_pTlb The TLB.
588 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
589 */
590#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
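
/* Illustrative sketch (simplified; pTlb and GCPtrMem are assumed locals): these
 * macros combine into the C equivalent of the lookup assembly shown in the
 * IEMTLBENTRY docs:
 * @code{.cpp}
    uint64_t const uTag  = IEMTLB_CALC_TAG(pTlb, GCPtrMem);
    PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(pTlb, uTag);
    if (pTlbe->uTag == uTag)
    {   // TLB hit - still need to check fFlagsAndPhysRev (see IEMTLBE_F_XXX above).
    }
    else
    {   // TLB miss - walk the page tables and (re)load this entry.
    }
 * @endcode
 */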
591
592
593/** @name IEM_MC_F_XXX - MC block flags/clues.
594 * @todo Merge with IEM_CIMPL_F_XXX
595 * @{ */
596#define IEM_MC_F_ONLY_8086 RT_BIT_32(0)
597#define IEM_MC_F_MIN_186 RT_BIT_32(1)
598#define IEM_MC_F_MIN_286 RT_BIT_32(2)
599#define IEM_MC_F_NOT_286_OR_OLDER IEM_MC_F_MIN_386
600#define IEM_MC_F_MIN_386 RT_BIT_32(3)
601#define IEM_MC_F_MIN_486 RT_BIT_32(4)
602#define IEM_MC_F_MIN_PENTIUM RT_BIT_32(5)
603#define IEM_MC_F_MIN_PENTIUM_II IEM_MC_F_MIN_PENTIUM
604#define IEM_MC_F_MIN_CORE IEM_MC_F_MIN_PENTIUM
605#define IEM_MC_F_64BIT RT_BIT_32(6)
606#define IEM_MC_F_NOT_64BIT RT_BIT_32(7)
607/** This is set by IEMAllN8vePython.py to indicate a variation without the
608 * flags-clearing-and-checking, when there is also a variation with that.
609 * @note Do not use this manually, it's only for python and for testing in
610 * the native recompiler! */
611#define IEM_MC_F_WITHOUT_FLAGS RT_BIT_32(8)
612/** @} */
613
614/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
615 *
616 * These clues are mainly for the recompiler, so that it can emit correct code.
617 *
618 * They are processed by the python script, which also automatically
619 * calculates flags for MC blocks based on the statements, extending the use of
620 * these flags to describe MC block behavior to the recompiler core. The python
621 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
622 * error checking purposes. The script emits the necessary fEndTb = true and
623 * similar statements as this reduces compile time a tiny bit.
624 *
625 * @{ */
626/** Flag set if direct branch, clear if absolute or indirect. */
627#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
628/** Flag set if indirect branch, clear if direct or relative.
629 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
630 * as well as for return instructions (RET, IRET, RETF). */
631#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
632/** Flag set if relative branch, clear if absolute or indirect. */
633#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
634/** Flag set if conditional branch, clear if unconditional. */
635#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
636/** Flag set if it's a far branch (changes CS). */
637#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
638/** Convenience: Testing any kind of branch. */
639#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
640
641/** Execution flags may change (IEMCPU::fExec). */
642#define IEM_CIMPL_F_MODE RT_BIT_32(5)
643/** May change significant portions of RFLAGS. */
644#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
645/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
646#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
647/** May trigger interrupt shadowing. */
648#define IEM_CIMPL_F_INHIBIT_SHADOW RT_BIT_32(8)
649/** May enable interrupts, so recheck IRQ immediately after executing
650 * the instruction. */
651#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(9)
652/** May disable interrupts, so recheck IRQ immediately before executing the
653 * instruction. */
654#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(10)
655/** Convenience: Check for IRQ both before and after an instruction. */
656#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
657/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
658#define IEM_CIMPL_F_VMEXIT RT_BIT_32(11)
659/** May modify FPU state.
660 * @todo Not sure if this is useful yet. */
661#define IEM_CIMPL_F_FPU RT_BIT_32(12)
662/** REP prefixed instruction which may yield before updating PC.
663 * @todo Not sure if this is useful, REP functions now return non-zero
664 * status if they don't update the PC. */
665#define IEM_CIMPL_F_REP RT_BIT_32(13)
666/** I/O instruction.
667 * @todo Not sure if this is useful yet. */
668#define IEM_CIMPL_F_IO RT_BIT_32(14)
669/** Force end of TB after the instruction. */
670#define IEM_CIMPL_F_END_TB RT_BIT_32(15)
671/** Flag set if a branch may also modify the stack (push/pop return address). */
672#define IEM_CIMPL_F_BRANCH_STACK RT_BIT_32(16)
673/** Flag set if a branch may also modify the stack (push/pop return address)
674 * and switch it (load/restore SS:RSP). */
675#define IEM_CIMPL_F_BRANCH_STACK_FAR RT_BIT_32(17)
676/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
677#define IEM_CIMPL_F_XCPT \
678 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
679 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
680
681/** The block calls a C-implementation instruction function with two implicit arguments.
682 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
683 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
684 * @note The python scripts will add this if missing. */
685#define IEM_CIMPL_F_CALLS_CIMPL RT_BIT_32(18)
686/** The block calls an ASM-implementation instruction function.
687 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
688 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
689 * @note The python scripts will add this if missing. */
690#define IEM_CIMPL_F_CALLS_AIMPL RT_BIT_32(19)
691/** The block calls an ASM-implementation instruction function with an implicit
692 * X86FXSTATE pointer argument.
693 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and IEM_CIMPL_F_CALLS_AIMPL.
694 * @note The python scripts will add this if missing. */
695#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(20)
696/** @} */
697
698
699/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
700 *
701 * These flags are set when entering IEM and adjusted as code is executed, such
702 * that they will always contain the current values as instructions are
703 * finished.
704 *
705 * In recompiled execution mode, (most of) these flags are included in the
706 * translation block selection key and stored in IEMTB::fFlags alongside the
707 * IEMTB_F_XXX flags. The latter flags use bits 31 thru 24, which are all zero
708 * in IEMCPU::fExec.
709 *
710 * @{ */
711/** Mode: The block target mode mask. */
712#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
713/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
714#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
715/** X86 Mode: Bit used to indicate a pre-386 CPU in 16-bit mode (for eliminating
716 * the conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
717 * 32-bit mode (for simplifying most memory accesses). */
718#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
719/** X86 Mode: Bit indicating protected mode; real mode (or SMM) when not set. */
720#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
721/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
722#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
723
724/** X86 Mode: 16-bit on 386 or later. */
725#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
726/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
727#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
728/** X86 Mode: 16-bit protected mode on 386 or later. */
729#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
730/** X86 Mode: 16-bit protected mode on a pre-386 CPU (EIP update opt). */
731#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
732/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
733#define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)
734
735/** X86 Mode: 32-bit on 386 or later. */
736#define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
737/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
738#define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
739/** X86 Mode: 32-bit protected mode. */
740#define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
741/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
742#define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)
743
744/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
745#define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)
746
747/** X86 Mode: Checks if @a a_fExec represent a FLAT mode. */
748#define IEM_F_MODE_X86_IS_FLAT(a_fExec) ( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
749 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
750 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)
751
752/** Bypass access handlers when set. */
753#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
754/** Have pending hardware instruction breakpoints. */
755#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
756/** Have pending hardware data breakpoints. */
757#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)
758
759/** X86: Have pending hardware I/O breakpoints. */
760#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
761/** X86: Disregard the lock prefix (implied or not) when set. */
762#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)
763
764/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
765#define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
766
767/** Caller configurable options. */
768#define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
769
770/** X86: The current protection level (CPL) shift factor. */
771#define IEM_F_X86_CPL_SHIFT 8
772/** X86: The current protection level (CPL) mask. */
773#define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
774/** X86: The current protection level (CPL) shifted mask. */
775#define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
776
777/** X86 execution context.
778 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
779 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
780 * mode. */
781#define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
782/** X86 context: Plain regular execution context. */
783#define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
784/** X86 context: VT-x enabled. */
785#define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
786/** X86 context: AMD-V enabled. */
787#define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
788/** X86 context: In AMD-V or VT-x guest mode. */
789#define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
790/** X86 context: System management mode (SMM). */
791#define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)
792
793/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
794 * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
795 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
796 * already). */
802
803/** @} */
804
805
806/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
807 *
808 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
809 * translation block flags. The combined flag mask (subject to
810 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
811 *
812 * @{ */
813/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
814#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)
815
816/** Type: The block type mask. */
817#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
819/** Type: Purely threaded recompiler (via tables). */
819#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
820/** Type: Native recompilation. */
821#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)
822
823/** Set when we're starting the block in an "interrupt shadow".
824 * We don't need to distinguish between the two types of this mask, thus the one.
825 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
826#define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000)
827/** Set when we're currently inhibiting NMIs
828 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
829#define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000)
830
831/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
832 * we're close to the limit before starting a TB, as determined by
833 * iemGetTbFlagsForCurrentPc(). */
834#define IEMTB_F_CS_LIM_CHECKS UINT32_C(0x10000000)
835
836/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
837 *
838 * @note We skip all of IEM_F_X86_CTX_MASK, with the exception of SMM (which we
839 * don't implement), because we don't currently generate any context
840 * specific code - that's all handled in CIMPL functions.
841 *
842 * For the threaded recompiler we don't generate any CPL specific code
843 * either, but the native recompiler does for memory access (saves getting
844 * the CPL from fExec and turning it into IEMTLBE_F_PT_NO_USER).
845 * Since most OSes will not share code between rings, this shouldn't
846 * have any real effect on TB/memory/recompiling load.
847 */
848#define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM)
849/** @} */
850
851AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
852AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
853AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
854AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
855AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
856AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
857AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
858AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
859AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
860AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
861AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
862AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
863AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
864AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
865AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
866AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
867AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
868AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
869AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);
870
871AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
872AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
873AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
874AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
875AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
876AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
877AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
878AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
879AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
880AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
881AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
882AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);
883
884AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
885AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
886AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
887
888/** Native instruction type for use with the native code generator.
889 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
890#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
891typedef uint8_t IEMNATIVEINSTR;
892#else
893typedef uint32_t IEMNATIVEINSTR;
894#endif
895/** Pointer to a native instruction unit. */
896typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
897/** Pointer to a const native instruction unit. */
898typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
899
900/**
901 * A call for the threaded call table.
902 */
903typedef struct IEMTHRDEDCALLENTRY
904{
905 /** The function to call (IEMTHREADEDFUNCS). */
906 uint16_t enmFunction;
907 /** Instruction number in the TB (for statistics). */
908 uint8_t idxInstr;
909 uint8_t uUnused0;
910
911 /** Offset into IEMTB::pabOpcodes. */
912 uint16_t offOpcode;
913 /** The opcode length. */
914 uint8_t cbOpcode;
915 /** Index into IEMTB::aRanges. */
916 uint8_t idxRange;
917
918 /** Generic parameters. */
919 uint64_t auParams[3];
920} IEMTHRDEDCALLENTRY;
921AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
922/** Pointer to a threaded call entry. */
923typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
924/** Pointer to a const threaded call entry. */
925typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
926
927/**
928 * Native IEM TB 'function' typedef.
929 *
930 * This will throw/longjmp on occasion.
931 *
932 * @note AMD64 doesn't have that many non-volatile registers and does sport
933 * 32-bit address displacements, so we don't need pCtx.
934 *
935 * On ARM64 pCtx allows us to directly address the whole register
936 * context without requiring a separate indexing register holding the
937 * offset. This saves an instruction loading the offset for each guest
938 * CPU context access, at the cost of a non-volatile register.
939 * Fortunately, ARM64 has quite a lot more registers.
940 */
941typedef
942#ifdef RT_ARCH_AMD64
943int FNIEMTBNATIVE(PVMCPUCC pVCpu)
944#else
945int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
946#endif
947#if RT_CPLUSPLUS_PREREQ(201700)
948 IEM_NOEXCEPT_MAY_LONGJMP
949#endif
950 ;
951/** Pointer to a native IEM TB entry point function.
952 * This will throw/longjmp on occasion. */
953typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
954
955
956/**
957 * Translation block debug info entry type.
958 */
959typedef enum IEMTBDBGENTRYTYPE
960{
961 kIemTbDbgEntryType_Invalid = 0,
962 /** The entry is for marking a native code position.
963 * Entries following this all apply to this position. */
964 kIemTbDbgEntryType_NativeOffset,
965 /** The entry is for a new guest instruction. */
966 kIemTbDbgEntryType_GuestInstruction,
967 /** Marks the start of a threaded call. */
968 kIemTbDbgEntryType_ThreadedCall,
969 /** Marks the location of a label. */
970 kIemTbDbgEntryType_Label,
971 /** Info about a host register shadowing a guest register. */
972 kIemTbDbgEntryType_GuestRegShadowing,
973#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
974 /** Info about a host SIMD register shadowing a guest SIMD register. */
975 kIemTbDbgEntryType_GuestSimdRegShadowing,
976#endif
977#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
978 /** Info about a delayed RIP update. */
979 kIemTbDbgEntryType_DelayedPcUpdate,
980#endif
981 kIemTbDbgEntryType_End
982} IEMTBDBGENTRYTYPE;
983
984/**
985 * Translation block debug info entry.
986 */
987typedef union IEMTBDBGENTRY
988{
989 /** Plain 32-bit view. */
990 uint32_t u;
991
992 /** Generic view for getting at the type field. */
993 struct
994 {
995 /** IEMTBDBGENTRYTYPE */
996 uint32_t uType : 4;
997 uint32_t uTypeSpecific : 28;
998 } Gen;
999
1000 struct
1001 {
1002 /** kIemTbDbgEntryType_NativeOffset. */
1003 uint32_t uType : 4;
1004 /** Native code offset. */
1005 uint32_t offNative : 28;
1006 } NativeOffset;
1007
1008 struct
1009 {
1010 /** kIemTbDbgEntryType_GuestInstruction. */
1011 uint32_t uType : 4;
1012 uint32_t uUnused : 4;
1013 /** The IEM_F_XXX flags. */
1014 uint32_t fExec : 24;
1015 } GuestInstruction;
1016
1017 struct
1018 {
1019 /* kIemTbDbgEntryType_ThreadedCall. */
1020 uint32_t uType : 4;
1021 /** Set if the call was recompiled to native code, clear if just calling
1022 * threaded function. */
1023 uint32_t fRecompiled : 1;
1024 uint32_t uUnused : 11;
1025 /** The threaded call number (IEMTHREADEDFUNCS). */
1026 uint32_t enmCall : 16;
1027 } ThreadedCall;
1028
1029 struct
1030 {
1031 /* kIemTbDbgEntryType_Label. */
1032 uint32_t uType : 4;
1033 uint32_t uUnused : 4;
1034 /** The label type (IEMNATIVELABELTYPE). */
1035 uint32_t enmLabel : 8;
1036 /** The label data. */
1037 uint32_t uData : 16;
1038 } Label;
1039
1040 struct
1041 {
1042 /* kIemTbDbgEntryType_GuestRegShadowing. */
1043 uint32_t uType : 4;
1044 uint32_t uUnused : 4;
1045 /** The guest register being shadowed (IEMNATIVEGSTREG). */
1046 uint32_t idxGstReg : 8;
1047 /** The host new register number, UINT8_MAX if dropped. */
1048 uint32_t idxHstReg : 8;
1049 /** The previous host register number, UINT8_MAX if new. */
1050 uint32_t idxHstRegPrev : 8;
1051 } GuestRegShadowing;
1052
1053#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
1054 struct
1055 {
1056 /* kIemTbDbgEntryType_GuestSimdRegShadowing. */
1057 uint32_t uType : 4;
1058 uint32_t uUnused : 4;
1059 /** The guest register being shadowed (IEMNATIVEGSTSIMDREG). */
1060 uint32_t idxGstSimdReg : 8;
1061 /** The host new register number, UINT8_MAX if dropped. */
1062 uint32_t idxHstSimdReg : 8;
1063 /** The previous host register number, UINT8_MAX if new. */
1064 uint32_t idxHstSimdRegPrev : 8;
1065 } GuestSimdRegShadowing;
1066#endif
1067
1068#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
1069 struct
1070 {
1071 /* kIemTbDbgEntryType_DelayedPcUpdate. */
1072 uint32_t uType : 4;
1073 /* The instruction offset added to the program counter. */
1074 uint32_t offPc : 14;
1075 /** Number of instructions skipped. */
1076 uint32_t cInstrSkipped : 14;
1077 } DelayedPcUpdate;
1078#endif
1079
1080} IEMTBDBGENTRY;
1081AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
1082/** Pointer to a debug info entry. */
1083typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
1084/** Pointer to a const debug info entry. */
1085typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;
1086
1087/**
1088 * Translation block debug info.
1089 */
1090typedef struct IEMTBDBG
1091{
1092 /** Number of entries in aEntries. */
1093 uint32_t cEntries;
1094 /** Debug info entries. */
1095 RT_FLEXIBLE_ARRAY_EXTENSION
1096 IEMTBDBGENTRY aEntries[RT_FLEXIBLE_ARRAY];
1097} IEMTBDBG;
1098/** Pointer to TB debug info. */
1099typedef IEMTBDBG *PIEMTBDBG;
1100/** Pointer to const TB debug info. */
1101typedef IEMTBDBG const *PCIEMTBDBG;
1102
1103
1104/**
1105 * Translation block.
1106 *
1107 * The current plan is to just keep TBs and associated lookup hash table private
1108 * to each VCpu as that simplifies TB removal greatly (no races) and generally
1109 * avoids using expensive atomic primitives for updating lists and stuff.
1110 */
1111#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
1112typedef struct IEMTB
1113{
1114 /** Next block with the same hash table entry. */
1115 struct IEMTB *pNext;
1116 /** Usage counter. */
1117 uint32_t cUsed;
1118 /** The IEMCPU::msRecompilerPollNow last time it was used. */
1119 uint32_t msLastUsed;
1120
1121 /** @name What uniquely identifies the block.
1122 * @{ */
1123 RTGCPHYS GCPhysPc;
1124 /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
1125 uint32_t fFlags;
1126 union
1127 {
1128 struct
1129 {
1130 /**< Relevant CS X86DESCATTR_XXX bits. */
1131 uint16_t fAttr;
1132 } x86;
1133 };
1134 /** @} */
1135
1136 /** Number of opcode ranges. */
1137 uint8_t cRanges;
1138 /** Statistics: Number of instructions in the block. */
1139 uint8_t cInstructions;
1140
1141 /** Type specific info. */
1142 union
1143 {
1144 struct
1145 {
1146 /** The call sequence table. */
1147 PIEMTHRDEDCALLENTRY paCalls;
1148 /** Number of calls in paCalls. */
1149 uint16_t cCalls;
1150 /** Number of calls allocated. */
1151 uint16_t cAllocated;
1152 } Thrd;
1153 struct
1154 {
1155 /** The native instructions (PFNIEMTBNATIVE). */
1156 PIEMNATIVEINSTR paInstructions;
1157 /** Number of instructions pointed to by paInstructions. */
1158 uint32_t cInstructions;
1159 } Native;
1160 /** Generic view for zeroing when freeing. */
1161 struct
1162 {
1163 uintptr_t uPtr;
1164 uint32_t uData;
1165 } Gen;
1166 };
1167
1168 /** The allocation chunk this TB belongs to. */
1169 uint8_t idxAllocChunk;
1170 uint8_t bUnused;
1171
1172 /** Number of bytes of opcodes stored in pabOpcodes.
1173 * @todo this field isn't really needed, aRanges keeps the actual info. */
1174 uint16_t cbOpcodes;
1175 /** Pointer to the opcode bytes this block was recompiled from. */
1176 uint8_t *pabOpcodes;
1177
1178 /** Debug info if enabled.
1179 * This is only generated by the native recompiler. */
1180 PIEMTBDBG pDbgInfo;
1181
1182 /* --- 64 byte cache line end --- */
1183
1184 /** Opcode ranges.
1185 *
1186 * The opcode checkers and maybe TLB loading functions will use this to figure
1187 * out what to do. The parameter will specify an entry and the opcode offset to
1188 * start at and the minimum number of bytes to verify (instruction length).
1189 *
1190 * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
1191 * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
1192 * code TLB (must have a valid entry for that address) and scan the ranges to
1193 * locate the corresponding opcodes. Probably.
1194 */
1195 struct IEMTBOPCODERANGE
1196 {
1197 /** Offset within pabOpcodes. */
1198 uint16_t offOpcodes;
1199 /** Number of bytes. */
1200 uint16_t cbOpcodes;
1201 /** The page offset. */
1202 RT_GCC_EXTENSION
1203 uint16_t offPhysPage : 12;
1204 /** Unused bits. */
1205 RT_GCC_EXTENSION
1206 uint16_t u2Unused : 2;
1207 /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
1208 RT_GCC_EXTENSION
1209 uint16_t idxPhysPage : 2;
1210 } aRanges[8];
1211
1212 /** Physical pages that this TB covers.
1213 * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
1214 RTGCPHYS aGCPhysPages[2];
1215} IEMTB;
1216#pragma pack()
1217AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
1218AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
1219AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
1220AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
1221AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
1222AssertCompileMemberOffset(IEMTB, aRanges, 64);
1223AssertCompileMemberSize(IEMTB, aRanges[0], 6);
1224#if 1
1225AssertCompileSize(IEMTB, 128);
1226# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
1227#else
1228AssertCompileSize(IEMTB, 168);
1229# undef IEMTB_SIZE_IS_POWER_OF_TWO
1230#endif
1231
1232/** Pointer to a translation block. */
1233typedef IEMTB *PIEMTB;
1234/** Pointer to a const translation block. */
1235typedef IEMTB const *PCIEMTB;
1236
1237/**
1238 * A chunk of memory in the TB allocator.
1239 */
1240typedef struct IEMTBCHUNK
1241{
1242 /** Pointer to the translation blocks in this chunk. */
1243 PIEMTB paTbs;
1244#ifdef IN_RING0
1245 /** Allocation handle. */
1246 RTR0MEMOBJ hMemObj;
1247#endif
1248} IEMTBCHUNK;
1249
1250/**
1251 * A per-CPU translation block allocator.
1252 *
1253 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1254 * the length of the collision list, and of course also for cache line alignment
1255 * reasons, the TBs must be allocated with at least 64-byte alignment.
1256 * Memory is therefore allocated using one of the page aligned allocators.
1257 *
1258 *
1259 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1260 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1261 * that enables us to quickly calculate the allocation bitmap position when
1262 * freeing the translation block.
1263 */
1264typedef struct IEMTBALLOCATOR
1265{
1266 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1267 uint32_t uMagic;
1268
1269#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1270 /** Mask corresponding to cTbsPerChunk - 1. */
1271 uint32_t fChunkMask;
1272 /** Shift count corresponding to cTbsPerChunk. */
1273 uint8_t cChunkShift;
1274#else
1275 uint32_t uUnused;
1276 uint8_t bUnused;
1277#endif
1278 /** Number of chunks we're allowed to allocate. */
1279 uint8_t cMaxChunks;
1280 /** Number of chunks currently populated. */
1281 uint16_t cAllocatedChunks;
1282 /** Number of translation blocks per chunk. */
1283 uint32_t cTbsPerChunk;
1284 /** Chunk size. */
1285 uint32_t cbPerChunk;
1286
1287 /** The maximum number of TBs. */
1288 uint32_t cMaxTbs;
1289 /** Total number of TBs in the populated chunks.
1290 * (cAllocatedChunks * cTbsPerChunk) */
1291 uint32_t cTotalTbs;
1292 /** The current number of TBs in use.
1293 * The number of free TBs: cTotalTbs - cInUseTbs; */
1294 uint32_t cInUseTbs;
1295 /** Statistics: Number of the cInUseTbs that are native ones. */
1296 uint32_t cNativeTbs;
1297 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1298 uint32_t cThreadedTbs;
1299
1300 /** Where to start pruning TBs from when we're out.
1301 * See iemTbAllocatorAllocSlow for details. */
1302 uint32_t iPruneFrom;
1303 /** Hint about which bit to start scanning the bitmap from. */
1304 uint32_t iStartHint;
1305 /** Where to start pruning native TBs from when we're out of executable memory.
1306 * See iemTbAllocatorFreeupNativeSpace for details. */
1307 uint32_t iPruneNativeFrom;
1308 uint32_t uPadding;
1309
1310 /** Statistics: Number of TB allocation calls. */
1311 STAMCOUNTER StatAllocs;
1312 /** Statistics: Number of TB free calls. */
1313 STAMCOUNTER StatFrees;
1314 /** Statistics: Time spend pruning. */
1315 STAMPROFILE StatPrune;
1316 /** Statistics: Time spend pruning native TBs. */
1317 STAMPROFILE StatPruneNative;
1318
1319 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1320 PIEMTB pDelayedFreeHead;
1321
1322 /** Allocation chunks. */
1323 IEMTBCHUNK aChunks[256];
1324
1325 /** Allocation bitmap for the TBs in all possible chunks. */
1326 RT_FLEXIBLE_ARRAY_EXTENSION
1327 uint64_t bmAllocated[RT_FLEXIBLE_ARRAY];
1328} IEMTBALLOCATOR;
1329/** Pointer to a TB allocator. */
1330typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1331
1332/** Magic value for the TB allocator (Emmet Harley Cohen). */
1333#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
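
/* Illustrative sketch (simplified; pTbAllocator is an assumed PIEMTBALLOCATOR
 * local): IEMTB::idxAllocChunk lets the allocator get from a TB pointer back to
 * its bit in bmAllocated when freeing:
 * @code{.cpp}
    IEMTBCHUNK const *pChunk     = &pTbAllocator->aChunks[pTb->idxAllocChunk];
    uint32_t const    idxInChunk = (uint32_t)(pTb - pChunk->paTbs);
    uint32_t const    idxTb      = pTb->idxAllocChunk * pTbAllocator->cTbsPerChunk + idxInChunk;
    ASMBitClear(pTbAllocator->bmAllocated, idxTb);
 * @endcode
 */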
1334
1335
1336/**
1337 * A per-CPU translation block cache (hash table).
1338 *
1339 * The hash table is allocated once during IEM initialization and sized to double
1340 * the max TB count, rounded up to the nearest power of two (so we can use an
1341 * AND mask rather than a remainder division when hashing).
1342 */
1343typedef struct IEMTBCACHE
1344{
1345 /** Magic value (IEMTBCACHE_MAGIC). */
1346 uint32_t uMagic;
1347 /** Size of the hash table. This is a power of two. */
1348 uint32_t cHash;
1349 /** The mask corresponding to cHash. */
1350 uint32_t uHashMask;
1351 uint32_t uPadding;
1352
1353 /** @name Statistics
1354 * @{ */
1355 /** Number of collisions ever. */
1356 STAMCOUNTER cCollisions;
1357
1358 /** Statistics: Number of TB lookup misses. */
1359 STAMCOUNTER cLookupMisses;
1360 /** Statistics: Number of TB lookup hits (debug only). */
1361 STAMCOUNTER cLookupHits;
1362 STAMCOUNTER auPadding2[3];
1363 /** Statistics: Collision list length pruning. */
1364 STAMPROFILE StatPrune;
1365 /** @} */
1366
1367 /** The hash table itself.
1368 * @note The lower 6 bits of the pointer are used for keeping the collision
1369 * list length, so we can take action when it grows too long.
1370 * This works because TBs are allocated using a 64 byte (or
1371 * higher) alignment from page aligned chunks of memory, so the lower
1372 * 6 bits of the address will always be zero.
1373 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1374 */
1375 RT_FLEXIBLE_ARRAY_EXTENSION
1376 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1377} IEMTBCACHE;
1378/** Pointer to a per-CPU translation block cache. */
1379typedef IEMTBCACHE *PIEMTBCACHE;
1380
1381/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1382#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1383
1384/** The collision count mask for IEMTBCACHE::apHash entries. */
1385#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1386/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1387#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1388/** Combine a TB pointer and a collision list length into a value for an
1389 * IEMTBCACHE::apHash entry. */
1390#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1391/** Extract the TB pointer from a combined IEMTBCACHE::apHash entry
1392 * value. */
1393#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1394/** Extract the collision list length from a combined IEMTBCACHE::apHash
1395 * entry value. */
1396#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
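/*
 * Usage sketch, illustrative only: packing and unpacking an apHash entry,
 * relying on the 64 byte (or higher) TB alignment provided by the allocator
 * chunks:
 *      PIEMTB const pPacked = IEMTBCACHE_PTR_MAKE(pTb, 3);
 *      Assert(IEMTBCACHE_PTR_GET_TB(pPacked)    == pTb);
 *      Assert(IEMTBCACHE_PTR_GET_COUNT(pPacked) == 3);
 */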
1397
1398/**
1399 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1400 */
1401#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1402 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1403
1404/**
1405 * Calculates the hash table slot for a TB from physical PC address and TB
1406 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1407 */
1408#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1409 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
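/*
 * Illustrative lookup sketch, not the actual lookup code (which also updates
 * statistics and prunes overlong collision lists); it assumes the IEMTB::pNext
 * member chains the entries hashing to the same slot:
 *      uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
 *      for (PIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]); pTb; pTb = pTb->pNext)
 *          if (   pTb->GCPhysPc == GCPhysPc
 *              && (pTb->fFlags & IEMTB_F_KEY_MASK) == (fFlags & IEMTB_F_KEY_MASK))
 *              return pTb;
 */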
1410
1411
1412/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1413 *
1414 * These flags parallel the main IEM_CIMPL_F_BRANCH_XXX flags.
1415 *
1416 * @{ */
1417/** Value if no branching happened recently. */
1418#define IEMBRANCHED_F_NO UINT8_C(0x00)
1419/** Flag set if direct branch, clear if absolute or indirect. */
1420#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1421/** Flag set if indirect branch, clear if direct or relative. */
1422#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1423/** Flag set if relative branch, clear if absolute or indirect. */
1424#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1425/** Flag set if conditional branch, clear if unconditional. */
1426#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1427/** Flag set if it's a far branch. */
1428#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1429/** Flag set if the stack pointer is modified. */
1430#define IEMBRANCHED_F_STACK UINT8_C(0x20)
1431/** Flag set if the stack pointer and (maybe) the stack segment are modified. */
1432#define IEMBRANCHED_F_STACK_FAR UINT8_C(0x40)
1433/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero-byte relative jump. */
1434#define IEMBRANCHED_F_ZERO UINT8_C(0x80)
1435/** @} */
1436
1437
1438/**
1439 * The per-CPU IEM state.
1440 */
1441typedef struct IEMCPU
1442{
1443 /** Info status code that needs to be propagated to the IEM caller.
1444 * This cannot be passed internally, as it would complicate all success
1445 * checks within the interpreter making the code larger and almost impossible
1446 * to get right. Instead, we'll store status codes to pass on here. Each
1447 * source of these codes will perform appropriate sanity checks. */
1448 int32_t rcPassUp; /* 0x00 */
1449 /** Execution flag, IEM_F_XXX. */
1450 uint32_t fExec; /* 0x04 */
1451
1452 /** @name Decoder state.
1453 * @{ */
1454#ifdef IEM_WITH_CODE_TLB
1455 /** The offset of the next instruction byte. */
1456 uint32_t offInstrNextByte; /* 0x08 */
1457 /** The number of bytes available at pbInstrBuf for the current instruction.
1458 * This takes the max opcode length into account so that it doesn't need to be
1459 * checked separately. */
1460 uint32_t cbInstrBuf; /* 0x0c */
1461 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1462 * This can be NULL if the page isn't mappable for some reason, in which
1463 * case we'll do fallback stuff.
1464 *
1465 * If we're executing an instruction from a user specified buffer,
1466 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1467 * aligned pointer but pointer to the user data.
1468 *
1469 * For instructions crossing pages, this will start on the first page and be
1470 * advanced to the next page by the time we've decoded the instruction. This
1471 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1472 */
1473 uint8_t const *pbInstrBuf; /* 0x10 */
1474# if ARCH_BITS == 32
1475 uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
1476# endif
1477 /** The program counter corresponding to pbInstrBuf.
1478 * This is set to a non-canonical address when we need to invalidate it. */
1479 uint64_t uInstrBufPc; /* 0x18 */
1480 /** The guest physical address corresponding to pbInstrBuf. */
1481 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1482 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1483 * This takes the CS segment limit into account.
1484 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1485 uint16_t cbInstrBufTotal; /* 0x28 */
1486# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1487 /** Offset into pbInstrBuf of the first byte of the current instruction.
1488 * Can be negative to efficiently handle cross page instructions. */
1489 int16_t offCurInstrStart; /* 0x2a */
1490
1491 /** The prefix mask (IEM_OP_PRF_XXX). */
1492 uint32_t fPrefixes; /* 0x2c */
1493 /** The extra REX ModR/M register field bit (REX.R << 3). */
1494 uint8_t uRexReg; /* 0x30 */
1495 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1496 * (REX.B << 3). */
1497 uint8_t uRexB; /* 0x31 */
1498 /** The extra REX SIB index field bit (REX.X << 3). */
1499 uint8_t uRexIndex; /* 0x32 */
1500
1501 /** The effective segment register (X86_SREG_XXX). */
1502 uint8_t iEffSeg; /* 0x33 */
1503
1504 /** The offset of the ModR/M byte relative to the start of the instruction. */
1505 uint8_t offModRm; /* 0x34 */
1506
1507# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1508 /** The current offset into abOpcode. */
1509 uint8_t offOpcode; /* 0x35 */
1510# else
1511 uint8_t bUnused; /* 0x35 */
1512# endif
1513# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1514 uint8_t abOpaqueDecoderPart1[0x36 - 0x2a];
1515# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1516
1517#else /* !IEM_WITH_CODE_TLB */
1518# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1519 /** The size of what has currently been fetched into abOpcode. */
1520 uint8_t cbOpcode; /* 0x08 */
1521 /** The current offset into abOpcode. */
1522 uint8_t offOpcode; /* 0x09 */
1523 /** The offset of the ModR/M byte relative to the start of the instruction. */
1524 uint8_t offModRm; /* 0x0a */
1525
1526 /** The effective segment register (X86_SREG_XXX). */
1527 uint8_t iEffSeg; /* 0x0b */
1528
1529 /** The prefix mask (IEM_OP_PRF_XXX). */
1530 uint32_t fPrefixes; /* 0x0c */
1531 /** The extra REX ModR/M register field bit (REX.R << 3). */
1532 uint8_t uRexReg; /* 0x10 */
1533 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1534 * (REX.B << 3). */
1535 uint8_t uRexB; /* 0x11 */
1536 /** The extra REX SIB index field bit (REX.X << 3). */
1537 uint8_t uRexIndex; /* 0x12 */
1538
1539# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1540 uint8_t abOpaqueDecoderPart1[0x13 - 0x08];
1541# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1542#endif /* !IEM_WITH_CODE_TLB */
1543
1544#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1545 /** The effective operand mode. */
1546 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1547 /** The default addressing mode. */
1548 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1549 /** The effective addressing mode. */
1550 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1551 /** The default operand mode. */
1552 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1553
1554 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1555 uint8_t idxPrefix; /* 0x3a, 0x17 */
1556 /** 3rd VEX/EVEX/XOP register.
1557 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1558 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1559 /** The VEX/EVEX/XOP length field. */
1560 uint8_t uVexLength; /* 0x3c, 0x19 */
1561 /** Additional EVEX stuff. */
1562 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1563
1564# ifndef IEM_WITH_CODE_TLB
1565 /** Explicit alignment padding. */
1566 uint8_t abAlignment2a[1]; /* 0x1b */
1567# endif
1568 /** The FPU opcode (FOP). */
1569 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1570# ifndef IEM_WITH_CODE_TLB
1571 /** Explicit alignment padding. */
1572 uint8_t abAlignment2b[2]; /* 0x1e */
1573# endif
1574
1575 /** The opcode bytes. */
1576 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1577 /** Explicit alignment padding. */
1578# ifdef IEM_WITH_CODE_TLB
1579 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1580# else
1581 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1582# endif
1583
1584#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1585# ifdef IEM_WITH_CODE_TLB
1586 uint8_t abOpaqueDecoderPart2[0x4f - 0x36];
1587# else
1588 uint8_t abOpaqueDecoderPart2[0x4f - 0x13];
1589# endif
1590#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1591 /** @} */
1592
1593
1594 /** The number of active guest memory mappings. */
1595 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1596
1597 /** Records for tracking guest memory mappings. */
1598 struct
1599 {
1600 /** The address of the mapped bytes. */
1601 R3R0PTRTYPE(void *) pv;
1602 /** The access flags (IEM_ACCESS_XXX).
1603 * IEM_ACCESS_INVALID if the entry is unused. */
1604 uint32_t fAccess;
1605#if HC_ARCH_BITS == 64
1606 uint32_t u32Alignment4; /**< Alignment padding. */
1607#endif
1608 } aMemMappings[3]; /* 0x50 LB 0x30 */
1609
1610 /** Locking records for the mapped memory. */
1611 union
1612 {
1613 PGMPAGEMAPLOCK Lock;
1614 uint64_t au64Padding[2];
1615 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1616
1617 /** Bounce buffer info.
1618 * This runs in parallel to aMemMappings. */
1619 struct
1620 {
1621 /** The physical address of the first byte. */
1622 RTGCPHYS GCPhysFirst;
1623 /** The physical address of the second page. */
1624 RTGCPHYS GCPhysSecond;
1625 /** The number of bytes in the first page. */
1626 uint16_t cbFirst;
1627 /** The number of bytes in the second page. */
1628 uint16_t cbSecond;
1629 /** Whether it's unassigned memory. */
1630 bool fUnassigned;
1631 /** Explicit alignment padding. */
1632 bool afAlignment5[3];
1633 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1634
1635 /** The flags of the current exception / interrupt. */
1636 uint32_t fCurXcpt; /* 0xf8 */
1637 /** The current exception / interrupt. */
1638 uint8_t uCurXcpt; /* 0xfc */
1639 /** Exception / interrupt recursion depth. */
1640 int8_t cXcptRecursions; /* 0xfd */
1641
1642 /** The next unused mapping index.
1643 * @todo try find room for this up with cActiveMappings. */
1644 uint8_t iNextMapping; /* 0xfe */
1645 uint8_t abAlignment7[1];
1646
1647 /** Bounce buffer storage.
1648 * This runs in parallel to aMemMappings and aMemBbMappings. */
1649 struct
1650 {
1651 uint8_t ab[512];
1652 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1653
1654
1655 /** Pointer set jump buffer - ring-3 context. */
1656 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1657 /** Pointer set jump buffer - ring-0 context. */
1658 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1659
1660 /** @todo Should move this near @a fCurXcpt later. */
1661 /** The CR2 for the current exception / interrupt. */
1662 uint64_t uCurXcptCr2;
1663 /** The error code for the current exception / interrupt. */
1664 uint32_t uCurXcptErr;
1665
1666 /** @name Statistics
1667 * @{ */
1668 /** The number of instructions we've executed. */
1669 uint32_t cInstructions;
1670 /** The number of potential exits. */
1671 uint32_t cPotentialExits;
1672 /** The number of bytes of data or stack written (mostly for IEMExecOneEx).
1673 * This may contain uncommitted writes. */
1674 uint32_t cbWritten;
1675 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1676 uint32_t cRetInstrNotImplemented;
1677 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1678 uint32_t cRetAspectNotImplemented;
1679 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1680 uint32_t cRetInfStatuses;
1681 /** Counts other error statuses returned. */
1682 uint32_t cRetErrStatuses;
1683 /** Number of times rcPassUp has been used. */
1684 uint32_t cRetPassUpStatus;
1685 /** Number of times RZ left with instruction commit pending for ring-3. */
1686 uint32_t cPendingCommit;
1687 /** Number of misaligned (host sense) atomic instruction accesses. */
1688 uint32_t cMisalignedAtomics;
1689 /** Number of long jumps. */
1690 uint32_t cLongJumps;
1691 /** @} */
1692
1693 /** @name Target CPU information.
1694 * @{ */
1695#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1696 /** The target CPU. */
1697 uint8_t uTargetCpu;
1698#else
1699 uint8_t bTargetCpuPadding;
1700#endif
1701 /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
1702 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
1703 * native host support and the 2nd for when there is.
1704 *
1705 * The two values are typically indexed by a g_CpumHostFeatures bit.
1706 *
1707 * This is for instance used for the BSF & BSR instructions where AMD and
1708 * Intel CPUs produce different EFLAGS. */
1709 uint8_t aidxTargetCpuEflFlavour[2];
1710
1711 /** The CPU vendor. */
1712 CPUMCPUVENDOR enmCpuVendor;
1713 /** @} */
1714
1715 /** @name Host CPU information.
1716 * @{ */
1717 /** The CPU vendor. */
1718 CPUMCPUVENDOR enmHostCpuVendor;
1719 /** @} */
1720
1721 /** Counts RDMSR \#GP(0) LogRel(). */
1722 uint8_t cLogRelRdMsr;
1723 /** Counts WRMSR \#GP(0) LogRel(). */
1724 uint8_t cLogRelWrMsr;
1725 /** Alignment padding. */
1726 uint8_t abAlignment9[42];
1727
1728 /** @name Recompilation
1729 * @{ */
1730 /** Pointer to the current translation block.
1731 * This can either be one being executed or one being compiled. */
1732 R3PTRTYPE(PIEMTB) pCurTbR3;
1733#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
1734 /** Frame pointer for the last native TB to execute. */
1735 R3PTRTYPE(void *) pvTbFramePointerR3;
1736#else
1737 R3PTRTYPE(void *) pvUnusedR3;
1738#endif
1739 /** Fixed TB used for threaded recompilation.
1740 * This is allocated once with maxed-out sizes and re-used afterwards. */
1741 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1742 /** Pointer to the ring-3 TB cache for this EMT. */
1743 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
1744 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
1745 * The TBs are based on physical addresses, so this is needed to correlate
1746 * RIP to the opcode bytes stored in the TB (AMD-V / VT-x). */
1747 uint64_t uCurTbStartPc;
1748 /** Number of threaded TBs executed. */
1749 uint64_t cTbExecThreaded;
1750 /** Number of native TBs executed. */
1751 uint64_t cTbExecNative;
1752 /** Whether we need to check the opcode bytes for the current instruction.
1753 * This is set by a previous instruction if it modified memory or similar. */
1754 bool fTbCheckOpcodes;
1755 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1756 uint8_t fTbBranched;
1757 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1758 bool fTbCrossedPage;
1759 /** Whether to end the current TB. */
1760 bool fEndTb;
1761 /** Number of instructions before we need to emit an IRQ check call again.
1762 * This helps make sure we don't execute too long w/o checking for
1763 * interrupts and immediately following instructions that may enable
1764 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1765 * required to make sure we check following the next instruction as well, see
1766 * fTbCurInstrIsSti. */
1767 uint8_t cInstrTillIrqCheck;
1768 /** Indicates that the current instruction is an STI. This is set by the
1769 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1770 bool fTbCurInstrIsSti;
1771 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1772 uint16_t cbOpcodesAllocated;
1773 /** The current instruction number in a native TB.
1774 * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
1775 * and will be picked up by the TB execution loop. Only used when
1776 * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
1777 uint8_t idxTbCurInstr;
1778 /** Space reserved for recompiler data / alignment. */
1779 bool afRecompilerStuff1[3];
1780 /** The virtual sync time at the last timer poll call. */
1781 uint32_t msRecompilerPollNow;
1782 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1783 uint32_t fTbCurInstr;
1784 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1785 uint32_t fTbPrevInstr;
1786 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1787 RTGCPHYS GCPhysInstrBufPrev;
1788 /** Strict: Tracking skipped EFLAGS calculations. Any bits set here are
1789 * currently not up to date in EFLAGS. */
1790 uint32_t fSkippingEFlags;
1791 uint32_t au32Padding[1];
1792 /** Pointer to the ring-3 TB allocator for this EMT. */
1793 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1794 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1795 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1796 /** Pointer to the native recompiler state for ring-3. */
1797 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1798
1799 /** Statistics: Times TB execution was broken off before reaching the end. */
1800 STAMCOUNTER StatTbExecBreaks;
1801 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1802 STAMCOUNTER StatCheckIrqBreaks;
1803 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1804 STAMCOUNTER StatCheckModeBreaks;
1805 /** Statistics: Times a post jump target check missed and had to find new TB. */
1806 STAMCOUNTER StatCheckBranchMisses;
1807 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1808 STAMCOUNTER StatCheckNeedCsLimChecking;
1809 /** Native TB statistics: Number of fully recompiled TBs. */
1810 STAMCOUNTER StatNativeFullyRecompiledTbs;
1811 /** Threaded TB statistics: Number of instructions per TB. */
1812 STAMPROFILE StatTbThreadedInstr;
1813 /** Threaded TB statistics: Number of calls per TB. */
1814 STAMPROFILE StatTbThreadedCalls;
1815 /** Native TB statistics: Native code size per TB. */
1816 STAMPROFILE StatTbNativeCode;
1817 /** Native TB statistics: Profiling native recompilation. */
1818 STAMPROFILE StatNativeRecompilation;
1819 /** Native TB statistics: Number of calls per TB that were recompiled properly. */
1820 STAMPROFILE StatNativeCallsRecompiled;
1821 /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
1822 STAMPROFILE StatNativeCallsThreaded;
1823 /** Native recompiled execution: TLB hits for data fetches. */
1824 STAMCOUNTER StatNativeTlbHitsForFetch;
1825 /** Native recompiled execution: TLB hits for data stores. */
1826 STAMCOUNTER StatNativeTlbHitsForStore;
1827 /** Native recompiled execution: TLB hits for stack accesses. */
1828 STAMCOUNTER StatNativeTlbHitsForStack;
1829 /** Native recompiled execution: TLB hits for mapped accesses. */
1830 STAMCOUNTER StatNativeTlbHitsForMapped;
1831 /** Native recompiled execution: Code TLB misses for new page. */
1832 STAMCOUNTER StatNativeCodeTlbMissesNewPage;
1833 /** Native recompiled execution: Code TLB hits for new page. */
1834 STAMCOUNTER StatNativeCodeTlbHitsForNewPage;
1835 /** Native recompiled execution: Code TLB misses for new page with offset. */
1836 STAMCOUNTER StatNativeCodeTlbMissesNewPageWithOffset;
1837 /** Native recompiled execution: Code TLB hits for new page with offset. */
1838 STAMCOUNTER StatNativeCodeTlbHitsForNewPageWithOffset;
1839
1840 /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
1841 STAMCOUNTER StatNativeRegFindFree;
1842 /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
1843 * to free a variable. */
1844 STAMCOUNTER StatNativeRegFindFreeVar;
1845 /** Native recompiler: Number of times iemNativeRegAllocFindFree did
1846 * not need to free any variables. */
1847 STAMCOUNTER StatNativeRegFindFreeNoVar;
1848 /** Native recompiler: Liveness info freed shadowed guest registers in
1849 * iemNativeRegAllocFindFree. */
1850 STAMCOUNTER StatNativeRegFindFreeLivenessUnshadowed;
1851 /** Native recompiler: Liveness info helped with the allocation in
1852 * iemNativeRegAllocFindFree. */
1853 STAMCOUNTER StatNativeRegFindFreeLivenessHelped;
1854
1855 /** Native recompiler: Number of times status flags calc has been skipped. */
1856 STAMCOUNTER StatNativeEflArithmeticSkipped;
1857
1858 /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
1859 STAMCOUNTER StatNativeLivenessEflCfSkippable;
1860 /** Native recompiler: Number of opportunities to skip EFLAGS.PF updating. */
1861 STAMCOUNTER StatNativeLivenessEflPfSkippable;
1862 /** Native recompiler: Number of opportunities to skip EFLAGS.AF updating. */
1863 STAMCOUNTER StatNativeLivenessEflAfSkippable;
1864 /** Native recompiler: Number of opportunities to skip EFLAGS.ZF updating. */
1865 STAMCOUNTER StatNativeLivenessEflZfSkippable;
1866 /** Native recompiler: Number of opportunities to skip EFLAGS.SF updating. */
1867 STAMCOUNTER StatNativeLivenessEflSfSkippable;
1868 /** Native recompiler: Number of opportunities to skip EFLAGS.OF updating. */
1869 STAMCOUNTER StatNativeLivenessEflOfSkippable;
1870 /** Native recompiler: Number of required EFLAGS.CF updates. */
1871 STAMCOUNTER StatNativeLivenessEflCfRequired;
1872 /** Native recompiler: Number of required EFLAGS.PF updates. */
1873 STAMCOUNTER StatNativeLivenessEflPfRequired;
1874 /** Native recompiler: Number of required EFLAGS.AF updates. */
1875 STAMCOUNTER StatNativeLivenessEflAfRequired;
1876 /** Native recompiler: Number of required EFLAGS.ZF updates. */
1877 STAMCOUNTER StatNativeLivenessEflZfRequired;
1878 /** Native recompiler: Number of required EFLAGS.SF updates. */
1879 STAMCOUNTER StatNativeLivenessEflSfRequired;
1880 /** Native recompiler: Number of required EFLAGS.OF updates. */
1881 STAMCOUNTER StatNativeLivenessEflOfRequired;
1882 /** Native recompiler: Number of potentially delayable EFLAGS.CF updates. */
1883 STAMCOUNTER StatNativeLivenessEflCfDelayable;
1884 /** Native recompiler: Number of potentially delayable EFLAGS.PF updates. */
1885 STAMCOUNTER StatNativeLivenessEflPfDelayable;
1886 /** Native recompiler: Number of potentially delayable EFLAGS.AF updates. */
1887 STAMCOUNTER StatNativeLivenessEflAfDelayable;
1888 /** Native recompiler: Number of potentially delayable EFLAGS.ZF updates. */
1889 STAMCOUNTER StatNativeLivenessEflZfDelayable;
1890 /** Native recompiler: Number of potentially delayable EFLAGS.SF updates. */
1891 STAMCOUNTER StatNativeLivenessEflSfDelayable;
1892 /** Native recompiler: Number of potentially delayable EFLAGS.OF updates. */
1893 STAMCOUNTER StatNativeLivenessEflOfDelayable;
1894
1895 /** Native recompiler: Number of potential PC updates in total. */
1896 STAMCOUNTER StatNativePcUpdateTotal;
1897 /** Native recompiler: Number of PC updates which could be delayed. */
1898 STAMCOUNTER StatNativePcUpdateDelayed;
1899
1900#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
1901 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks. */
1902 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckPotential;
1903 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks. */
1904 STAMCOUNTER StatNativeMaybeSseXcptCheckPotential;
1905 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks. */
1906 STAMCOUNTER StatNativeMaybeAvxXcptCheckPotential;
1907
1908 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted. */
1909 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckOmitted;
1910 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted. */
1911 STAMCOUNTER StatNativeMaybeSseXcptCheckOmitted;
1912 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted. */
1913 STAMCOUNTER StatNativeMaybeAvxXcptCheckOmitted;
1914#endif
1915
1916 uint64_t au64Padding[3];
1917 /** @} */
1918
1919 /** Data TLB.
1920 * @remarks Must be 64-byte aligned. */
1921 IEMTLB DataTlb;
1922 /** Instruction TLB.
1923 * @remarks Must be 64-byte aligned. */
1924 IEMTLB CodeTlb;
1925
1926 /** Exception statistics. */
1927 STAMCOUNTER aStatXcpts[32];
1928 /** Interrupt statistics. */
1929 uint32_t aStatInts[256];
1930
1931#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
1932 /** Instruction statistics for ring-0/raw-mode. */
1933 IEMINSTRSTATS StatsRZ;
1934 /** Instruction statistics for ring-3. */
1935 IEMINSTRSTATS StatsR3;
1936# ifdef VBOX_WITH_IEM_RECOMPILER
1937 /** Statistics per threaded function call.
1938 * Updated by both the threaded and native recompilers. */
1939 uint32_t acThreadedFuncStats[0x5000 /*20480*/];
1940# endif
1941#endif
1942} IEMCPU;
1943AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
1944AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
1945AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
1946AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
1947AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
1948AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
1949
1950/** Pointer to the per-CPU IEM state. */
1951typedef IEMCPU *PIEMCPU;
1952/** Pointer to the const per-CPU IEM state. */
1953typedef IEMCPU const *PCIEMCPU;
1954
1955
1956/** @def IEM_GET_CTX
1957 * Gets the guest CPU context for the calling EMT.
1958 * @returns PCPUMCTX
1959 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1960 */
1961#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
1962
1963/** @def IEM_CTX_ASSERT
1964 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
1965 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1966 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
1967 */
1968#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
1969 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
1970 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
1971 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
1972
1973/** @def IEM_CTX_IMPORT_RET
1974 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1975 *
1976 * Will call the state keeper to import the bits as needed.
1977 *
1978 * Returns on import failure.
1979 *
1980 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1981 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1982 */
1983#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
1984 do { \
1985 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1986 { /* likely */ } \
1987 else \
1988 { \
1989 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1990 AssertRCReturn(rcCtxImport, rcCtxImport); \
1991 } \
1992 } while (0)
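/*
 * Typical usage sketch (illustrative only): make sure CR0 and the segment
 * registers are available before reading them, returning on import failure:
 *      IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SREG_MASK);
 *      if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE)
 *          ...
 */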
1993
1994/** @def IEM_CTX_IMPORT_NORET
1995 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1996 *
1997 * Will call the state keeper to import the bits as needed.
1998 *
1999 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2000 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2001 */
2002#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
2003 do { \
2004 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2005 { /* likely */ } \
2006 else \
2007 { \
2008 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2009 AssertLogRelRC(rcCtxImport); \
2010 } \
2011 } while (0)
2012
2013/** @def IEM_CTX_IMPORT_JMP
2014 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2015 *
2016 * Will call the state keeper to import the bits as needed.
2017 *
2018 * Jumps on import failure.
2019 *
2020 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2021 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2022 */
2023#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
2024 do { \
2025 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2026 { /* likely */ } \
2027 else \
2028 { \
2029 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2030 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
2031 } \
2032 } while (0)
2033
2034
2035
2036/** @def IEM_GET_TARGET_CPU
2037 * Gets the current IEMTARGETCPU value.
2038 * @returns IEMTARGETCPU value.
2039 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2040 */
2041#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
2042# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
2043#else
2044# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
2045#endif
2046
2047/** @def IEM_GET_INSTR_LEN
2048 * Gets the instruction length. */
2049#ifdef IEM_WITH_CODE_TLB
2050# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
2051#else
2052# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
2053#endif
2054
2055/** @def IEM_TRY_SETJMP
2056 * Wrapper around setjmp / try, hiding all the ugly differences.
2057 *
2058 * @note Use with extreme care as this is a fragile macro.
2059 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2060 * @param a_rcTarget The variable that should receive the status code in case
2061 * of a longjmp/throw.
2062 */
2063/** @def IEM_TRY_SETJMP_AGAIN
2064 * For when setjmp / try is used again in the same variable scope as a previous
2065 * IEM_TRY_SETJMP invocation.
2066 */
2067/** @def IEM_CATCH_LONGJMP_BEGIN
2068 * Start wrapper for catch / setjmp-else.
2069 *
2070 * This will set up a scope.
2071 *
2072 * @note Use with extreme care as this is a fragile macro.
2073 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2074 * @param a_rcTarget The variable that should receive the status code in case
2075 * of a longjmp/throw.
2076 */
2077/** @def IEM_CATCH_LONGJMP_END
2078 * End wrapper for catch / setjmp-else.
2079 *
2080 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
2081 * state.
2082 *
2083 * @note Use with extreme care as this is a fragile macro.
2084 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2085 */
2086#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
2087# ifdef IEM_WITH_THROW_CATCH
2088# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2089 a_rcTarget = VINF_SUCCESS; \
2090 try
2091# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2092 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
2093# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2094 catch (int rcThrown) \
2095 { \
2096 a_rcTarget = rcThrown
2097# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2098 } \
2099 ((void)0)
2100# else /* !IEM_WITH_THROW_CATCH */
2101# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2102 jmp_buf JmpBuf; \
2103 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2104 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2105 if ((rcStrict = setjmp(JmpBuf)) == 0)
2106# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2107 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2108 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2109 if ((rcStrict = setjmp(JmpBuf)) == 0)
2110# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2111 else \
2112 { \
2113 ((void)0)
2114# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2115 } \
2116 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
2117# endif /* !IEM_WITH_THROW_CATCH */
2118#endif /* IEM_WITH_SETJMP */
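/*
 * Illustrative call pattern for the wrappers above; the variable receiving the
 * status is conventionally named rcStrict (the setjmp variant assumes this):
 *      VBOXSTRICTRC rcStrict;
 *      IEM_TRY_SETJMP(pVCpu, rcStrict)
 *      {
 *          rcStrict = workerThatMayLongjmp(pVCpu); // hypothetical worker
 *      }
 *      IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *      // rcStrict now holds the status that was longjmp'ed / thrown.
 *      IEM_CATCH_LONGJMP_END(pVCpu);
 */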
2119
2120
2121/**
2122 * Shared per-VM IEM data.
2123 */
2124typedef struct IEM
2125{
2126 /** The VMX APIC-access page handler type. */
2127 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
2128#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
2129 /** Set if the CPUID host call functionality is enabled. */
2130 bool fCpuIdHostCall;
2131#endif
2132} IEM;
2133
2134
2135
2136/** @name IEM_ACCESS_XXX - Access details.
2137 * @{ */
2138#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
2139#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
2140#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
2141#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
2142#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
2143#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
2144#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
2145#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
2146#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
2147#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
2148/** The writes are partial, so initialize the bounce buffer with the
2149 * original RAM content. */
2150#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
2151/** Used in aMemMappings to indicate that the entry is bounce buffered. */
2152#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
2153/** Bounce buffer with ring-3 write pending, first page. */
2154#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
2155/** Bounce buffer with ring-3 write pending, second page. */
2156#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
2157/** Not locked, accessed via the TLB. */
2158#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
2159/** Atomic access.
2160 * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK
2161 * fallback for misaligned stuff. See @bugref{10547}. */
2162#define IEM_ACCESS_ATOMIC UINT32_C(0x00002000)
2163/** Valid bit mask. */
2164#define IEM_ACCESS_VALID_MASK UINT32_C(0x00003fff)
2165/** Shift count for the TLB flags (upper word). */
2166#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
2167
2168/** Atomic read+write data alias. */
2169#define IEM_ACCESS_DATA_ATOMIC (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC)
2170/** Read+write data alias. */
2171#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2172/** Write data alias. */
2173#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2174/** Read data alias. */
2175#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
2176/** Instruction fetch alias. */
2177#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
2178/** Stack write alias. */
2179#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2180/** Stack read alias. */
2181#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
2182/** Stack read+write alias. */
2183#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2184/** Read system table alias. */
2185#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
2186/** Read+write system table alias. */
2187#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
2188/** @} */
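/*
 * Decomposition sketch (illustrative only): the low byte of an access value
 * splits into a type part and a what part, e.g. for a stack push:
 *      uint32_t const fAccess = IEM_ACCESS_STACK_W;
 *      Assert((fAccess & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
 *      Assert((fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_STACK);
 */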
2189
2190/** @name Prefix constants (IEMCPU::fPrefixes)
2191 * @{ */
2192#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
2193#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
2194#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
2195#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
2196#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
2197#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
2198#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
2199
2200#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
2201#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
2202#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
2203
2204#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
2205#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
2206#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
2207
2208#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
2209#define IEM_OP_PRF_REX_B RT_BIT_32(25) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
2210#define IEM_OP_PRF_REX_X RT_BIT_32(26) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
2211#define IEM_OP_PRF_REX_R RT_BIT_32(27) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
2212/** Mask with all the REX prefix flags.
2213 * This is generally for use when needing to undo the REX prefixes when they
2214 * are followed by legacy prefixes and therefore do not immediately precede
2215 * the first opcode byte.
2216 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
2217#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
2218
2219#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indicates VEX prefix. */
2220#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indicates EVEX prefix. */
2221#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indicates XOP prefix. */
2222/** @} */
2223
2224/** @name IEMOPFORM_XXX - Opcode forms
2225 * @note These are ORed together with IEMOPHINT_XXX.
2226 * @{ */
2227/** ModR/M: reg, r/m */
2228#define IEMOPFORM_RM 0
2229/** ModR/M: reg, r/m (register) */
2230#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2231/** ModR/M: reg, r/m (memory) */
2232#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2233/** ModR/M: reg, r/m, imm */
2234#define IEMOPFORM_RMI 1
2235/** ModR/M: reg, r/m (register), imm */
2236#define IEMOPFORM_RMI_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2237/** ModR/M: reg, r/m (memory), imm */
2238#define IEMOPFORM_RMI_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2239/** ModR/M: r/m, reg */
2240#define IEMOPFORM_MR 2
2241/** ModR/M: r/m (register), reg */
2242#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2243/** ModR/M: r/m (memory), reg */
2244#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2245/** ModR/M: r/m, reg, imm */
2246#define IEMOPFORM_MRI 3
2247/** ModR/M: r/m (register), reg, imm */
2248#define IEMOPFORM_MRI_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2249/** ModR/M: r/m (memory), reg, imm */
2250#define IEMOPFORM_MRI_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2251/** ModR/M: r/m only */
2252#define IEMOPFORM_M 4
2253/** ModR/M: r/m only (register). */
2254#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
2255/** ModR/M: r/m only (memory). */
2256#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
2257/** ModR/M: r/m, imm */
2258#define IEMOPFORM_MI 5
2259/** ModR/M: r/m (register), imm */
2260#define IEMOPFORM_MI_REG (IEMOPFORM_MI | IEMOPFORM_MOD3)
2261/** ModR/M: r/m (memory), imm */
2262#define IEMOPFORM_MI_MEM (IEMOPFORM_MI | IEMOPFORM_NOT_MOD3)
2263/** ModR/M: r/m, 1 (shift and rotate instructions) */
2264#define IEMOPFORM_M1 6
2265/** ModR/M: r/m (register), 1. */
2266#define IEMOPFORM_M1_REG (IEMOPFORM_M1 | IEMOPFORM_MOD3)
2267/** ModR/M: r/m (memory), 1. */
2268#define IEMOPFORM_M1_MEM (IEMOPFORM_M1 | IEMOPFORM_NOT_MOD3)
2269/** ModR/M: r/m, CL (shift and rotate instructions)
2270 * @todo This should just've been a generic fixed register. But the python
2271 * code needs more convincing. */
2272#define IEMOPFORM_M_CL 7
2273/** ModR/M: r/m (register), CL. */
2274#define IEMOPFORM_M_CL_REG (IEMOPFORM_M_CL | IEMOPFORM_MOD3)
2275/** ModR/M: r/m (memory), CL. */
2276#define IEMOPFORM_M_CL_MEM (IEMOPFORM_M_CL | IEMOPFORM_NOT_MOD3)
2277/** ModR/M: reg only */
2278#define IEMOPFORM_R 8
2279
2280/** VEX+ModR/M: reg, r/m */
2281#define IEMOPFORM_VEX_RM 16
2282/** VEX+ModR/M: reg, r/m (register) */
2283#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
2284/** VEX+ModR/M: reg, r/m (memory) */
2285#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
2286/** VEX+ModR/M: r/m, reg */
2287#define IEMOPFORM_VEX_MR 17
2288/** VEX+ModR/M: r/m (register), reg */
2289#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
2290/** VEX+ModR/M: r/m (memory), reg */
2291#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
2292/** VEX+ModR/M: r/m only */
2293#define IEMOPFORM_VEX_M 18
2294/** VEX+ModR/M: r/m only (register). */
2295#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
2296/** VEX+ModR/M: r/m only (memory). */
2297#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
2298/** VEX+ModR/M: reg only */
2299#define IEMOPFORM_VEX_R 19
2300/** VEX+ModR/M: reg, vvvv, r/m */
2301#define IEMOPFORM_VEX_RVM 20
2302/** VEX+ModR/M: reg, vvvv, r/m (register). */
2303#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
2304/** VEX+ModR/M: reg, vvvv, r/m (memory). */
2305#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
2306/** VEX+ModR/M: reg, r/m, vvvv */
2307#define IEMOPFORM_VEX_RMV 21
2308/** VEX+ModR/M: reg, r/m, vvvv (register). */
2309#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
2310/** VEX+ModR/M: reg, r/m, vvvv (memory). */
2311#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
2312/** VEX+ModR/M: reg, r/m, imm8 */
2313#define IEMOPFORM_VEX_RMI 22
2314/** VEX+ModR/M: reg, r/m, imm8 (register). */
2315#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
2316/** VEX+ModR/M: reg, r/m, imm8 (memory). */
2317#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
2318/** VEX+ModR/M: r/m, vvvv, reg */
2319#define IEMOPFORM_VEX_MVR 23
2320/** VEX+ModR/M: r/m, vvvv, reg (register) */
2321#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
2322/** VEX+ModR/M: r/m, vvvv, reg (memory) */
2323#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
2324/** VEX+ModR/M+/n: vvvv, r/m */
2325#define IEMOPFORM_VEX_VM 24
2326/** VEX+ModR/M+/n: vvvv, r/m (register) */
2327#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
2328/** VEX+ModR/M+/n: vvvv, r/m (memory) */
2329#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
2330/** VEX+ModR/M+/n: vvvv, r/m, imm8 */
2331#define IEMOPFORM_VEX_VMI 25
2332/** VEX+ModR/M+/n: vvvv, r/m, imm8 (register) */
2333#define IEMOPFORM_VEX_VMI_REG (IEMOPFORM_VEX_VMI | IEMOPFORM_MOD3)
2334/** VEX+ModR/M+/n: vvvv, r/m, imm8 (memory) */
2335#define IEMOPFORM_VEX_VMI_MEM (IEMOPFORM_VEX_VMI | IEMOPFORM_NOT_MOD3)
2336
2337/** Fixed register instruction, no R/M. */
2338#define IEMOPFORM_FIXED 32
2339
2340/** The r/m is a register. */
2341#define IEMOPFORM_MOD3 RT_BIT_32(8)
2342/** The r/m is a memory access. */
2343#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
2344/** @} */
2345
2346/** @name IEMOPHINT_XXX - Additional Opcode Hints
2347 * @note These are ORed together with IEMOPFORM_XXX.
2348 * @{ */
2349/** Ignores the operand size prefix (66h). */
2350#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
2351/** Ignores REX.W (aka WIG). */
2352#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
2353/** Both the operand size prefixes (66h + REX.W) are ignored. */
2354#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
2355/** Allowed with the lock prefix. */
2356#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
2357/** The VEX.L value is ignored (aka LIG). */
2358#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
2359/** The VEX.L value must be zero (i.e. 128-bit width only). */
2360#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
2361/** The VEX.V value must be zero. */
2362#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(14)
2363
2364/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
2365#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
2366/** @} */
2367
2368/**
2369 * Possible hardware task switch sources.
2370 */
2371typedef enum IEMTASKSWITCH
2372{
2373 /** Task switch caused by an interrupt/exception. */
2374 IEMTASKSWITCH_INT_XCPT = 1,
2375 /** Task switch caused by a far CALL. */
2376 IEMTASKSWITCH_CALL,
2377 /** Task switch caused by a far JMP. */
2378 IEMTASKSWITCH_JUMP,
2379 /** Task switch caused by an IRET. */
2380 IEMTASKSWITCH_IRET
2381} IEMTASKSWITCH;
2382AssertCompileSize(IEMTASKSWITCH, 4);
2383
2384/**
2385 * Possible CrX load (write) sources.
2386 */
2387typedef enum IEMACCESSCRX
2388{
2389 /** CrX access caused by 'mov crX' instruction. */
2390 IEMACCESSCRX_MOV_CRX,
2391 /** CrX (CR0) write caused by 'lmsw' instruction. */
2392 IEMACCESSCRX_LMSW,
2393 /** CrX (CR0) write caused by 'clts' instruction. */
2394 IEMACCESSCRX_CLTS,
2395 /** CrX (CR0) read caused by 'smsw' instruction. */
2396 IEMACCESSCRX_SMSW
2397} IEMACCESSCRX;
2398
2399#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2400/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
2401 *
2402 * These flags provide further context to SLAT page-walk failures that could not be
2403 * determined by PGM (e.g, PGM is not privy to memory access permissions).
2404 *
2405 * @{
2406 */
2407/** Translating a nested-guest linear address failed accessing a nested-guest
2408 * physical address. */
2409# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
2410/** Translating a nested-guest linear address failed accessing a
2411 * paging-structure entry or updating accessed/dirty bits. */
2412# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
2413/** @} */
2414
2415DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
2416# ifndef IN_RING3
2417DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
2418# endif
2419#endif
2420
2421/**
2422 * Indicates to the verifier that the given flag set is undefined.
2423 *
2424 * Can be invoked again to add more flags.
2425 *
2426 * This is a NOOP if the verifier isn't compiled in.
2427 *
2428 * @note We're temporarily keeping this until code is converted to new
2429 * disassembler style opcode handling.
2430 */
2431#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2432
2433
2434/** @def IEM_DECL_IMPL_TYPE
2435 * For typedef'ing an instruction implementation function.
2436 *
2437 * @param a_RetType The return type.
2438 * @param a_Name The name of the type.
2439 * @param a_ArgList The argument list enclosed in parentheses.
2440 */
2441
2442/** @def IEM_DECL_IMPL_DEF
2443 * For defining an instruction implementation function.
2444 *
2445 * @param a_RetType The return type.
2446 * @param a_Name The name of the function.
2447 * @param a_ArgList The argument list enclosed in parentheses.
2448 */
2449
2450#if defined(__GNUC__) && defined(RT_ARCH_X86)
2451# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2452 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
2453# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2454 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2455# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2456 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2457
2458#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
2459# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2460 a_RetType (__fastcall a_Name) a_ArgList
2461# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2462 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2463# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2464 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2465
2466#elif __cplusplus >= 201700 /* P0012R1 support */
2467# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2468 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
2469# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2470 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2471# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2472 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2473
2474#else
2475# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2476 a_RetType (VBOXCALL a_Name) a_ArgList
2477# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2478 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2479# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2480 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2481
2482#endif
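/*
 * Illustrative sketch of how the three macros above are meant to be used for a
 * hypothetical worker (the names are made up, not part of this file):
 *      typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLEXAMPLEU32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pEFlags));
 *      IEM_DECL_IMPL_PROTO(void, iemAImpl_example_u32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pEFlags));
 *      IEM_DECL_IMPL_DEF(void, iemAImpl_example_u32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pEFlags))
 *      {
 *          *puDst += uSrc; // EFLAGS updating omitted for brevity.
 *      }
 */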
2483
2484/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
2485RT_C_DECLS_BEGIN
2486extern uint8_t const g_afParity[256];
2487RT_C_DECLS_END
2488
2489
2490/** @name Arithmetic assignment operations on bytes (binary).
2491 * @{ */
2492typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2493typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
2494FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
2495FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
2496FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
2497FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
2498FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
2499FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
2500FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
2501/** @} */
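/*
 * Call sketch (illustrative only): a binary byte worker updates its
 * destination operand and EFLAGS in place:
 *      uint8_t  uDst    = 0x7f;
 *      uint32_t fEFlags = 0;
 *      iemAImpl_add_u8(&uDst, 1, &fEFlags); // uDst is now 0x80; OF and SF will be set in fEFlags.
 */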
2502
2503/** @name Arithmetic assignment operations on words (binary).
2504 * @{ */
2505typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2506typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
2507FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
2508FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
2509FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
2510FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
2511FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
2512FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
2513FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
2514/** @} */
2515
2516/** @name Arithmetic assignment operations on double words (binary).
2517 * @{ */
2518typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2519typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
2520FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
2521FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
2522FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
2523FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
2524FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
2525FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
2526FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
2527FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
2528FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
2529FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
2530/** @} */
2531
2532/** @name Arithmetic assignment operations on quad words (binary).
2533 * @{ */
2534typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2535typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
2536FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
2537FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
2538FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
2539FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
2540FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
2541FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
2542FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
2543FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
2544FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
2545FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
2546/** @} */
2547
2548typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU8,(uint8_t const *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2549typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
2550typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU16,(uint16_t const *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2551typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
2552typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU32,(uint32_t const *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2553typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
2554typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU64,(uint64_t const *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2555typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
2556
2557/** @name Compare operations (thrown in with the binary ops).
2558 * @{ */
2559FNIEMAIMPLBINROU8 iemAImpl_cmp_u8;
2560FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
2561FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
2562FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
2563/** @} */
2564
2565/** @name Test operations (thrown in with the binary ops).
2566 * @{ */
2567FNIEMAIMPLBINROU8 iemAImpl_test_u8;
2568FNIEMAIMPLBINROU16 iemAImpl_test_u16;
2569FNIEMAIMPLBINROU32 iemAImpl_test_u32;
2570FNIEMAIMPLBINROU64 iemAImpl_test_u64;
2571/** @} */
2572
2573/** @name Bit operations (thrown in with the binary ops).
2574 * @{ */
2575FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
2576FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
2577FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
2578FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
2579FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
2580FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
2581FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
2582FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
2583FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
2584FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
2585FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
2586FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
2587/** @} */
2588
2589/** @name Arithmetic three operand operations on double words (binary).
2590 * @{ */
2591typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
2592typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
2593FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
2594FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
2595FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
2596/** @} */
2597
2598/** @name Arithmetic three operand operations on quad words (binary).
2599 * @{ */
2600typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
2601typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
2602FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
2603FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
2604FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
2605/** @} */
2606
2607/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
2608 * @{ */
2609typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
2610typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
2611FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
2612FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
2613FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
2614FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
2615FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
2616FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
2617/** @} */
2618
2619/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
2620 * @{ */
2621typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
2622typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
2623FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
2624FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
2625FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
2626FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
2627FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
2628FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
2629/** @} */
2630
2631/** @name MULX 32-bit and 64-bit.
2632 * @{ */
2633typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
2634typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
2635FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
2636
2637typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
2638typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
2639FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
2640/** @} */
2641
2642
2643/** @name Exchange memory with register operations.
2644 * @{ */
2645IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2646IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2647IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2648IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2649IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2650IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2651IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2652IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2653/** @} */
2654
2655/** @name Exchange and add operations.
2656 * @{ */
2657IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2658IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2659IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2660IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2661IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2662IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2663IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2664IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2665/** @} */
2666
2667/** @name Compare and exchange.
2668 * @{ */
2669IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2670IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2671IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2672IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2673IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2674IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2675#if ARCH_BITS == 32
2676IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2677IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2678#else
2679IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2680IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2681#endif
2682IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2683 uint32_t *pEFlags));
2684IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2685 uint32_t *pEFlags));
2686IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2687 uint32_t *pEFlags));
2688IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2689 uint32_t *pEFlags));
2690#ifndef RT_ARCH_ARM64
2691IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
2692 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
2693#endif
2694/** @} */
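/*
 * Example: the CMPXCHG helpers implement the usual compare-and-exchange
 * semantics.  A rough sketch for the 32-bit unlocked variant (hypothetical
 * name); the EFLAGS update is reduced to ZF here, whereas the real helpers
 * compute the full set of arithmetic flags as for a CMP.
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_example,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags))
 *     {
 *         if (*pu32Dst == *puEax)
 *         {
 *             *pu32Dst  = uSrcReg;                     // equal: store the source register
 *             *pEFlags |= X86_EFL_ZF;
 *         }
 *         else
 *         {
 *             *puEax    = *pu32Dst;                    // not equal: load the accumulator from the destination
 *             *pEFlags &= ~(uint32_t)X86_EFL_ZF;
 *         }
 *     }
 */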
2695
2696/** @name Memory ordering
2697 * @{ */
2698typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
2699typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
2700IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
2701IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
2702IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
2703#ifndef RT_ARCH_ARM64
2704IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
2705#endif
2706/** @} */
2707
2708/** @name Double precision shifts
2709 * @{ */
2710typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
2711typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
2712typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
2713typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
2714typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
2715typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
2716FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
2717FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
2718FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
2719FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
2720FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
2721FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
2722/** @} */
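/*
 * Example: SHLD/SHRD shift a double-width value assembled from destination and
 * source.  A simplified sketch of the 32-bit SHLD data path (hypothetical
 * name); the count is masked modulo 32 and the EFLAGS update is omitted, which
 * is precisely where the _amd/_intel variants differ.
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_shld_u32_example,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags))
 *     {
 *         RT_NOREF(pEFlags);                           // flag calculation intentionally left out of the sketch
 *         cShift &= 31;
 *         if (cShift)
 *             *pu32Dst = (*pu32Dst << cShift) | (u32Src >> (32 - cShift));
 *     }
 */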
2723
2724
2725/** @name Bit search operations (thrown in with the binary ops).
2726 * @{ */
2727FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
2728FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
2729FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
2730FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
2731FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
2732FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
2733FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
2734FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
2735FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
2736FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
2737FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
2738FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
2739FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
2740FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
2741FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
2742/** @} */
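/*
 * Example: a sketch of the POPCNT fallback logic for 32-bit operands
 * (hypothetical name).  Architecturally POPCNT clears OF/SF/AF/CF/PF and sets
 * ZF for a zero source, which is what the flag handling below assumes.
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_popcnt_u32_example,(uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags))
 *     {
 *         uint32_t cBits = 0;
 *         for (uint32_t uTmp = u32Src; uTmp; uTmp &= uTmp - 1)   // clears the lowest set bit each iteration
 *             cBits++;
 *         *pu32Dst = cBits;
 *         *pEFlags = (*pEFlags & ~(uint32_t)X86_EFL_STATUS_BITS) | (u32Src ? 0 : X86_EFL_ZF);
 *     }
 */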
2743
2744/** @name Signed multiplication operations (thrown in with the binary ops).
2745 * @{ */
2746FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
2747FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
2748FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
2749/** @} */
2750
2751/** @name Arithmetic assignment operations on bytes (unary).
2752 * @{ */
2753typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
2754typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
2755FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
2756FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
2757FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
2758FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
2759/** @} */
2760
2761/** @name Arithmetic assignment operations on words (unary).
2762 * @{ */
2763typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
2764typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
2765FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
2766FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
2767FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
2768FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
2769/** @} */
2770
2771/** @name Arithmetic assignment operations on double words (unary).
2772 * @{ */
2773typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
2774typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
2775FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
2776FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
2777FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
2778FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
2779/** @} */
2780
2781/** @name Arithmetic assignment operations on quad words (unary).
2782 * @{ */
2783typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
2784typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
2785FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
2786FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
2787FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
2788FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
2789/** @} */
2790
2791
2792/** @name Shift operations on bytes (Group 2).
2793 * @{ */
2794typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
2795typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
2796FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
2797FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
2798FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
2799FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
2800FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
2801FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
2802FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
2803/** @} */
2804
2805/** @name Shift operations on words (Group 2).
2806 * @{ */
2807typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
2808typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
2809FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
2810FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
2811FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
2812FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
2813FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
2814FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
2815FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
2816/** @} */
2817
2818/** @name Shift operations on double words (Group 2).
2819 * @{ */
2820typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
2821typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
2822FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
2823FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
2824FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
2825FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
2826FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
2827FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
2828FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
2829/** @} */
2830
2831/** @name Shift operations on quad words (Group 2).
2832 * @{ */
2833typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
2834typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
2835FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
2836FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
2837FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
2838FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
2839FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
2840FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
2841FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
2842/** @} */
2843
2844/** @name Multiplication and division operations.
2845 * @{ */
2846typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
2847typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
2848FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
2849FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
2850FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
2851FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
2852
2853typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
2854typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
2855FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
2856FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
2857FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
2858FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
2859
2860typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
2861typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
2862FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
2863FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
2864FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
2865FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
2866
2867typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
2868typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
2869FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
2870FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
2871FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
2872FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
2873/** @} */
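/*
 * Example: unlike most helpers the multiplication/division workers return an
 * int so the caller can raise #DE.  A rough sketch of the 8-bit DIV data path
 * (hypothetical name; it assumes 0 means success and a negative status
 * requests the divide-error exception, and it does not model the largely
 * undefined EFLAGS result).
 *
 *     IEM_DECL_IMPL_DEF(int, iemAImpl_div_u8_example,(uint16_t *pu16AX, uint8_t u8Divisor, uint32_t *pEFlags))
 *     {
 *         RT_NOREF(pEFlags);
 *         if (!u8Divisor)
 *             return -1;                               // divide by zero -> #DE
 *         uint16_t const uQuotient  = *pu16AX / u8Divisor;
 *         uint16_t const uRemainder = *pu16AX % u8Divisor;
 *         if (uQuotient > UINT8_MAX)
 *             return -1;                               // quotient does not fit in AL -> #DE
 *         *pu16AX = (uint16_t)(uQuotient | (uRemainder << 8));   // AL = quotient, AH = remainder
 *         return 0;
 *     }
 */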
2874
2875/** @name Byte Swap.
2876 * @{ */
2877IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
2878IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
2879IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
2880/** @} */
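/*
 * Example: a trivial sketch of the 32-bit BSWAP helper (hypothetical name).
 * The 16-bit variant above takes a 32-bit register precisely because BSWAP
 * with a 16-bit operand leaves the result undefined on real hardware.
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_bswap_u32_example,(uint32_t *pu32Dst))
 *     {
 *         uint32_t const u = *pu32Dst;
 *         *pu32Dst = (u << 24) | ((u & UINT32_C(0xff00)) << 8) | ((u >> 8) & UINT32_C(0xff00)) | (u >> 24);
 *     }
 */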
2881
2882/** @name Misc.
2883 * @{ */
2884FNIEMAIMPLBINU16 iemAImpl_arpl;
2885/** @} */
2886
2887/** @name RDRAND and RDSEED
2888 * @{ */
2889typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
2890typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
2891typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
2892typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
2893typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
2894typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;
2895
2896FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
2897FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
2898FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
2899FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
2900FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
2901FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
2902/** @} */
2903
2904/** @name ADOX and ADCX
2905 * @{ */
2906FNIEMAIMPLBINU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
2907FNIEMAIMPLBINU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
2908FNIEMAIMPLBINU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
2909FNIEMAIMPLBINU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
2910/** @} */
2911
2912/** @name FPU operations taking a 32-bit float argument
2913 * @{ */
2914typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2915 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
2916typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
2917
2918typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2919 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
2920typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
2921
2922FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
2923FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
2924FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
2925FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
2926FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
2927FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
2928FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
2929
2930IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
2931IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2932 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
2933/** @} */
2934
2935/** @name FPU operations taking a 64-bit float argument
2936 * @{ */
2937typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2938 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2939typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
2940
2941typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2942 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2943typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
2944
2945FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
2946FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
2947FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
2948FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
2949FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
2950FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
2951FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
2952
2953IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
2954IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2955                                                 PRTFLOAT64U pr64Val, PCRTFLOAT80U pr80Val));
2956/** @} */
2957
2958/** @name FPU operations taking an 80-bit float argument
2959 * @{ */
2960typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2961 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2962typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
2963FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
2964FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
2965FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
2966FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
2967FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
2968FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
2969FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
2970FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
2971FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
2972
2973FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80, iemAImpl_fpatan_r80_by_r80_amd, iemAImpl_fpatan_r80_by_r80_intel;
2974FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80, iemAImpl_fyl2x_r80_by_r80_amd, iemAImpl_fyl2x_r80_by_r80_intel;
2975FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
2976
2977typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2978 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2979typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
2980FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
2981FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
2982
2983typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
2984 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2985typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
2986FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
2987FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
2988
2989typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
2990typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
2991FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
2992FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
2993FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
2994FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
2995FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
2996FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
2997FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
2998
2999typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
3000typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
3001FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
3002FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
3003
3004typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
3005typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
3006FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
3007FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
3008FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
3009FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
3010FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
3011FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
3012FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
3013
3014typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
3015 PCRTFLOAT80U pr80Val));
3016typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
3017FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
3018FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
3019FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
3020
3021IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
3022IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3023 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
3024
3025IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
3026IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3027 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
3028
3029/** @} */
3030
3031/** @name FPU operations taking a 16-bit signed integer argument
3032 * @{ */
3033typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3034 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3035typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
3036typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3037 int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
3038typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
3039
3040FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
3041FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
3042FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
3043FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
3044FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
3045FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
3046
3047typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3048 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3049typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
3050FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;
3051
3052IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
3053FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
3054FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
3055/** @} */
3056
3057/** @name FPU operations taking a 32-bit signed integer argument
3058 * @{ */
3059typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3060 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3061typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
3062typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3063 int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
3064typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
3065
3066FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
3067FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
3068FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
3069FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
3070FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
3071FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
3072
3073typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3074 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3075typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
3076FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;
3077
3078IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
3079FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
3080FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
3081/** @} */
3082
3083/** @name FPU operations taking a 64-bit signed integer argument
3084 * @{ */
3085typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3086 int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
3087typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
3088
3089IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
3090FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
3091FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
3092/** @} */
3093
3094
3095/** Temporary type representing a 256-bit vector register. */
3096typedef struct { uint64_t au64[4]; } IEMVMM256;
3097/** Temporary type pointing to a 256-bit vector register. */
3098typedef IEMVMM256 *PIEMVMM256;
3099/** Temporary type pointing to a const 256-bit vector register. */
3100typedef IEMVMM256 const *PCIEMVMM256;
3101
3102
3103/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
3104 * @{ */
3105typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
3106typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
3107typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
3108typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
3109typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3110typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
3111typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3112typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
3113typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
3114typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
3115typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
3116typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
3117typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3118typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
3119typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3120typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
3121typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
3122typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
3123FNIEMAIMPLMEDIAF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
3124FNIEMAIMPLMEDIAF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
3125FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
3126FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
3127FNIEMAIMPLMEDIAF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
3128FNIEMAIMPLMEDIAF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
3129FNIEMAIMPLMEDIAF2U64 iemAImpl_paddd_u64;
3130FNIEMAIMPLMEDIAF2U64 iemAImpl_paddq_u64;
3131FNIEMAIMPLMEDIAF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
3132FNIEMAIMPLMEDIAF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
3133FNIEMAIMPLMEDIAF2U64 iemAImpl_psubd_u64;
3134FNIEMAIMPLMEDIAF2U64 iemAImpl_psubq_u64;
3135FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddwd_u64, iemAImpl_pmaddwd_u64_fallback;
3136FNIEMAIMPLMEDIAF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
3137FNIEMAIMPLMEDIAF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
3138FNIEMAIMPLMEDIAF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
3139FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
3140FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
3141FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
3142FNIEMAIMPLMEDIAF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
3143FNIEMAIMPLMEDIAF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
3144FNIEMAIMPLMEDIAF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
3145FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
3146FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
3147FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
3148FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
3149FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
3150FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
3151FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
3152FNIEMAIMPLMEDIAF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
3153FNIEMAIMPLMEDIAF2U64 iemAImpl_pmuludq_u64;
3154FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
3155FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
3156FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
3157FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
3158FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
3159FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
3160FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
3161FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
3162
3163FNIEMAIMPLMEDIAF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
3164FNIEMAIMPLMEDIAF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
3165FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
3166FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
3167FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
3168FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
3169FNIEMAIMPLMEDIAF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
3170FNIEMAIMPLMEDIAF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
3171FNIEMAIMPLMEDIAF2U128 iemAImpl_paddd_u128;
3172FNIEMAIMPLMEDIAF2U128 iemAImpl_paddq_u128;
3173FNIEMAIMPLMEDIAF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
3174FNIEMAIMPLMEDIAF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
3175FNIEMAIMPLMEDIAF2U128 iemAImpl_psubd_u128;
3176FNIEMAIMPLMEDIAF2U128 iemAImpl_psubq_u128;
3177FNIEMAIMPLMEDIAF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
3178FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhw_u128;
3179FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
3180FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddwd_u128, iemAImpl_pmaddwd_u128_fallback;
3181FNIEMAIMPLMEDIAF2U128 iemAImpl_pminub_u128;
3182FNIEMAIMPLMEDIAF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
3183FNIEMAIMPLMEDIAF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
3184FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
3185FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
3186FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
3187FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxub_u128;
3188FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
3189FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
3190FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
3191FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsw_u128;
3192FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
3193FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
3194FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
3195FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
3196FNIEMAIMPLMEDIAF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
3197FNIEMAIMPLMEDIAF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
3198FNIEMAIMPLMEDIAF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
3199FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
3200FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
3201FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
3202FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
3203FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
3204FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
3205FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
3206FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
3207FNIEMAIMPLMEDIAF2U128 iemAImpl_pmuludq_u128;
3209FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
3210FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
3211FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
3212FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
3213FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
3214FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
3215FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
3216FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
3217FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
3218FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
3219FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
3220FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
3221
3222FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
3223FNIEMAIMPLMEDIAF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
3224FNIEMAIMPLMEDIAF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
3225FNIEMAIMPLMEDIAF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
3226FNIEMAIMPLMEDIAF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
3227FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
3228FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
3229FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
3230FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
3231FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
3232FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
3233FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
3234FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
3235FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
3236FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
3237FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
3238FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
3239FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
3240FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
3241FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
3242FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
3243FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
3244FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
3245FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
3246FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
3247FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
3248FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
3249FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
3250FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
3251FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
3252FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
3253FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
3254FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
3255FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
3256FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
3257FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
3258FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
3259FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
3260FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
3261FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
3262FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
3263FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
3264FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
3265FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
3266FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
3267FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
3268FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
3269FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
3270FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
3271FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
3272FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
3273FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
3274FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
3275FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
3276FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
3277FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
3278FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
3279FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
3280FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
3281FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
3282FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
3283FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
3284FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
3285FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
3286FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;
3287FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllw_u128, iemAImpl_vpsllw_u128_fallback;
3288FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpslld_u128, iemAImpl_vpslld_u128_fallback;
3289FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllq_u128, iemAImpl_vpsllq_u128_fallback;
3290FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsraw_u128, iemAImpl_vpsraw_u128_fallback;
3291FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrad_u128, iemAImpl_vpsrad_u128_fallback;
3292FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlw_u128, iemAImpl_vpsrlw_u128_fallback;
3293FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrld_u128, iemAImpl_vpsrld_u128_fallback;
3294FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlq_u128, iemAImpl_vpsrlq_u128_fallback;
3295FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddwd_u128, iemAImpl_vpmaddwd_u128_fallback;
3296
3297FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
3298FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsw_u128_fallback;
3299FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsd_u128_fallback;
3300FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
3301
3302FNIEMAIMPLMEDIAF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
3303FNIEMAIMPLMEDIAF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
3304FNIEMAIMPLMEDIAF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
3305FNIEMAIMPLMEDIAF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
3306FNIEMAIMPLMEDIAF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
3307FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
3308FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
3309FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
3310FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
3311FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
3312FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
3313FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
3314FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
3315FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
3316FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
3317FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
3318FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
3319FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
3320FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
3321FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
3322FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
3323FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
3324FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
3325FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
3326FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
3327FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
3328FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
3329FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
3330FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
3331FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
3332FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
3333FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
3334FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
3335FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
3336FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
3337FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
3338FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
3339FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
3340FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
3341FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
3342FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
3343FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
3344FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
3345FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
3346FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
3347FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
3348FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
3349FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
3350FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
3351FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
3352FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
3353FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
3354FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
3355FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
3356FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
3357FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
3358FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
3359FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
3360FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
3361FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
3362FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
3363FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
3364FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
3365FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
3366FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;
3367FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllw_u256, iemAImpl_vpsllw_u256_fallback;
3368FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpslld_u256, iemAImpl_vpslld_u256_fallback;
3369FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllq_u256, iemAImpl_vpsllq_u256_fallback;
3370FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsraw_u256, iemAImpl_vpsraw_u256_fallback;
3371FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrad_u256, iemAImpl_vpsrad_u256_fallback;
3372FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlw_u256, iemAImpl_vpsrlw_u256_fallback;
3373FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrld_u256, iemAImpl_vpsrld_u256_fallback;
3374FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlq_u256, iemAImpl_vpsrlq_u256_fallback;
3375FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddwd_u256, iemAImpl_vpmaddwd_u256_fallback;
3376
3377FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
3378FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
3379FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
3380/** @} */
3381
3382/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf2 -> full1.
3383 * @{ */
3384FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
3385FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
3386FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
3387 iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
3388 iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
3389 iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
3390 iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
3391 iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
3392 iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
3393 iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
3394
3395FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
3396 iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
3397 iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
3398 iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
3399 iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
3400 iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
3401 iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
3402 iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
3403/** @} */
3404
3405/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
3406 * @{ */
3407FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
3408FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
3409FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
3410 iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
3411 iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
3412 iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
3413FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
3414 iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
3415 iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
3416 iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
3417/** @} */
3418
3419/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
3420 * @{ */
3421typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3422typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
3423typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
3424typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
3425IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
3426FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
3427#ifndef IEM_WITHOUT_ASSEMBLY
3428FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
3429#endif
3430FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
3431/** @} */
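/*
 * Example: the bEvil byte is the instruction's immediate shuffle selector.  A
 * sketch of PSHUFD picking one of the four source dwords per 2-bit selector
 * field (hypothetical name, using the RTUINT128U::au32 view).
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_pshufd_u128_example,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil))
 *     {
 *         RTUINT128U const uSrc = *puSrc;              // copy first in case puDst == puSrc
 *         for (unsigned i = 0; i < 4; i++)
 *             puDst->au32[i] = uSrc.au32[(bEvil >> (i * 2)) & 3];
 *     }
 */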
3432
3433/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
3434 * @{ */
3435typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
3436typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
3437typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
3438typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
3439typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
3440typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
3441FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
3442FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
3443FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
3444FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
3445FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
3446FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
3447FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
3448/** @} */
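/*
 * Example: for the immediate shifts a count of 64 (or 16 bytes for the DQ
 * forms) and up must clear the destination rather than hit undefined C shift
 * behaviour.  A sketch for the MMX PSLLQ form (hypothetical name).
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_psllq_imm_u64_example,(uint64_t *puDst, uint8_t bShift))
 *     {
 *         *puDst = bShift < 64 ? *puDst << bShift : 0; // counts >= 64 zero the register
 *     }
 */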
3449
3450/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
3451 * @{ */
3452IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
3453IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
3454#ifndef IEM_WITHOUT_ASSEMBLY
3455IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
3456#endif
3457IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
3458/** @} */
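/*
 * Example: PMOVMSKB gathers the most significant bit of every byte into a
 * general purpose register.  A sketch for the 128-bit form (hypothetical name).
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128_example,(uint64_t *pu64Dst, PCRTUINT128U puSrc))
 *     {
 *         uint64_t fMask = 0;
 *         for (unsigned i = 0; i < 16; i++)
 *             fMask |= (uint64_t)(puSrc->au8[i] >> 7) << i;   // bit i = MSB of byte i
 *         *pu64Dst = fMask;                            // the remaining destination bits end up zero
 *     }
 */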
3459
3460/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
3461 * @{ */
3462typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
3463typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
3464typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
3465typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
3466typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
3467typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;
3468
3469FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
3470FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
3471FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
3472FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
3473FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
3474FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;
3475
3476FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
3477FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
3478FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
3479FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
3480FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
3481FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;
3482
3483FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
3484FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
3485FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
3486FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
3487FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
3488FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
3489/** @} */
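/*
 * Example: the variable blends select per element based on the sign bit of the
 * corresponding mask element.  A sketch of the SSE4.1 PBLENDVB byte blend
 * (hypothetical name).
 *
 *     IEM_DECL_IMPL_DEF(void, iemAImpl_pblendvb_u128_example,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask))
 *     {
 *         for (unsigned i = 0; i < 16; i++)
 *             if (puMask->au8[i] & 0x80)               // MSB of the mask byte selects the source byte
 *                 puDst->au8[i] = puSrc->au8[i];
 *     }
 */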
3490
3491
3492/** @name Media (SSE/MMX/AVX) operation: Sort this later
3493 * @{ */
3494IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3495IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3496IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3497IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3498IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3499IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3500
3501IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3502IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3503IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3504IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3505IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3506
3507IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3508IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3509IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3510IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3511IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3512
3513IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3514IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3515IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3516IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3517IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3518
3519IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3520IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3521IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3522IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3523IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3524
3525IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3526IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3527IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3528IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3529IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3530
3531IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3532IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3533IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3534IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3535IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3536
3537IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3538IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3539IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3540IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3541IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3542
3543IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3544IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3545IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3546IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3547IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3548
3549IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3550IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3551IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3552IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3553IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3554
3555IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3556IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3557IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3558IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3559IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3560
3561IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3562IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3563IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3564IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3565IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3566
3567IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3568IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3569IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3570IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3571IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3572
3573IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3574IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3575IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3576IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3577IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3578
3579IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3580IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3581IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3582IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3583IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3584
3585IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3586IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3587
3588IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u64,(uint64_t *pu64Dst, uint16_t u16Src, uint8_t bEvil));
3589IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u128,(PRTUINT128U puDst, uint16_t u16Src, uint8_t bEvil));
3590IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3591IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3592
3593IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u64,(uint16_t *pu16Dst, uint64_t u64Src, uint8_t bEvil));
3594IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3595IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3596IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128_fallback,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3597
3598IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3599IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3600IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3601IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3602IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3603
3604IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3605IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3606IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3607IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3608IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3609
3610
3611typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3612typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
3613typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
3614typedef FNIEMAIMPLMEDIAOPTF2U256IMM8 *PFNIEMAIMPLMEDIAOPTF2U256IMM8;
3615typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3616typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
3617typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3618typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
3619
3620FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
3621FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
3622FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
3623FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
3624
3625FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
3626FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
3627FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendd_u128, iemAImpl_vpblendd_u128_fallback;
3628FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
3629FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
3630
3631FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
3632FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
3633FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendd_u256, iemAImpl_vpblendd_u256_fallback;
3634FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
3635FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
3636FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
3637FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;
3638
3639FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
3640FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
3641FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
3642FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
3643FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
3644
3645FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
3646FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
3647FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
3648FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
3649FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
3650
3651FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
3652
3653FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
3654
3655FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
3656FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
3657FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
3658FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
3659FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
3660FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
3661IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3662IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3663
3664typedef struct IEMPCMPISTRXSRC
3665{
3666 RTUINT128U uSrc1;
3667 RTUINT128U uSrc2;
3668} IEMPCMPISTRXSRC;
3669typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
3670typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;
3671
3672typedef struct IEMPCMPESTRXSRC
3673{
3674 RTUINT128U uSrc1;
3675 RTUINT128U uSrc2;
3676 uint64_t u64Rax;
3677 uint64_t u64Rdx;
3678} IEMPCMPESTRXSRC;
3679typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
3680typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;
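/* Note: IEMPCMPISTRXSRC packs the two 128-bit sources for the implicit-length
   PCMPISTRI/PCMPISTRM forms, while IEMPCMPESTRXSRC additionally carries the
   RAX/RDX values that the explicit-length PCMPESTRI/PCMPESTRM forms use as
   string lengths. */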
3681
3682typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3683typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
3684typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3685typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;
3686
3687typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3688typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
3689typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3690typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;
3691
3692FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
3693FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
3694FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
3695FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;
3696
3697FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
3698FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
3699
3700FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
3701FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
3702FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
3703
3704FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllw_imm_u128, iemAImpl_vpsllw_imm_u128_fallback;
3705FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllw_imm_u256, iemAImpl_vpsllw_imm_u256_fallback;
3706FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpslld_imm_u128, iemAImpl_vpslld_imm_u128_fallback;
3707FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpslld_imm_u256, iemAImpl_vpslld_imm_u256_fallback;
3708FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllq_imm_u128, iemAImpl_vpsllq_imm_u128_fallback;
3709FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllq_imm_u256, iemAImpl_vpsllq_imm_u256_fallback;
3710
3711FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsraw_imm_u128, iemAImpl_vpsraw_imm_u128_fallback;
3712FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsraw_imm_u256, iemAImpl_vpsraw_imm_u256_fallback;
3713FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrad_imm_u128, iemAImpl_vpsrad_imm_u128_fallback;
3714FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrad_imm_u256, iemAImpl_vpsrad_imm_u256_fallback;
3715
3716FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlw_imm_u128, iemAImpl_vpsrlw_imm_u128_fallback;
3717FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlw_imm_u256, iemAImpl_vpsrlw_imm_u256_fallback;
3718FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrld_imm_u128, iemAImpl_vpsrld_imm_u128_fallback;
3719FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrld_imm_u256, iemAImpl_vpsrld_imm_u256_fallback;
3720FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlq_imm_u128, iemAImpl_vpsrlq_imm_u128_fallback;
3721FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlq_imm_u256, iemAImpl_vpsrlq_imm_u256_fallback;
3722FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrldq_imm_u128, iemAImpl_vpsrldq_imm_u128_fallback;
3723FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrldq_imm_u256, iemAImpl_vpsrldq_imm_u256_fallback;
3724
3725FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilps_u128, iemAImpl_vpermilps_u128_fallback;
3726FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilps_imm_u128, iemAImpl_vpermilps_imm_u128_fallback;
3727FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilps_u256, iemAImpl_vpermilps_u256_fallback;
3728FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilps_imm_u256, iemAImpl_vpermilps_imm_u256_fallback;
3729
3730FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilpd_u128, iemAImpl_vpermilpd_u128_fallback;
3731FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilpd_imm_u128, iemAImpl_vpermilpd_imm_u128_fallback;
3732FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilpd_u256, iemAImpl_vpermilpd_u256_fallback;
3733FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilpd_imm_u256, iemAImpl_vpermilpd_imm_u256_fallback;
3734
3735FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvd_u128, iemAImpl_vpsllvd_u128_fallback;
3736FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvd_u256, iemAImpl_vpsllvd_u256_fallback;
3737FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvq_u128, iemAImpl_vpsllvq_u128_fallback;
3738FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvq_u256, iemAImpl_vpsllvq_u256_fallback;
3739FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsravd_u128, iemAImpl_vpsravd_u128_fallback;
3740FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsravd_u256, iemAImpl_vpsravd_u256_fallback;
3741FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvd_u128, iemAImpl_vpsrlvd_u128_fallback;
3742FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvd_u256, iemAImpl_vpsrlvd_u256_fallback;
3743FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvq_u128, iemAImpl_vpsrlvq_u128_fallback;
3744FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvq_u256, iemAImpl_vpsrlvq_u256_fallback;
3745/** @} */
3746
3747/** @name Media Odds and Ends
3748 * @{ */
3749typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
3750typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
3751typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
3752typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
3753FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
3754FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
3755FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
3756FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
3757
3758typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
3759typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
3760FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
3761FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
3762
3763typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3764typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
3765typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3766typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
3767typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3768typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
3769typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3770typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
3771
3772FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
3773FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
3774
3775FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
3776FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
3777
3778FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
3779FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
3780
3781FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
3782FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
3783
3784typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
3785typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
3786typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
3787typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
3788
3789FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
3790FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
3791
3792typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
3793typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
3794typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
3795typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
3796
3797FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
3798FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
3799
3800
3801typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFLMXCSR128,(uint32_t *pfMxcsr, uint32_t *pfEFlags, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3802typedef FNIEMAIMPLF2EFLMXCSR128 *PFNIEMAIMPLF2EFLMXCSR128;
3803
3804FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomiss_u128;
3805FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
3806
3807FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomisd_u128;
3808FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
3809
3810FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comiss_u128;
3811FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
3812
3813FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comisd_u128;
3814FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
3815
3816
3817typedef struct IEMMEDIAF2XMMSRC
3818{
3819 X86XMMREG uSrc1;
3820 X86XMMREG uSrc2;
3821} IEMMEDIAF2XMMSRC;
3822typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
3823typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
3824
3825typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t *pfMxcsr, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
3826typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
3827
3828FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
3829FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
3830FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
3831FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
3832FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
3833FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
3834
3835FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
3836FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
3837
3838FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
3839FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;
3840
3841typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U128,(uint32_t *pfMxcsr, uint64_t *pu64Dst, PCX86XMMREG pSrc));
3842typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
3843
3844FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
3845FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
3846
3847typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU128U64,(uint32_t *pfMxcsr, PX86XMMREG pDst, uint64_t u64Src));
3848typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
3849
3850FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
3851FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
3852
3853typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U64,(uint32_t *pfMxcsr, uint64_t *pu64Dst, uint64_t u64Src));
3854typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
3855
3856FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
3857FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
3858
3859/** @} */
3860
3861
3862/** @name Function tables.
3863 * @{
3864 */
3865
3866/**
3867 * Function table for a binary operator providing implementation based on
3868 * operand size.
3869 */
3870typedef struct IEMOPBINSIZES
3871{
3872 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
3873 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
3874 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
3875 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
3876} IEMOPBINSIZES;
3877/** Pointer to a binary operator function table. */
3878typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
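/* Illustrative sketch (hypothetical table and worker names, not taken from
   this header): a binary-operator table would typically be filled with the
   per-width workers and their locked variants, e.g.:

       static IEMOPBINSIZES const s_ExampleAdd =
       {
           iemAImpl_add_u8,  iemAImpl_add_u8_locked,
           iemAImpl_add_u16, iemAImpl_add_u16_locked,
           iemAImpl_add_u32, iemAImpl_add_u32_locked,
           iemAImpl_add_u64, iemAImpl_add_u64_locked
       };

   The names above merely follow the iemAImpl_ naming convention used in this
   file; the actual tables are defined in the IEM source files. */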
3879
3880
3881/**
3882 * Function table for a unary operator providing implementation based on
3883 * operand size.
3884 */
3885typedef struct IEMOPUNARYSIZES
3886{
3887 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
3888 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
3889 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
3890 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
3891} IEMOPUNARYSIZES;
3892/** Pointer to a unary operator function table. */
3893typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
3894
3895
3896/**
3897 * Function table for a shift operator providing implementation based on
3898 * operand size.
3899 */
3900typedef struct IEMOPSHIFTSIZES
3901{
3902 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
3903 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
3904 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
3905 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
3906} IEMOPSHIFTSIZES;
3907/** Pointer to a shift operator function table. */
3908typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
3909
3910
3911/**
3912 * Function table for a multiplication or division operation.
3913 */
3914typedef struct IEMOPMULDIVSIZES
3915{
3916 PFNIEMAIMPLMULDIVU8 pfnU8;
3917 PFNIEMAIMPLMULDIVU16 pfnU16;
3918 PFNIEMAIMPLMULDIVU32 pfnU32;
3919 PFNIEMAIMPLMULDIVU64 pfnU64;
3920} IEMOPMULDIVSIZES;
3921/** Pointer to a multiplication or division operation function table. */
3922typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
3923
3924
3925/**
3926 * Function table for a double precision shift operator providing implementation
3927 * based on operand size.
3928 */
3929typedef struct IEMOPSHIFTDBLSIZES
3930{
3931 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
3932 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
3933 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
3934} IEMOPSHIFTDBLSIZES;
3935/** Pointer to a double precision shift function table. */
3936typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
3937
3938
3939/**
3940 * Function table for a media instruction taking two full sized media source
3941 * registers and one full sized destination register (AVX).
3942 */
3943typedef struct IEMOPMEDIAF3
3944{
3945 PFNIEMAIMPLMEDIAF3U128 pfnU128;
3946 PFNIEMAIMPLMEDIAF3U256 pfnU256;
3947} IEMOPMEDIAF3;
3948/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3949typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
3950
3951/** @def IEMOPMEDIAF3_INIT_VARS_EX
3952 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3953 * given functions as initializers. For use in AVX functions where a pair of
3954 * functions are only used once and the function table need not be public. */
3955#ifndef TST_IEM_CHECK_MC
3956# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3957# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3958 static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3959 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3960# else
3961# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3962 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3963# endif
3964#else
3965# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3966#endif
3967/** @def IEMOPMEDIAF3_INIT_VARS
3968 * Generate AVX function tables for the @a a_InstrNm instruction.
3969 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
3970#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
3971 IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3972 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
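/* Illustrative usage sketch (hypothetical instruction name): a decoder body
   would invoke the macro as

       IEMOPMEDIAF3_INIT_VARS(vexample);

   which, on x86/amd64 hosts built with assembly support, declares local
   s_Host and s_Fallback tables referencing iemAImpl_vexample_u128/_u256 and
   their _fallback counterparts (all of type FNIEMAIMPLMEDIAF3U128/U256); on
   other hosts only s_Fallback is declared. The caller would then typically
   pick one of the two tables based on host capability. */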
3973
3974/**
3975 * Function table for a media instruction taking two full sized media source
3976 * registers and one full sized destination register, but no additional state
3977 * (AVX).
3978 */
3979typedef struct IEMOPMEDIAOPTF3
3980{
3981 PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
3982 PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
3983} IEMOPMEDIAOPTF3;
3984/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3985typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
3986
3987/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
3988 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3989 * given functions as initializers. For use in AVX functions where a pair of
3990 * functions are only used once and the function table need not be public. */
3991#ifndef TST_IEM_CHECK_MC
3992# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3993# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3994 static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3995 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3996# else
3997# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3998 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3999# endif
4000#else
4001# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4002#endif
4003/** @def IEMOPMEDIAOPTF3_INIT_VARS
4004 * Generate AVX function tables for the @a a_InstrNm instruction.
4005 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
4006#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
4007 IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4008 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4009
4010/**
4011 * Function table for a media instruction taking one full sized media source
4012 * register and one full sized destination register, but no additional state
4013 * (AVX).
4014 */
4015typedef struct IEMOPMEDIAOPTF2
4016{
4017 PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
4018 PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
4019} IEMOPMEDIAOPTF2;
4020/** Pointer to a media operation function table for 2 full sized ops (AVX). */
4021typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
4022
4023/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
4024 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4025 * given functions as initializers. For use in AVX functions where a pair of
4026 * functions are only used once and the function table need not be public. */
4027#ifndef TST_IEM_CHECK_MC
4028# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4029# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4030 static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4031 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4032# else
4033# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4034 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4035# endif
4036#else
4037# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4038#endif
4039/** @def IEMOPMEDIAOPTF2_INIT_VARS
4040 * Generate AVX function tables for the @a a_InstrNm instruction.
4041 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
4042#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
4043 IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4044 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4045
4046/**
4047 * Function table for a media instruction taking one full sized media source
4048 * register and one full sized destination register and an 8-bit immediate, but no additional state
4049 * (AVX).
4050 */
4051typedef struct IEMOPMEDIAOPTF2IMM8
4052{
4053 PFNIEMAIMPLMEDIAOPTF2U128IMM8 pfnU128;
4054 PFNIEMAIMPLMEDIAOPTF2U256IMM8 pfnU256;
4055} IEMOPMEDIAOPTF2IMM8;
4056/** Pointer to a media operation function table for 2 full sized ops (AVX). */
4057typedef IEMOPMEDIAOPTF2IMM8 const *PCIEMOPMEDIAOPTF2IMM8;
4058
4059/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX
4060 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4061 * given functions as initializers. For use in AVX functions where a pair of
4062 * functions are only used once and the function table need not be public. */
4063#ifndef TST_IEM_CHECK_MC
4064# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4065# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4066 static IEMOPMEDIAOPTF2IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4067 static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4068# else
4069# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4070 static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4071# endif
4072#else
4073# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4074#endif
4075/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS
4076 * Generate AVX function tables for the @a a_InstrNm instruction.
4077 * @sa IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX */
4078#define IEMOPMEDIAOPTF2IMM8_INIT_VARS(a_InstrNm) \
4079 IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256),\
4080 RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256_fallback))
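/* Note that, unlike the other *_INIT_VARS helpers, this one inserts an
   "_imm_" infix into the generated symbol names; for instance (illustrative
   only) IEMOPMEDIAOPTF2IMM8_INIT_VARS(vpermilps) would reference
   iemAImpl_vpermilps_imm_u128/_imm_u256 and their _fallback variants, which
   are declared above. */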
4081
4082/**
4083 * Function table for a media instruction taking two full sized media source
4084 * registers and one full sized destination register and an 8-bit immediate, but no additional state
4085 * (AVX).
4086 */
4087typedef struct IEMOPMEDIAOPTF3IMM8
4088{
4089 PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
4090 PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
4091} IEMOPMEDIAOPTF3IMM8;
4092/** Pointer to a media operation function table for 3 full sized ops (AVX). */
4093typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
4094
4095/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
4096 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4097 * given functions as initializers. For use in AVX functions where a pair of
4098 * functions are only used once and the function table need not be public. */
4099#ifndef TST_IEM_CHECK_MC
4100# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4101# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4102 static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4103 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4104# else
4105# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4106 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4107# endif
4108#else
4109# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4110#endif
4111/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
4112 * Generate AVX function tables for the @a a_InstrNm instruction.
4113 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
4114#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
4115 IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4116 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4117/** @} */
4118
4119
4120/**
4121 * Function table for a blend type instruction taking three full sized media source
4122 * registers and one full sized destination register, but no additional state
4123 * (AVX).
4124 */
4125typedef struct IEMOPBLENDOP
4126{
4127 PFNIEMAIMPLAVXBLENDU128 pfnU128;
4128 PFNIEMAIMPLAVXBLENDU256 pfnU256;
4129} IEMOPBLENDOP;
4130/** Pointer to a media operation function table for 4 full sized ops (AVX). */
4131typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
4132
4133/** @def IEMOPBLENDOP_INIT_VARS_EX
4134 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4135 * given functions as initializers. For use in AVX functions where a pair of
4136 * functions are only used once and the function table need not be public. */
4137#ifndef TST_IEM_CHECK_MC
4138# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4139# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4140 static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4141 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4142# else
4143# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4144 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4145# endif
4146#else
4147# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4148#endif
4149/** @def IEMOPBLENDOP_INIT_VARS
4150 * Generate AVX function tables for the @a a_InstrNm instruction.
4151 * @sa IEMOPBLENDOP_INIT_VARS_EX */
4152#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
4153 IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4154 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4155
4156
4157/** @name SSE/AVX single/double precision floating point operations.
4158 * @{ */
4159/**
4160 * An SSE result.
4161 */
4162typedef struct IEMSSERESULT
4163{
4164 /** The output value. */
4165 X86XMMREG uResult;
4166 /** The output status. */
4167 uint32_t MXCSR;
4168} IEMSSERESULT;
4169AssertCompileMemberOffset(IEMSSERESULT, MXCSR, 128 / 8);
4170/** Pointer to an SSE result. */
4171typedef IEMSSERESULT *PIEMSSERESULT;
4172/** Pointer to a const SSE result. */
4173typedef IEMSSERESULT const *PCIEMSSERESULT;
4174
4175
4176/**
4177 * An AVX128 result.
4178 */
4179typedef struct IEMAVX128RESULT
4180{
4181 /** The output value. */
4182 X86XMMREG uResult;
4183 /** The output status. */
4184 uint32_t MXCSR;
4185} IEMAVX128RESULT;
4186AssertCompileMemberOffset(IEMAVX128RESULT, MXCSR, 128 / 8);
4187/** Pointer to an AVX128 result. */
4188typedef IEMAVX128RESULT *PIEMAVX128RESULT;
4189/** Pointer to a const AVX128 result. */
4190typedef IEMAVX128RESULT const *PCIEMAVX128RESULT;
4191
4192
4193/**
4194 * An AVX256 result.
4195 */
4196typedef struct IEMAVX256RESULT
4197{
4198 /** The output value. */
4199 X86YMMREG uResult;
4200 /** The output status. */
4201 uint32_t MXCSR;
4202} IEMAVX256RESULT;
4203AssertCompileMemberOffset(IEMAVX256RESULT, MXCSR, 256 / 8);
4204/** Pointer to an AVX256 result. */
4205typedef IEMAVX256RESULT *PIEMAVX256RESULT;
4206/** Pointer to a const AVX256 result. */
4207typedef IEMAVX256RESULT const *PCIEMAVX256RESULT;
4208
4209
4210typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
4211typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
4212typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R32,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
4213typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
4214typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R64,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
4215typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
4216
4217typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
4218typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
4219typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R32,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
4220typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
4221typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R64,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
4222typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
4223
4224typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U256,(PX86XSAVEAREA pExtState, PIEMAVX256RESULT pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
4225typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
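/* Illustrative worker shape (a sketch with a hypothetical name and placeholder
   computation, not the actual implementation): an FNIEMAIMPLFPSSEF2U128 worker
   computes the packed result from the two XMM sources and reports the
   floating-point status through the MXCSR field of the result structure:

       IEM_DECL_IMPL_DEF(void, iemAImpl_exampleps_u128,
                         (PX86FXSTATE pFpuState, PIEMSSERESULT pResult,
                          PCX86XMMREG puSrc1, PCX86XMMREG puSrc2))
       {
           pResult->uResult = *puSrc1;
           pResult->MXCSR   = pFpuState->MXCSR;
           RT_NOREF(puSrc2);
       }

   The real workers (iemAImpl_addps_u128 and friends below) perform the actual
   IEEE arithmetic and accumulate exception status into MXCSR. */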
4226
4227FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
4228FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
4229FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
4230FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
4231FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
4232FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
4233FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
4234FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
4235FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
4236FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
4237FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
4238FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
4239FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
4240FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
4241FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
4242FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
4243FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
4244FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
4245FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
4246FNIEMAIMPLFPSSEF2U128 iemAImpl_rcpps_u128;
4247FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
4248FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
4249FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
4250FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2pd_u128;
4251
4252FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
4253FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
4254FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
4255FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
4256FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
4257FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
4258
4259FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
4260FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
4261FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
4262FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
4263FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
4264FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
4265FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
4266FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
4267FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
4268FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
4269FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
4270FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
4271FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
4272FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
4273FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
4274FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
4275FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
4276FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rcpss_u128_r32;
4277
4278FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
4279FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
4280FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
4281FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
4282FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
4283FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
4284FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
4285FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
4286FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
4287FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
4288FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
4289FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
4290FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
4291FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
4292FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
4293FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
4294FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
4295FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
4296FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
4297FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
4298FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
4299FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;
4300
4301FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
4302FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
4303FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
4304FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
4305FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
4306FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
4307FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
4308FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
4309FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
4310FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
4311FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
4312FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
4313FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
4314FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
4315
4316FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
4317FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
4318FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
4319FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
4320FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
4321FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
4322FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
4323FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
4324FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
4325FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
4326FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
4327FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
4328FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
4329FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
4330FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
4331FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
4332FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
4333FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
4334FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
4335FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
4336/** @} */
4337
4338/** @name C instruction implementations for anything slightly complicated.
4339 * @{ */
4340
4341/**
4342 * For typedef'ing or declaring a C instruction implementation function taking
4343 * no extra arguments.
4344 *
4345 * @param a_Name The name of the type.
4346 */
4347# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
4348 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4349/**
4350 * For defining a C instruction implementation function taking no extra
4351 * arguments.
4352 *
4353 * @param a_Name The name of the function
4354 */
4355# define IEM_CIMPL_DEF_0(a_Name) \
4356 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4357/**
4358 * Prototype version of IEM_CIMPL_DEF_0.
4359 */
4360# define IEM_CIMPL_PROTO_0(a_Name) \
4361 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4362/**
4363 * For calling a C instruction implementation function taking no extra
4364 * arguments.
4365 *
4366 * This special call macro adds default arguments to the call and allows us to
4367 * change these later.
4368 *
4369 * @param a_fn The name of the function.
4370 */
4371# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
4372
4373/** Type for a C instruction implementation function taking no extra
4374 * arguments. */
4375typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
4376/** Function pointer type for a C instruction implementation function taking
4377 * no extra arguments. */
4378typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
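/* Illustrative usage sketch (hypothetical function name): a zero-argument C
   implementation is defined and called roughly like this, with pVCpu and
   cbInstr supplied implicitly by the macros:

       IEM_CIMPL_DEF_0(iemCImpl_example_nop)
       {
           RT_NOREF(pVCpu, cbInstr);
           return VINF_SUCCESS;
       }

   ...and, from code that has pVCpu and cbInstr in scope:

       return IEM_CIMPL_CALL_0(iemCImpl_example_nop);
*/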
4379
4380/**
4381 * For typedef'ing or declaring a C instruction implementation function taking
4382 * one extra argument.
4383 *
4384 * @param a_Name The name of the type.
4385 * @param a_Type0 The argument type.
4386 * @param a_Arg0 The argument name.
4387 */
4388# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
4389 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4390/**
4391 * For defining a C instruction implementation function taking one extra
4392 * argument.
4393 *
4394 * @param a_Name The name of the function
4395 * @param a_Type0 The argument type.
4396 * @param a_Arg0 The argument name.
4397 */
4398# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
4399 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4400/**
4401 * Prototype version of IEM_CIMPL_DEF_1.
4402 */
4403# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
4404 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4405/**
4406 * For calling a C instruction implementation function taking one extra
4407 * argument.
4408 *
4409 * This special call macro adds default arguments to the call and allows us to
4410 * change these later.
4411 *
4412 * @param a_fn The name of the function.
4413 * @param a0 The name of the 1st argument.
4414 */
4415# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4416
4417/**
4418 * For typedef'ing or declaring a C instruction implementation function taking
4419 * two extra arguments.
4420 *
4421 * @param a_Name The name of the type.
4422 * @param a_Type0 The type of the 1st argument
4423 * @param a_Arg0 The name of the 1st argument.
4424 * @param a_Type1 The type of the 2nd argument.
4425 * @param a_Arg1 The name of the 2nd argument.
4426 */
4427# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4428 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4429/**
4430 * For defining a C instruction implementation function taking two extra
4431 * arguments.
4432 *
4433 * @param a_Name The name of the function.
4434 * @param a_Type0 The type of the 1st argument
4435 * @param a_Arg0 The name of the 1st argument.
4436 * @param a_Type1 The type of the 2nd argument.
4437 * @param a_Arg1 The name of the 2nd argument.
4438 */
4439# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4440 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4441/**
4442 * Prototype version of IEM_CIMPL_DEF_2.
4443 */
4444# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4445 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4446/**
4447 * For calling a C instruction implementation function taking two extra
4448 * arguments.
4449 *
4450 * This special call macro adds default arguments to the call and allows us to
4451 * change these later.
4452 *
4453 * @param a_fn The name of the function.
4454 * @param a0 The name of the 1st argument.
4455 * @param a1 The name of the 2nd argument.
4456 */
4457# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4458
4459/**
4460 * For typedef'ing or declaring a C instruction implementation function taking
4461 * three extra arguments.
4462 *
4463 * @param a_Name The name of the type.
4464 * @param a_Type0 The type of the 1st argument
4465 * @param a_Arg0 The name of the 1st argument.
4466 * @param a_Type1 The type of the 2nd argument.
4467 * @param a_Arg1 The name of the 2nd argument.
4468 * @param a_Type2 The type of the 3rd argument.
4469 * @param a_Arg2 The name of the 3rd argument.
4470 */
4471# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4472 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4473/**
4474 * For defining a C instruction implementation function taking three extra
4475 * arguments.
4476 *
4477 * @param a_Name The name of the function.
4478 * @param a_Type0 The type of the 1st argument
4479 * @param a_Arg0 The name of the 1st argument.
4480 * @param a_Type1 The type of the 2nd argument.
4481 * @param a_Arg1 The name of the 2nd argument.
4482 * @param a_Type2 The type of the 3rd argument.
4483 * @param a_Arg2 The name of the 3rd argument.
4484 */
4485# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4486 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4487/**
4488 * Prototype version of IEM_CIMPL_DEF_3.
4489 */
4490# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4491 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4492/**
4493 * For calling a C instruction implementation function taking three extra
4494 * arguments.
4495 *
4496 * This special call macro adds default arguments to the call and allows us to
4497 * change these later.
4498 *
4499 * @param a_fn The name of the function.
4500 * @param a0 The name of the 1st argument.
4501 * @param a1 The name of the 2nd argument.
4502 * @param a2 The name of the 3rd argument.
4503 */
4504# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4505
4506
4507/**
4508 * For typedef'ing or declaring a C instruction implementation function taking
4509 * four extra arguments.
4510 *
4511 * @param a_Name The name of the type.
4512 * @param a_Type0 The type of the 1st argument
4513 * @param a_Arg0 The name of the 1st argument.
4514 * @param a_Type1 The type of the 2nd argument.
4515 * @param a_Arg1 The name of the 2nd argument.
4516 * @param a_Type2 The type of the 3rd argument.
4517 * @param a_Arg2 The name of the 3rd argument.
4518 * @param a_Type3 The type of the 4th argument.
4519 * @param a_Arg3 The name of the 4th argument.
4520 */
4521# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4522 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
4523/**
4524 * For defining a C instruction implementation function taking four extra
4525 * arguments.
4526 *
4527 * @param a_Name The name of the function.
4528 * @param a_Type0 The type of the 1st argument
4529 * @param a_Arg0 The name of the 1st argument.
4530 * @param a_Type1 The type of the 2nd argument.
4531 * @param a_Arg1 The name of the 2nd argument.
4532 * @param a_Type2 The type of the 3rd argument.
4533 * @param a_Arg2 The name of the 3rd argument.
4534 * @param a_Type3 The type of the 4th argument.
4535 * @param a_Arg3 The name of the 4th argument.
4536 */
4537# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4538 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4539 a_Type2 a_Arg2, a_Type3 a_Arg3))
4540/**
4541 * Prototype version of IEM_CIMPL_DEF_4.
4542 */
4543# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4544 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4545 a_Type2 a_Arg2, a_Type3 a_Arg3))
4546/**
4547 * For calling a C instruction implementation function taking four extra
4548 * arguments.
4549 *
4550 * This special call macro adds default arguments to the call and allows us to
4551 * change these later.
4552 *
4553 * @param a_fn The name of the function.
4554 * @param a0 The name of the 1st argument.
4555 * @param a1 The name of the 2nd argument.
4556 * @param a2 The name of the 3rd argument.
4557 * @param a3 The name of the 4th argument.
4558 */
4559# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4560
4561
4562/**
4563 * For typedef'ing or declaring a C instruction implementation function taking
4564 * five extra arguments.
4565 *
4566 * @param a_Name The name of the type.
4567 * @param a_Type0 The type of the 1st argument
4568 * @param a_Arg0 The name of the 1st argument.
4569 * @param a_Type1 The type of the 2nd argument.
4570 * @param a_Arg1 The name of the 2nd argument.
4571 * @param a_Type2 The type of the 3rd argument.
4572 * @param a_Arg2 The name of the 3rd argument.
4573 * @param a_Type3 The type of the 4th argument.
4574 * @param a_Arg3 The name of the 4th argument.
4575 * @param a_Type4 The type of the 5th argument.
4576 * @param a_Arg4 The name of the 5th argument.
4577 */
4578# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4579 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
4580 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
4581 a_Type3 a_Arg3, a_Type4 a_Arg4))
4582/**
4583 * For defining a C instruction implementation function taking five extra
4584 * arguments.
4585 *
4586 * @param a_Name The name of the function.
4587 * @param a_Type0 The type of the 1st argument
4588 * @param a_Arg0 The name of the 1st argument.
4589 * @param a_Type1 The type of the 2nd argument.
4590 * @param a_Arg1 The name of the 2nd argument.
4591 * @param a_Type2 The type of the 3rd argument.
4592 * @param a_Arg2 The name of the 3rd argument.
4593 * @param a_Type3 The type of the 4th argument.
4594 * @param a_Arg3 The name of the 4th argument.
4595 * @param a_Type4 The type of the 5th argument.
4596 * @param a_Arg4 The name of the 5th argument.
4597 */
4598# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4599 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4600 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4601/**
4602 * Prototype version of IEM_CIMPL_DEF_5.
4603 */
4604# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4605 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4606 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4607/**
4608 * For calling a C instruction implementation function taking five extra
4609 * arguments.
4610 *
4611 * This special call macro adds default arguments to the call and allows us to
4612 * change these later.
4613 *
4614 * @param a_fn The name of the function.
4615 * @param a0 The name of the 1st argument.
4616 * @param a1 The name of the 2nd argument.
4617 * @param a2 The name of the 3rd argument.
4618 * @param a3 The name of the 4th argument.
4619 * @param a4 The name of the 5th argument.
4620 */
4621# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
4622
4623/** @} */
4624
4625
4626/** @name Opcode Decoder Function Types.
4627 * @{ */
4628
4629/** @typedef PFNIEMOP
4630 * Pointer to an opcode decoder function.
4631 */
4632
4633/** @def FNIEMOP_DEF
4634 * Define an opcode decoder function.
4635 *
4636 * We're using macros for this so that adding and removing parameters as well as
4637 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
4638 *
4639 * @param a_Name The function name.
4640 */
4641
4642/** @typedef PFNIEMOPRM
4643 * Pointer to an opcode decoder function with RM byte.
4644 */
4645
4646/** @def FNIEMOPRM_DEF
4647 * Define an opcode decoder function with RM byte.
4648 *
4649 * We're using macros for this so that adding and removing parameters as well as
4650 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
4651 *
4652 * @param a_Name The function name.
4653 */
4654
4655#if defined(__GNUC__) && defined(RT_ARCH_X86)
4656typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
4657typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4658# define FNIEMOP_DEF(a_Name) \
4659 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
4660# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4661 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4662# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4663 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4664
4665#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
4666typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
4667typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4668# define FNIEMOP_DEF(a_Name) \
4669 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4670# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4671 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4672# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4673 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4674
4675#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
4676typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4677typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4678# define FNIEMOP_DEF(a_Name) \
4679 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
4680# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4681 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4682# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4683 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4684
4685#else
4686typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4687typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4688# define FNIEMOP_DEF(a_Name) \
4689 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4690# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4691 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4692# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4693 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4694
4695#endif
4696#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4697
4698/**
4699 * Call an opcode decoder function.
4700 *
4701 * We're using macros for this so that adding and removing parameters can be
4702 * done as we please. See FNIEMOP_DEF.
4703 */
4704#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
4705
4706/**
4707 * Call a common opcode decoder function taking one extra argument.
4708 *
4709 * We're using macros for this so that adding and removing parameters can be
4710 * done as we please. See FNIEMOP_DEF_1.
4711 */
4712#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
4713
4714/**
4715 * Call a common opcode decoder function taking two extra arguments.
4716 *
4717 * We're using macros for this so that adding and removing parameters can be
4718 * done as we please. See FNIEMOP_DEF_2.
4719 */
4720#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
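/*
 * Illustrative sketch only (iemOp_Example and iemOp_ExampleGrp are made-up names):
 * opcode decoder functions are defined with FNIEMOP_DEF / FNIEMOPRM_DEF and
 * invoked from a context that has pVCpu in scope via the FNIEMOP_CALL macros:
 *
 *     FNIEMOP_DEF(iemOp_Example)
 *     {
 *         // ... fetch any further opcode bytes and dispatch the actual work ...
 *     }
 *
 *     FNIEMOPRM_DEF(iemOp_ExampleGrp)
 *     {
 *         // bRm is the ModR/M byte the caller already fetched.
 *     }
 *
 *     return FNIEMOP_CALL(iemOp_Example);
 *     return FNIEMOP_CALL_1(iemOp_ExampleGrp, bRm);
 */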
4721/** @} */
4722
4723
4724/** @name Misc Helpers
4725 * @{ */
4726
4727/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
4728 * due to GCC lacking knowledge about the value range of a switch. */
4729#if RT_CPLUSPLUS_PREREQ(202000)
4730# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4731#else
4732# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4733#endif
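/*
 * Illustrative sketch: typical use at the end of an exhaustive switch so the
 * compiler does not warn about a missing return path (the surrounding code,
 * enmEffOpSize and cbOperand, is hypothetical):
 *
 *     switch (enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: cbOperand = 2; break;
 *         case IEMMODE_32BIT: cbOperand = 4; break;
 *         case IEMMODE_64BIT: cbOperand = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */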
4734
4735/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
4736#if RT_CPLUSPLUS_PREREQ(202000)
4737# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
4738#else
4739# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
4740#endif
4741
4742/**
4743 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4744 * occasion.
4745 */
4746#ifdef LOG_ENABLED
4747# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4748 do { \
4749 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
4750 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4751 } while (0)
4752#else
4753# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4754 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4755#endif
4756
4757/**
4758 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4759 * occasion using the supplied logger statement.
4760 *
4761 * @param a_LoggerArgs What to log on failure.
4762 */
4763#ifdef LOG_ENABLED
4764# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4765 do { \
4766 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
4767 /*LogFunc(a_LoggerArgs);*/ \
4768 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4769 } while (0)
4770#else
4771# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4772 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4773#endif
4774
4775/**
4776 * Gets the CPU mode (from fExec) as an IEMMODE value.
4777 *
4778 * @returns IEMMODE
4779 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4780 */
4781#define IEM_GET_CPU_MODE(a_pVCpu) ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
4782
4783/**
4784 * Check if we're currently executing in real or virtual 8086 mode.
4785 *
4786 * @returns @c true if it is, @c false if not.
4787 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4788 */
4789#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (( ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
4790 & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
4791
4792/**
4793 * Check if we're currently executing in virtual 8086 mode.
4794 *
4795 * @returns @c true if it is, @c false if not.
4796 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4797 */
4798#define IEM_IS_V86_MODE(a_pVCpu) (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
4799
4800/**
4801 * Check if we're currently executing in long mode.
4802 *
4803 * @returns @c true if it is, @c false if not.
4804 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4805 */
4806#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
4807
4808/**
4809 * Check if we're currently executing in a 16-bit code segment.
4810 *
4811 * @returns @c true if it is, @c false if not.
4812 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4813 */
4814#define IEM_IS_16BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
4815
4816/**
4817 * Check if we're currently executing in a 32-bit code segment.
4818 *
4819 * @returns @c true if it is, @c false if not.
4820 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4821 */
4822#define IEM_IS_32BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
4823
4824/**
4825 * Check if we're currently executing in a 64-bit code segment.
4826 *
4827 * @returns @c true if it is, @c false if not.
4828 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4829 */
4830#define IEM_IS_64BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
4831
4832/**
4833 * Check if we're currently executing in real mode.
4834 *
4835 * @returns @c true if it is, @c false if not.
4836 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4837 */
4838#define IEM_IS_REAL_MODE(a_pVCpu) (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
4839
4840/**
4841 * Gets the current protection level (CPL).
4842 *
4843 * @returns 0..3
4844 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4845 */
4846#define IEM_GET_CPL(a_pVCpu) (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
4847
4848/**
4849 * Sets the current protection level (CPL).
4850 *
4851 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4852 */
4853#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
4854 do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
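/*
 * Illustrative sketch (hypothetical fragment of a C implementation body) of how
 * the mode and CPL accessors above are typically combined for a privileged
 * operation; iemRaiseGeneralProtectionFault0 is declared further down this file:
 *
 *     if (IEM_GET_CPL(pVCpu) != 0)
 *         return iemRaiseGeneralProtectionFault0(pVCpu);
 *     if (IEM_IS_64BIT_CODE(pVCpu))
 *     {
 *         // ... 64-bit flavour of the operation ...
 *     }
 *     else if (IEM_IS_V86_MODE(pVCpu))
 *         return iemRaiseGeneralProtectionFault0(pVCpu);
 */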
4855
4856/**
4857 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
4858 * @returns PCCPUMFEATURES
4859 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4860 */
4861#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
4862
4863/**
4864 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
4865 * @returns PCCPUMFEATURES
4866 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4867 */
4868#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
4869
4870/**
4871 * Evaluates to true if we're presenting an Intel CPU to the guest.
4872 */
4873#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
4874
4875/**
4876 * Evaluates to true if we're presenting an AMD CPU to the guest.
4877 */
4878#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
4879
4880/**
4881 * Check if the address is canonical.
4882 */
4883#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
4884
4885/** Checks if the ModR/M byte is in register mode or not. */
4886#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
4887/** Checks if the ModR/M byte is in memory mode or not. */
4888#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
4889
4890/**
4891 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
4892 *
4893 * For use during decoding.
4894 */
4895#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
4896/**
4897 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
4898 *
4899 * For use during decoding.
4900 */
4901#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
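/*
 * Illustrative sketch (hypothetical decoder fragment; bRm comes from the opcode
 * fetcher) of how the ModR/M helpers above are typically used:
 *
 *     if (IEM_IS_MODRM_REG_MODE(bRm))
 *     {
 *         uint8_t const iGRegSrc = IEM_GET_MODRM_REG(pVCpu, bRm); // reg field, REX.R folded in
 *         uint8_t const iGRegDst = IEM_GET_MODRM_RM(pVCpu, bRm);  // r/m names a register here
 *         // ... register-to-register form ...
 *     }
 *     else
 *     {
 *         // ... memory form: calculate the effective address first ...
 *     }
 */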
4902
4903/**
4904 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
4905 *
4906 * For use during decoding.
4907 */
4908#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
4909/**
4910 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
4911 *
4912 * For use during decoding.
4913 */
4914#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
4915
4916/**
4917 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
4918 * register index, with REX.R added in.
4919 *
4920 * For use during decoding.
4921 *
4922 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4923 */
4924#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
4925 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4926 || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
4927 ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
4928/**
4929 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
4930 * with REX.B added in.
4931 *
4932 * For use during decoding.
4933 *
4934 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4935 */
4936#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
4937 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4938 || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
4939 ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
4940
4941/**
4942 * Combines the REX prefix and the ModR/M byte for passing to
4943 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4944 *
4945 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
4946 * The two bits are part of the REG sub-field, which isn't needed in
4947 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4948 *
4949 * For use during decoding/recompiling.
4950 */
4951#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
4952 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
4953 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (25 - 3) ) )
4954AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(25));
4955AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(26));
4956
4957/**
4958 * Gets the effective VEX.VVVV value.
4959 *
4960 * The 4th bit is ignored when not in 64-bit code.
4961 * @returns effective V-register value.
4962 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4963 */
4964#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
4965 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
4966
4967
4968/**
4969 * Checks if we're executing inside an AMD-V or VT-x guest.
4970 */
4971#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
4972# define IEM_IS_IN_GUEST(a_pVCpu) RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
4973#else
4974# define IEM_IS_IN_GUEST(a_pVCpu) false
4975#endif
4976
4977
4978#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4979
4980/**
4981 * Check if the guest has entered VMX root operation.
4982 */
4983# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
4984
4985/**
4986 * Check if the guest has entered VMX non-root operation.
4987 */
4988# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
4989 == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
4990
4991/**
4992 * Check if the nested-guest has the given Pin-based VM-execution control set.
4993 */
4994# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
4995
4996/**
4997 * Check if the nested-guest has the given Processor-based VM-execution control set.
4998 */
4999# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
5000
5001/**
5002 * Check if the nested-guest has the given Secondary Processor-based VM-execution
5003 * control set.
5004 */
5005# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
5006
5007/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
5008# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
5009
5010/** Whether a shadow VMCS is present for the given VCPU. */
5011# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5012
5013/** Gets the VMXON region pointer. */
5014# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5015
5016/** Gets the guest-physical address of the current VMCS for the given VCPU. */
5017# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
5018
5019/** Whether a current VMCS is present for the given VCPU. */
5020# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5021
5022/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
5023# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
5024 do \
5025 { \
5026 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
5027 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
5028 } while (0)
5029
5030/** Clears any current VMCS for the given VCPU. */
5031# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
5032 do \
5033 { \
5034 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
5035 } while (0)
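/*
 * Illustrative sketch (hypothetical; GCPhysNewVmcs is a made-up local) of how a
 * VMX instruction implementation might consult and update the current-VMCS
 * tracking with the macros above:
 *
 *     if (!IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
 *     {
 *         // ... no current VMCS: fail the instruction accordingly ...
 *     }
 *     RTGCPHYS const GCPhysVmcs = IEM_VMX_GET_CURRENT_VMCS(pVCpu);
 *     // ... after successfully loading a new VMCS pointer:
 *     IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysNewVmcs);
 */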
5036
5037/**
5038 * Invokes the VMX VM-exit handler for an instruction intercept.
5039 */
5040# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
5041 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
5042
5043/**
5044 * Invokes the VMX VM-exit handler for an instruction intercept where the
5045 * instruction provides additional VM-exit information.
5046 */
5047# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
5048 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
5049
5050/**
5051 * Invokes the VMX VM-exit handler for a task switch.
5052 */
5053# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
5054 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
5055
5056/**
5057 * Invokes the VMX VM-exit handler for MWAIT.
5058 */
5059# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
5060 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
5061
5062/**
5063 * Invokes the VMX VM-exit handler for EPT faults.
5064 */
5065# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
5066 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
5067
5068/**
5069 * Invokes the VMX VM-exit handler for a triple fault.
5070 */
5071# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
5072 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
5073
5074#else
5075# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
5076# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
5077# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)                        (false)
5078# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl)                      (false)
5079# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2)                    (false)
5080# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5081# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5082# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5083# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5084# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5085# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
5086
5087#endif
5088
5089#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5090/**
5091 * Checks if we're executing a guest using AMD-V.
5092 */
5093# define IEM_SVM_IS_IN_GUEST(a_pVCpu) ( (a_pVCpu->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
5094 == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
5095/**
5096 * Check if an SVM control/instruction intercept is set.
5097 */
5098# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
5099 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
5100
5101/**
5102 * Check if an SVM read CRx intercept is set.
5103 */
5104# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5105 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5106
5107/**
5108 * Check if an SVM write CRx intercept is set.
5109 */
5110# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5111 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5112
5113/**
5114 * Check if an SVM read DRx intercept is set.
5115 */
5116# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5117 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5118
5119/**
5120 * Check if an SVM write DRx intercept is set.
5121 */
5122# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5123 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5124
5125/**
5126 * Check if an SVM exception intercept is set.
5127 */
5128# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
5129 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
5130
5131/**
5132 * Invokes the SVM \#VMEXIT handler for the nested-guest.
5133 */
5134# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
5135 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
5136
5137/**
5138 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
5139 * corresponding decode assist information.
5140 */
5141# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
5142 do \
5143 { \
5144 uint64_t uExitInfo1; \
5145 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
5146 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
5147 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
5148 else \
5149 uExitInfo1 = 0; \
5150 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
5151 } while (0)
5152
5153/** Checks and handles SVM nested-guest instruction intercepts and updates
5154 * the NRIP if needed.
5155 */
5156# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5157 do \
5158 { \
5159 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
5160 { \
5161 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5162 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
5163 } \
5164 } while (0)
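/*
 * Illustrative sketch (hypothetical; the WBINVD constants are assumed to come
 * from the SVM headers) of wiring up an instruction intercept before doing the
 * actual work; nothing past the macro runs if the intercept triggers a #VMEXIT:
 *
 *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD,
 *                                   0, 0, cbInstr);   // no extra exit info needed here
 *     // ... perform the emulated operation ...
 */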
5165
5166/** Checks and handles SVM nested-guest CR0 read intercept. */
5167# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5168 do \
5169 { \
5170 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
5171 { /* probably likely */ } \
5172 else \
5173 { \
5174 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5175 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
5176 } \
5177 } while (0)
5178
5179/**
5180 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
5181 */
5182# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
5183 do { \
5184 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
5185 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
5186 } while (0)
5187
5188#else
5189# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
5190# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5191# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5192# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5193# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5194# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
5195# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
5196# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
5197# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
5198 a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5199# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5200# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) do { } while (0)
5201
5202#endif
5203
5204/** @} */
5205
5206uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
5207VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
5208
5209
5210/**
5211 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
5212 */
5213typedef union IEMSELDESC
5214{
5215 /** The legacy view. */
5216 X86DESC Legacy;
5217 /** The long mode view. */
5218 X86DESC64 Long;
5219} IEMSELDESC;
5220/** Pointer to a selector descriptor table entry. */
5221typedef IEMSELDESC *PIEMSELDESC;
5222
5223/** @name Raising Exceptions.
5224 * @{ */
5225VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
5226 uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
5227
5228VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
5229 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5230#ifdef IEM_WITH_SETJMP
5231DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
5232 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
5233#endif
5234VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
5235#ifdef IEM_WITH_SETJMP
5236DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5237#endif
5238VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5239VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
5240VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
5241#ifdef IEM_WITH_SETJMP
5242DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5243#endif
5244VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
5245#ifdef IEM_WITH_SETJMP
5246DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5247#endif
5248VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5249VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
5250VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5251VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5252/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
5253VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5254VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5255VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5256VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5257VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5258VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5259#ifdef IEM_WITH_SETJMP
5260DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5261#endif
5262VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5263VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
5264VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5265#ifdef IEM_WITH_SETJMP
5266DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5267#endif
5268VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5269#ifdef IEM_WITH_SETJMP
5270DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
5271#endif
5272VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5273#ifdef IEM_WITH_SETJMP
5274DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5275#endif
5276VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
5277#ifdef IEM_WITH_SETJMP
5278DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
5279#endif
5280VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5281#ifdef IEM_WITH_SETJMP
5282DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5283#endif
5284VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5285#ifdef IEM_WITH_SETJMP
5286DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5287#endif
5288VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5289#ifdef IEM_WITH_SETJMP
5290DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5291#endif
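/*
 * Illustrative sketch (hypothetical fragments; uSel is a made-up local and
 * X86_SEL_RPL is assumed to come from the x86 headers) of the two flavours of
 * raising an exception declared above:
 *
 *     // Status-code style, for the non-setjmp paths:
 *     if (uSel & X86_SEL_RPL)
 *         return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uSel);
 *
 *     // Longjmp style, for IEM_WITH_SETJMP paths - these do not return:
 *     iemRaiseGeneralProtectionFault0Jmp(pVCpu);
 */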
5292
5293void iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5294void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5295
5296IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
5297IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
5298IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
5299
5300/**
5301 * Macro for calling iemCImplRaiseDivideError().
5302 *
5303 * This is for things that will _always_ decode to an \#DE, taking the
5304 * recompiler into consideration and everything.
5305 *
5306 * @return Strict VBox status code.
5307 */
5308#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseDivideError)
5309
5310/**
5311 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5312 *
5313 * This is for things that will _always_ decode to an \#UD, taking the
5314 * recompiler into consideration and everything.
5315 *
5316 * @return Strict VBox status code.
5317 */
5318#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidLockPrefix)
5319
5320/**
5321 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
5322 *
5323 * This is for things that will _always_ decode to an \#UD, taking the
5324 * recompiler into consideration and everything.
5325 *
5326 * @return Strict VBox status code.
5327 */
5328#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
5329
5330/**
5331 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
5332 *
5333 * Using this macro means you've got _buggy_ _code_ and are doing things that
5334 * belong exclusively in IEMAllCImpl.cpp during decoding.
5335 *
5336 * @return Strict VBox status code.
5337 * @see IEMOP_RAISE_INVALID_OPCODE_RET
5338 */
5339#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
5340
5341/** @} */
5342
5343/** @name Register Access.
5344 * @{ */
5345VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5346 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5347VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
5348VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5349 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5350/** @} */
5351
5352/** @name FPU access and helpers.
5353 * @{ */
5354void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5355void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5356void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5357void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5358void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5359void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5360 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5361void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5362 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5363void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5364void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5365void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5366void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5367void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5368void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5369void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5370void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5371void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5372void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5373void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5374void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5375void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5376void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5377void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5378/** @} */
5379
5380/** @name SSE+AVX SIMD access and helpers.
5381 * @{ */
5382void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT;
5383void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
5384/** @} */
5385
5386/** @name Memory access.
5387 * @{ */
5388
5389/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
5390#define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16)
5391/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
5392 * when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
5393#define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17)
5394/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
5395 * Users include FXSAVE & FXRSTOR. */
5396#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
5397
5398VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5399 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
5400VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5401#ifndef IN_RING3
5402VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5403#endif
5404void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5405void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
5406VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
5407VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5408VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
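/*
 * Illustrative sketch (hypothetical; iEffSeg, GCPtrEff and fSomeBits are made up,
 * IEM_ACCESS_DATA_RW is assumed to be the usual read-write access flag, and the
 * natural-alignment mask is assumed for the alignment control) of the
 * map / modify / commit pattern built on iemMemMap and iemMemCommitAndUnmap:
 *
 *     void        *pvMem      = NULL;
 *     uint8_t      bUnmapInfo = 0;
 *     VBOXSTRICTRC rcStrict   = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, sizeof(uint32_t),
 *                                         iEffSeg, GCPtrEff, IEM_ACCESS_DATA_RW,
 *                                         sizeof(uint32_t) - 1);   // alignment mask
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *(uint32_t *)pvMem |= fSomeBits;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *     }
 *     return rcStrict;
 */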
5409
5410void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
5411void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
5412#ifdef IEM_WITH_CODE_TLB
5413void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
5414#else
5415VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
5416#endif
5417#ifdef IEM_WITH_SETJMP
5418uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5419uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5420uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5421uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5422#else
5423VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
5424VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5425VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5426VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5427VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5428VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5429VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5430VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5431VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5432VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5433VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5434#endif
5435
5436VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5437VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5438VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5439VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5440VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5441VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5442VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5443VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5444VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5445VBOXSTRICTRC iemMemFetchDataU128NoAc(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5446VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5447VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5448VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5449VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5450VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
5451 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
5452#ifdef IEM_WITH_SETJMP
5453uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5454uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5455uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5456uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5457uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5458uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5459void iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5460void iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5461void iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5462void iemMemFetchDataU128NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5463void iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5464void iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5465void iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5466void iemMemFetchDataU256AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5467# if 0 /* these are inlined now */
5468uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5469uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5470uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5471uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5472uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5473uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5474void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5475void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5476void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5477void iemMemFetchDataU128NoAcJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5478# endif
5479void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5480void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5481void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5482void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5483#endif
5484
5485VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5486VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5487VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5488VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5489VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
5490
5491VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
5492VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
5493VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
5494VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
5495VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5496VBOXSTRICTRC iemMemStoreDataU128NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5497VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5498VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5499VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5500VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5501VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5502#ifdef IEM_WITH_SETJMP
5503void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5504void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5505void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5506void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5507void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5508void iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5509void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5510void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5511void iemMemStoreDataU256NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5512void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5513void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5514void iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5515#if 0
5516void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5517void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5518void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5519void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5520void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5521void iemMemStoreDataNoAcU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5522#endif
5523void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5524void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5525void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5526void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5527#endif
5528
5529#ifdef IEM_WITH_SETJMP
5530uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5531uint8_t *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5532uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5533uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5534uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5535uint16_t *iemMemMapDataU16AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5536uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5537uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5538uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5539uint32_t *iemMemMapDataU32AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5540uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5541uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5542uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5543uint64_t *iemMemMapDataU64AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5544uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5545uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5546PRTFLOAT80U iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5547PRTFLOAT80U iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5548PCRTFLOAT80U iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5549PRTPBCD80U iemMemMapDataD80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5550PRTPBCD80U iemMemMapDataD80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5551PCRTPBCD80U iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5552PRTUINT128U iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5553PRTUINT128U iemMemMapDataU128AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5554PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5555PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5556
5557void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5558void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5559void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5560void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5561void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5562void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5563#endif
5564
5565VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5566 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5567VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
5568VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
5569VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5570VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
5571VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5572VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5573VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5574VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5575VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5576 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5577VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
5578 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
5579VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5580VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
5581VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
5582VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
5583VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5584VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5585VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
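/** @remarks Usage sketch for the status-code based stack helpers above; every
 *           call returns a VBOXSTRICTRC which the caller must propagate on
 *           failure (the pushed value is a placeholder).
 * @code
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, u32Value);   // u32Value is a placeholder
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 * @endcode
 */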
5586
5587#ifdef IEM_WITH_SETJMP
5588void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5589void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5590void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5591void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5592void iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5593void iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5594void iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5595
5596void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5597void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5598void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5599void iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5600void iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5601
5602void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5603void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5604void iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5605void iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5606
5607void iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5608void iemMemStoreStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5609void iemMemStoreStackU32SRegSafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5610void iemMemStoreStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5611
5612uint16_t iemMemFetchStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5613uint32_t iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5614uint64_t iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5615
5616#endif
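/** @remarks With IEM_WITH_SETJMP defined, the *SafeJmp stack helpers above take
 *           or return values directly and longjmp on any fault, so no status
 *           code handling is needed at the call site.  Sketch (u64Value and
 *           GCPtrStackTop are placeholders):
 * @code
 *  #ifdef IEM_WITH_SETJMP
 *      iemMemStackPushU64SafeJmp(pVCpu, u64Value);
 *      uint64_t const u64Top = iemMemFetchStackU64SafeJmp(pVCpu, GCPtrStackTop);
 *  #endif
 * @endcode
 */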
5617
5618/** @} */
5619
5620/** @name IEMAllCImpl.cpp
5621 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
5622 * @{ */
5623IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5624IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5625IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5626IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
5627IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
5628IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
5629IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
5630IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
5631IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
5632IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
5633IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
5634IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
5635IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
5636IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
5637IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
5638IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5639IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5640typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5641typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
5642IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
5643IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
5644IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
5645IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
5646IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
5647IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
5648IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
5649IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
5650IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
5651IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
5652IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
5653IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
5654IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
5655IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
5656IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
5657IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
5658IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
5659IEM_CIMPL_PROTO_0(iemCImpl_syscall);
5660IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
5661IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
5662IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
5663IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
5664IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
5665IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
5666IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
5667IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
5668IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
5669IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
5670IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5671IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5672IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5673IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5674IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
5675IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5676IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5677IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
5678IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5679IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5680IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
5681IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5682IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5683IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
5684IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
5685IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
5686IEM_CIMPL_PROTO_0(iemCImpl_clts);
5687IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
5688IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
5689IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
5690IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
5691IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
5692IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
5693IEM_CIMPL_PROTO_0(iemCImpl_invd);
5694IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
5695IEM_CIMPL_PROTO_0(iemCImpl_rsm);
5696IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
5697IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
5698IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
5699IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
5700IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
5701IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5702IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5703IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5704IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5705IEM_CIMPL_PROTO_0(iemCImpl_cli);
5706IEM_CIMPL_PROTO_0(iemCImpl_sti);
5707IEM_CIMPL_PROTO_0(iemCImpl_hlt);
5708IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
5709IEM_CIMPL_PROTO_0(iemCImpl_mwait);
5710IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
5711IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
5712IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
5713IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
5714IEM_CIMPL_PROTO_0(iemCImpl_daa);
5715IEM_CIMPL_PROTO_0(iemCImpl_das);
5716IEM_CIMPL_PROTO_0(iemCImpl_aaa);
5717IEM_CIMPL_PROTO_0(iemCImpl_aas);
5718IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
5719IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
5720IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
5721IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
5722IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
5723 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
5724IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5725IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
5726IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5727IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5728IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5729IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5730IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5731IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5732IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5733IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5734IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5735IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5736IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5737IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
5738IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
5739IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
5740IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
5741IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
5742/** @} */
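/** @remarks The prototypes above mirror IEM_CIMPL_DEF_* definitions in
 *           IEMAllCImpl.cpp (see the sed note at the top of this group).  A
 *           definition site therefore looks roughly like the sketch below; the
 *           body is a placeholder.
 * @code
 *  IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
 *  {
 *      // ... unwind the stack frame according to enmEffOpSize ...
 *      return VINF_SUCCESS;    // placeholder
 *  }
 * @endcode
 */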
5743
5744/** @name IEMAllCImplStrInstr.cpp.h
5745 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
5746 * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
5747 * @{ */
5748IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
5749IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
5750IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
5751IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
5752IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
5753IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
5754IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
5755IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
5756IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
5757IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5758IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5759
5760IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
5761IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
5762IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
5763IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
5764IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
5765IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
5766IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
5767IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
5768IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
5769IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5770IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5771
5772IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
5773IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
5774IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
5775IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
5776IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
5777IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
5778IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
5779IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
5780IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
5781IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5782IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5783
5784
5785IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
5786IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
5787IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
5788IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
5789IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
5790IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
5791IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
5792IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
5793IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
5794IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5795IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5796
5797IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
5798IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
5799IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
5800IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
5801IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
5802IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
5803IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
5804IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
5805IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
5806IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5807IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5808
5809IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
5810IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
5811IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
5812IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
5813IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
5814IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
5815IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
5816IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
5817IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
5818IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5819IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5820
5821IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
5822IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
5823IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
5824IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
5825IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
5826IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
5827IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
5828IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
5829IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
5830IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5831IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5832
5833
5834IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
5835IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
5836IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
5837IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
5838IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
5839IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
5840IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
5841IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
5842IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
5843IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5844IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5845
5846IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
5847IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
5848IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
5849IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
5850IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
5851IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
5852IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
5853IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
5854IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
5855IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5856IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5857
5858IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
5859IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
5860IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
5861IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
5862IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
5863IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
5864IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
5865IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
5866IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
5867IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5868IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5869
5870IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
5871IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
5872IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
5873IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
5874IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
5875IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
5876IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
5877IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
5878IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
5879IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5880IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5881/** @} */
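/** @remarks The worker names above encode the operand size (op8/op16/op32/op64
 *           or al/ax/eax/rax) and the effective address size (addr16/addr32/addr64
 *           or m16/m32/m64); a caller picks the variant matching the decoded
 *           instruction.  Illustrative sketch, assuming the usual IEM_CIMPL
 *           calling convention (pVCpu, cbInstr, args), with enmEffAddrMode and
 *           iEffSeg standing in for decoder state:
 * @code
 *      switch (enmEffAddrMode)
 *      {
 *          case IEMMODE_16BIT: return iemCImpl_rep_movs_op8_addr16(pVCpu, cbInstr, iEffSeg);
 *          case IEMMODE_32BIT: return iemCImpl_rep_movs_op8_addr32(pVCpu, cbInstr, iEffSeg);
 *          default:            return iemCImpl_rep_movs_op8_addr64(pVCpu, cbInstr, iEffSeg);
 *      }
 * @endcode
 */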
5882
5883#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5884VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
5885VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
5886VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
5887VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
5888VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
5889VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5890VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
5891VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
5892VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
5893VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
5894 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
5895VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
5896 bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
5897VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5898VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5899VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5900VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5901VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5902VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5903VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
5904VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
5905 RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
5906VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
5907VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
5908VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
5909uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
5910void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
5911VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
5912 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
5913bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
5914IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
5915IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
5916IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
5917IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
5918IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5919IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5920IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5921IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
5922IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
5923IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
5924IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
5925IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
5926IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
5927IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
5928IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
5929IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
5930#endif
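/** @remarks Sketch of raising a nested-guest VM-exit for an intercepted
 *           instruction using the helpers above; the exit reason and the
 *           intercept condition are illustrative placeholders.
 * @code
 *  #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
 *      if (fInterceptedByNestedGuest)      // placeholder condition
 *          return iemVmxVmexitInstr(pVCpu, VMX_EXIT_CPUID, cbInstr);
 *  #endif
 * @endcode
 */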
5931
5932#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5933VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
5934VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5935VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
5936 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
5937VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
5938IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
5939IEM_CIMPL_PROTO_0(iemCImpl_vmload);
5940IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
5941IEM_CIMPL_PROTO_0(iemCImpl_clgi);
5942IEM_CIMPL_PROTO_0(iemCImpl_stgi);
5943IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
5944IEM_CIMPL_PROTO_0(iemCImpl_skinit);
5945IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
5946#endif
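/** @remarks Corresponding SVM sketch: a \#VMEXIT is raised with an exit code and
 *           two exit-info values (all three shown here are illustrative).
 * @code
 *  #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 *      if (fInterceptedByNestedGuest)      // placeholder condition
 *          return iemSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0, 0);   // exit code, uExitInfo1, uExitInfo2
 *  #endif
 * @endcode
 */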
5947
5948IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
5949IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
5950IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */
5951
5952extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
5953extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
5954extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
5955extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
5956extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
5957extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
5958extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
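/** @remarks The one-byte map is indexed directly by the opcode byte; lookup
 *           sketch (bOpcode is a placeholder and the FNIEMOP dispatching
 *           conventions are omitted):
 * @code
 *      PFNIEMOP const pfnOp = g_apfnIemInterpretOnlyOneByteMap[bOpcode];
 * @endcode
 */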
5959
5960/*
5961 * Recompiler-related stuff.
5962 */
5963extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
5964extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
5965extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
5966extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
5967extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
5968extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
5969extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
5970
5971DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
5972 uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
5973void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
5974void iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
5975void iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
5976DECLEXPORT(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT;
5977DECLHIDDEN(void) iemThreadedDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
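/** @remarks Logging sketch using iemTbFlagsToString(); the buffer size is
 *           arbitrary and fFlags stands in for the IEMTB_F_XXX mask to describe.
 * @code
 *      char szFlags[256];
 *      Log(("TB flags: %s\n", iemTbFlagsToString(fFlags, szFlags, sizeof(szFlags))));
 * @endcode
 */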
5978
5979
5980/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
5981#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
5982typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
5983typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
5984# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
5985 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
5986# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
5987 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
5988
5989#else
5990typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
5991typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
5992# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
5993 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
5994# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
5995 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
5996#endif
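/** @remarks Minimal sketch of a threaded function defined with the macros above
 *           (the name and body are placeholders):
 * @code
 *  IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_Example)
 *  {
 *      RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
 *      // ... perform the work recorded for this call table entry ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */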
5997
5998
5999IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
6000IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_LogCpuState);
6001
6002IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);
6003
6004IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
6005IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
6006IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
6007IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);
6008
6009IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
6010IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
6011IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);
6012
6013/* Branching: */
6014IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
6015IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
6016IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);
6017
6018IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
6019IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
6020IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);
6021
6022/* Natural page crossing: */
6023IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
6024IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
6025IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);
6026
6027IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
6028IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
6029IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);
6030
6031IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
6032IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
6033IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
6034
6035bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
6036bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);
6037
6038/* Native recompiler public bits: */
6039DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
6040DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
6041int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk);
6042void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb);
6043DECLASM(DECL_NO_RETURN(void)) iemNativeTbLongJmp(void *pvFramePointer, int rc) RT_NOEXCEPT;
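/** @remarks Init-time sketch for the executable-memory allocator declared above;
 *           the sizes are purely illustrative, not tuned defaults.
 * @code
 *      int rc = iemExecMemAllocatorInit(pVCpu, 64 * _1M, 16 * _1M, 2 * _1M);   // cbMax, cbInitial, cbChunk
 *      AssertRCReturn(rc, rc);
 * @endcode
 */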
6044
6045
6046/** @} */
6047
6048RT_C_DECLS_END
6049
6050#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
6051