VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 104206

Last change on this file since 104206 was 104206, checked in by vboxsync, 8 months ago

VMM/IEM: Refactoring assembly helpers to not pass eflags by reference but instead by value and return the updated value (via eax/w0) - first chunk: IMUL(two ops), BSF, BSR, LZCNT, TZCNT, POPCNT. bugref:10376

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 315.6 KB
1/* $Id: IEMInternal.h 104206 2024-04-05 20:28:19Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41#include <iprt/list.h>
42
43
44RT_C_DECLS_BEGIN
45
46
47/** @defgroup grp_iem_int Internals
48 * @ingroup grp_iem
49 * @internal
50 * @{
51 */
52
53/** For expanding symbols in SlickEdit and other products when tagging and
54 * cross-referencing IEM symbols. */
55#ifndef IEM_STATIC
56# define IEM_STATIC static
57#endif
58
59/** @def IEM_WITH_SETJMP
60 * Enables alternative status code handling using setjmps.
61 *
62 * This adds a bit of expense via the setjmp() call since it saves all the
63 * non-volatile registers. However, it eliminates return code checks and allows
64 * for more optimal return value passing (return regs instead of stack buffer).
65 */
66#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
67# define IEM_WITH_SETJMP
68#endif
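/* A minimal illustration of the two status propagation styles this option
 * selects between; the fetch helper names below are made up for the example:
 * @code
 *  // Status code style: every caller checks and forwards the status.
 *  VBOXSTRICTRC rcStrict = iemExampleFetchByte(pVCpu, &bOpcode);
 *  if (rcStrict != VINF_SUCCESS)
 *      return rcStrict;
 *
 *  // Setjmp style: the helper longjmps on failure (see IEM_DO_LONGJMP below),
 *  // so the common path has no check and the value comes back in a register.
 *  bOpcode = iemExampleFetchByteJmp(pVCpu);
 * @endcode
 */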
69
70/** @def IEM_WITH_THROW_CATCH
71 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
72 * mode code when IEM_WITH_SETJMP is in effect.
73 *
74 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
75 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
76 * test result value improving by more than 1%. (Best out of three.)
77 *
78 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
79 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster and all but some of
80 * the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
81 * Linux, but it should be quite a bit faster for normal code.
82 */
83#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
84 || defined(DOXYGEN_RUNNING)
85# define IEM_WITH_THROW_CATCH
86#endif
87
88/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING
89 * Enables the delayed PC updating optimization (see @bugref{10373}).
90 */
91#if defined(DOXYGEN_RUNNING) || 1
92# define IEMNATIVE_WITH_DELAYED_PC_UPDATING
93#endif
94
95/** Enables the SIMD register allocator @bugref{10614}. */
96#if defined(DOXYGEN_RUNNING) || 1
97# define IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
98#endif
99/** Enables access even to callee-saved registers. */
100//# define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS
101
102#if defined(DOXYGEN_RUNNING) || 1
103/** @def IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
104 * Delay the writeback of dirty registers as long as possible. */
105# define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
106#endif
107
108/** @def VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
109 * Enables a quicker alternative to throw/longjmp for IEM_DO_LONGJMP when
110 * executing native translation blocks.
111 *
112 * This exploits the fact that we save all non-volatile registers in the TB
113 * prologue and thus just need to do the same as the TB epilogue to get the
114 * effect of a longjmp/throw. Since MSC marks XMM6 thru XMM15 as
115 * non-volatile (and does something even more crazy for ARM), this probably
116 * won't work reliably on Windows. */
117#if defined(DOXYGEN_RUNNING) || (!defined(RT_OS_WINDOWS) && (defined(RT_ARCH_ARM64) /*|| defined(_RT_ARCH_AMD64)*/))
118# define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
119#endif
120#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
121# if !defined(IN_RING3) \
122 || !defined(VBOX_WITH_IEM_RECOMPILER) \
123 || !defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
124# undef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
125# elif defined(RT_OS_WINDOWS)
126# pragma message("VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is not safe to use on windows")
127# endif
128#endif
129
130
131/** @def IEM_DO_LONGJMP
132 *
133 * Wrapper around longjmp / throw.
134 *
135 * @param a_pVCpu The CPU handle.
136 * @param a_rc The status code jump back with / throw.
137 */
138#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
139# ifdef IEM_WITH_THROW_CATCH
140# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
141# define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
142 if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
143 iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
144 throw int(a_rc); \
145 } while (0)
146# else
147# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
148# endif
149# else
150# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
151# endif
152#endif
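/* Illustrative sketch of a slow-path helper bailing out via IEM_DO_LONGJMP;
 * the helper names (iemExample*) are hypothetical:
 * @code
 *  DECL_NO_INLINE(static, uint8_t) iemExampleOpcodeFetchSlow(PVMCPUCC pVCpu)
 *  {
 *      VBOXSTRICTRC const rcStrict = iemExampleRefillOpcodeBuffer(pVCpu);
 *      if (RT_LIKELY(rcStrict == VINF_SUCCESS))
 *          return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
 *      IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); // does not return
 *  }
 * @endcode
 */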
153
154/** For use with IEM functions that may do a longjmp (when enabled).
155 *
156 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
157 * attribute. So, we indicate that functions that may be part of a longjmp may
158 * throw "exceptions" and that the compiler should definitely not generate any
159 * std::terminate calling unwind code.
160 *
161 * Here is one example of this ending in std::terminate:
162 * @code{.txt}
16300 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
16401 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
16502 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
16603 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
16704 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
16805 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
16906 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
17007 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
17108 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
17209 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
1730a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
1740b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
1750c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
1760d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
1770e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
1780f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
17910 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
180 * @endcode
181 *
182 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
183 */
184#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
185# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
186#else
187# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
188#endif
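/* Typical use on the prototype of a function that may longjmp rather than
 * return a status code (the function name here is made up):
 * @code
 *  uint64_t iemExampleGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
 * @endcode
 */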
189
190#define IEM_IMPLEMENTS_TASKSWITCH
191
192/** @def IEM_WITH_3DNOW
193 * Includes the 3DNow decoding. */
194#if (!defined(IEM_WITH_3DNOW) && !defined(IEM_WITHOUT_3DNOW)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
195# define IEM_WITH_3DNOW
196#endif
197
198/** @def IEM_WITH_THREE_0F_38
199 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
200#if (!defined(IEM_WITH_THREE_0F_38) && !defined(IEM_WITHOUT_THREE_0F_38)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
201# define IEM_WITH_THREE_0F_38
202#endif
203
204/** @def IEM_WITH_THREE_0F_3A
205 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
206#if (!defined(IEM_WITH_THREE_0F_3A) && !defined(IEM_WITHOUT_THREE_0F_3A)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
207# define IEM_WITH_THREE_0F_3A
208#endif
209
210/** @def IEM_WITH_VEX
211 * Includes the VEX decoding. */
212#if (!defined(IEM_WITH_VEX) && !defined(IEM_WITHOUT_VEX)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
213# define IEM_WITH_VEX
214#endif
215
216/** @def IEM_CFG_TARGET_CPU
217 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
218 *
219 * By default we allow this to be configured by the user via the
220 * CPUM/GuestCpuName config string, but this comes at a slight cost during
221 * decoding. So, for applications of this code where there is no need to
222 * be dynamic wrt target CPU, just modify this define.
223 */
224#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
225# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
226#endif
227
228//#define IEM_WITH_CODE_TLB // - work in progress
229//#define IEM_WITH_DATA_TLB // - work in progress
230
231
232/** @def IEM_USE_UNALIGNED_DATA_ACCESS
233 * Use unaligned accesses instead of elaborate byte assembly. */
234#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
235# define IEM_USE_UNALIGNED_DATA_ACCESS
236#endif
237
238//#define IEM_LOG_MEMORY_WRITES
239
240#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
241/** Instruction statistics. */
242typedef struct IEMINSTRSTATS
243{
244# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
245# include "IEMInstructionStatisticsTmpl.h"
246# undef IEM_DO_INSTR_STAT
247} IEMINSTRSTATS;
248#else
249struct IEMINSTRSTATS;
250typedef struct IEMINSTRSTATS IEMINSTRSTATS;
251#endif
252/** Pointer to IEM instruction statistics. */
253typedef IEMINSTRSTATS *PIEMINSTRSTATS;
254
255
256/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
257 * @{ */
258#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
259#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
260#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result */
261#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
262#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
263/** Selects the right variant from a_aArray.
264 * pVCpu is implicit in the caller context. */
265#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
266 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
267/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
268 * be used because the host CPU does not support the operation. */
269#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
270 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
271/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
272 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
273 * into the two.
274 * @sa IEM_SELECT_HOST_OR_FALLBACK */
275#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
276# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
277 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
278#else
279# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
280 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
281#endif
282/** @} */
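/* Illustrative sketch of consuming a flavour-indexed worker table with the
 * macro above; the typedef, table and worker names are made up for the
 * example (pVCpu is picked up implicitly from the caller, as documented):
 * @code
 *  typedef uint32_t (*PFNEXAMPLEEFLWORKER)(uint32_t uOp1, uint32_t uOp2, uint32_t *pfEFlags);
 *  static PFNEXAMPLEEFLWORKER const s_apfnExample[4] =
 *  {
 *      iemExampleWorker,        // [0] native host behaviour
 *      iemExampleWorker_intel,  // [1] Intel EFLAGS behaviour
 *      iemExampleWorker_amd,    // [2] AMD EFLAGS behaviour
 *      iemExampleWorker,        // [3] reserved, same as [0]
 *  };
 *  PFNEXAMPLEEFLWORKER const pfnWorker = IEMTARGETCPU_EFL_BEHAVIOR_SELECT(s_apfnExample);
 * @endcode
 */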
283
284/**
285 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
286 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
287 *
288 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
289 * indicator.
290 *
291 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
292 */
293#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
294# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
295 (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
296#else
297# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
298#endif
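/* Illustrative sketch of falling back to a portable C worker when the host
 * CPU lacks the instruction; the feature member and worker names below are
 * made up for the example:
 * @code
 *  pfnWorker = IEM_SELECT_HOST_OR_FALLBACK(fPopCnt,
 *                                          iemExampleWorker_popcnt,           // assembly, needs host POPCNT
 *                                          iemExampleWorker_popcnt_fallback); // portable C implementation
 * @endcode
 */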
299
300
301/**
302 * Extended operand mode that includes a representation of 8-bit.
303 *
304 * This is used for packing down modes when invoking some C instruction
305 * implementations.
306 */
307typedef enum IEMMODEX
308{
309 IEMMODEX_16BIT = IEMMODE_16BIT,
310 IEMMODEX_32BIT = IEMMODE_32BIT,
311 IEMMODEX_64BIT = IEMMODE_64BIT,
312 IEMMODEX_8BIT
313} IEMMODEX;
314AssertCompileSize(IEMMODEX, 4);
315
316
317/**
318 * Branch types.
319 */
320typedef enum IEMBRANCH
321{
322 IEMBRANCH_JUMP = 1,
323 IEMBRANCH_CALL,
324 IEMBRANCH_TRAP,
325 IEMBRANCH_SOFTWARE_INT,
326 IEMBRANCH_HARDWARE_INT
327} IEMBRANCH;
328AssertCompileSize(IEMBRANCH, 4);
329
330
331/**
332 * INT instruction types.
333 */
334typedef enum IEMINT
335{
336 /** INT n instruction (opcode 0xcd imm). */
337 IEMINT_INTN = 0,
338 /** Single byte INT3 instruction (opcode 0xcc). */
339 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
340 /** Single byte INTO instruction (opcode 0xce). */
341 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
342 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
343 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
344} IEMINT;
345AssertCompileSize(IEMINT, 4);
346
347
348/**
349 * A FPU result.
350 */
351typedef struct IEMFPURESULT
352{
353 /** The output value. */
354 RTFLOAT80U r80Result;
355 /** The output status. */
356 uint16_t FSW;
357} IEMFPURESULT;
358AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
359/** Pointer to a FPU result. */
360typedef IEMFPURESULT *PIEMFPURESULT;
361/** Pointer to a const FPU result. */
362typedef IEMFPURESULT const *PCIEMFPURESULT;
363
364
365/**
366 * A FPU result consisting of two output values and FSW.
367 */
368typedef struct IEMFPURESULTTWO
369{
370 /** The first output value. */
371 RTFLOAT80U r80Result1;
372 /** The output status. */
373 uint16_t FSW;
374 /** The second output value. */
375 RTFLOAT80U r80Result2;
376} IEMFPURESULTTWO;
377AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
378AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
379/** Pointer to a FPU result consisting of two output values and FSW. */
380typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
381/** Pointer to a const FPU result consisting of two output values and FSW. */
382typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
383
384
385/**
386 * IEM TLB entry.
387 *
388 * Lookup assembly:
389 * @code{.asm}
390 ; Calculate tag.
391 mov rax, [VA]
392 shl rax, 16
393 shr rax, 16 + X86_PAGE_SHIFT
394 or rax, [uTlbRevision]
395
396 ; Do indexing.
397 movzx ecx, al
398 lea rcx, [pTlbEntries + rcx]
399
400 ; Check tag.
401 cmp [rcx + IEMTLBENTRY.uTag], rax
402 jne .TlbMiss
403
404 ; Check access.
405 mov rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
406 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
407 cmp rax, [uTlbPhysRev]
408 jne .TlbMiss
409
410 ; Calc address and we're done.
411 mov eax, X86_PAGE_OFFSET_MASK
412 and eax, [VA]
413 or rax, [rcx + IEMTLBENTRY.pMappingR3]
414 %ifdef VBOX_WITH_STATISTICS
415 inc qword [cTlbHits]
416 %endif
417 jmp .Done
418
419 .TlbMiss:
420 mov r8d, ACCESS_FLAGS
421 mov rdx, [VA]
422 mov rcx, [pVCpu]
423 call iemTlbTypeMiss
424 .Done:
425
426 @endcode
427 *
428 */
429typedef struct IEMTLBENTRY
430{
431 /** The TLB entry tag.
432 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits; this
433 * is ASSUMING a virtual address width of 48 bits.
434 *
435 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
436 *
437 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
438 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
439 * revision wraps around though, the tags need to be zeroed.
440 *
441 * @note Try using the SHRD instruction? After seeing
442 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
443 *
444 * @todo This will need to be reorganized for 57-bit wide virtual address and
445 * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
446 * have to move the TLB entry versioning entirely to the
447 * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
448 * 19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
449 * consumed by PCID and ASID (12 + 6 = 18).
450 */
451 uint64_t uTag;
452 /** Access flags and physical TLB revision.
453 *
454 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
455 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
456 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
457 * - Bit 3 - pgm phys/virt - not directly writable.
458 * - Bit 4 - pgm phys page - not directly readable.
459 * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
460 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
461 * - Bit 7 - tlb entry - pMappingR3 member not valid.
462 * - Bits 63 thru 8 are used for the physical TLB revision number.
463 *
464 * We're using complemented bit meanings here because it makes it easy to check
465 * whether special action is required. For instance a user mode write access
466 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
467 * non-zero result would mean special handling needed because either it wasn't
468 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
469 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
470 * need to check any PTE flag.
471 */
472 uint64_t fFlagsAndPhysRev;
473 /** The guest physical page address. */
474 uint64_t GCPhys;
475 /** Pointer to the ring-3 mapping. */
476 R3PTRTYPE(uint8_t *) pbMappingR3;
477#if HC_ARCH_BITS == 32
478 uint32_t u32Padding1;
479#endif
480} IEMTLBENTRY;
481AssertCompileSize(IEMTLBENTRY, 32);
482/** Pointer to an IEM TLB entry. */
483typedef IEMTLBENTRY *PIEMTLBENTRY;
484
485/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
486 * @{ */
487#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
488#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
489#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
490#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
491#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
492#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Phys tables: Not accessed (need to be marked accessed). */
493#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
494#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */
495#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
496#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(9) /**< Phys page: Code page. */
497#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
498/** @} */
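/* C sketch of the combined access check the complemented flag encoding makes
 * possible, here for a user-mode write as described in the fFlagsAndPhysRev
 * comment above (illustrative only; pTlbe stands for the entry being checked
 * and the real code also folds in the physical revision and mapping checks):
 * @code
 *  if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_DIRTY))
 *  {
 *      // Slow path: not writable, not user accessible, or not marked dirty yet.
 *  }
 *  else
 *  {
 *      // Fast path candidate: a plain user-mode write is fine at the PTE level.
 *  }
 * @endcode
 */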
499
500
501/**
502 * An IEM TLB.
503 *
504 * We've got two of these, one for data and one for instructions.
505 */
506typedef struct IEMTLB
507{
508 /** The TLB revision.
509 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
510 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
511 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
512 * (The revision zero indicates an invalid TLB entry.)
513 *
514 * The initial value is chosen to cause an early wraparound. */
515 uint64_t uTlbRevision;
516 /** The TLB physical address revision - shadow of PGM variable.
517 *
518 * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
519 * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
520 * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pMappingR3 as well
521 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
522 *
523 * The initial value is chosen to cause an early wraparound. */
524 uint64_t volatile uTlbPhysRev;
525
526 /* Statistics: */
527
528 /** TLB hits (VBOX_WITH_STATISTICS only). */
529 uint64_t cTlbHits;
530 /** TLB misses. */
531 uint32_t cTlbMisses;
532 /** Slow read path. */
533 uint32_t cTlbSlowReadPath;
534 /** Safe read path. */
535 uint32_t cTlbSafeReadPath;
536 /** Safe write path. */
537 uint32_t cTlbSafeWritePath;
538#if 0
539 /** TLB misses because of tag mismatch. */
540 uint32_t cTlbMissesTag;
541 /** TLB misses because of virtual access violation. */
542 uint32_t cTlbMissesVirtAccess;
543 /** TLB misses because of dirty bit. */
544 uint32_t cTlbMissesDirty;
545 /** TLB misses because of MMIO */
546 uint32_t cTlbMissesMmio;
547 /** TLB misses because of write access handlers. */
548 uint32_t cTlbMissesWriteHandler;
549 /** TLB misses because no r3(/r0) mapping. */
550 uint32_t cTlbMissesMapping;
551#endif
552 /** Alignment padding. */
553 uint32_t au32Padding[6];
554
555 /** The TLB entries.
556 * We've chosen 256 because that way we can obtain the result directly from an
557 * 8-bit register without an additional AND instruction. */
558 IEMTLBENTRY aEntries[256];
559} IEMTLB;
560AssertCompileSizeAlignment(IEMTLB, 64);
561/** IEMTLB::uTlbRevision increment. */
562#define IEMTLB_REVISION_INCR RT_BIT_64(36)
563/** IEMTLB::uTlbRevision mask. */
564#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
565/** IEMTLB::uTlbPhysRev increment.
566 * @sa IEMTLBE_F_PHYS_REV */
567#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
568/**
569 * Calculates the TLB tag for a virtual address.
570 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
571 * @param a_pTlb The TLB.
572 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
573 * the clearing of the top 16 bits won't work (if 32-bit
574 * we'll end up with mostly zeros).
575 */
576#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
577/**
578 * Calculates the TLB tag for a virtual address but without TLB revision.
579 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
580 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
581 * the clearing of the top 16 bits won't work (if 32-bit
582 * we'll end up with mostly zeros).
583 */
584#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
585/**
586 * Converts a TLB tag value into a TLB index.
587 * @returns Index into IEMTLB::aEntries.
588 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
589 */
590#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
591/**
592 * Converts a TLB tag value into a TLB entry pointer.
593 * @returns Pointer to the corresponding entry in IEMTLB::aEntries.
594 * @param a_pTlb The TLB.
595 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
596 */
597#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
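/* C sketch of a data TLB probe using the macros above (illustrative only;
 * the real lookup also validates fFlagsAndPhysRev before using the entry,
 * and GCPtrMem stands for the guest virtual address being accessed):
 * @code
 *  uint64_t const     uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtrMem);
 *  PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
 *  if (pTlbe->uTag == uTag)
 *  {
 *      // Hit: check the access flags against uTlbPhysRev, then use pbMappingR3.
 *  }
 *  else
 *  {
 *      // Miss: walk the page tables and (re)populate *pTlbe.
 *  }
 * @endcode
 */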
598
599
600/** @name IEM_MC_F_XXX - MC block flags/clues.
601 * @todo Merge with IEM_CIMPL_F_XXX
602 * @{ */
603#define IEM_MC_F_ONLY_8086 RT_BIT_32(0)
604#define IEM_MC_F_MIN_186 RT_BIT_32(1)
605#define IEM_MC_F_MIN_286 RT_BIT_32(2)
606#define IEM_MC_F_NOT_286_OR_OLDER IEM_MC_F_MIN_386
607#define IEM_MC_F_MIN_386 RT_BIT_32(3)
608#define IEM_MC_F_MIN_486 RT_BIT_32(4)
609#define IEM_MC_F_MIN_PENTIUM RT_BIT_32(5)
610#define IEM_MC_F_MIN_PENTIUM_II IEM_MC_F_MIN_PENTIUM
611#define IEM_MC_F_MIN_CORE IEM_MC_F_MIN_PENTIUM
612#define IEM_MC_F_64BIT RT_BIT_32(6)
613#define IEM_MC_F_NOT_64BIT RT_BIT_32(7)
614/** This is set by IEMAllN8vePython.py to indicate a variation without the
615 * flags-clearing-and-checking, when there is also a variation with that.
616 * @note Do not use this manually, it's only for python and for testing in
617 * the native recompiler! */
618#define IEM_MC_F_WITHOUT_FLAGS RT_BIT_32(8)
619/** @} */
620
621/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
622 *
623 * These clues are mainly for the recompiler, so that it can emit correct code.
624 *
625 * They are processed by the python script, which also automatically
626 * calculates flags for MC blocks based on the statements, extending the use of
627 * these flags to describe MC block behavior to the recompiler core. The python
628 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
629 * error checking purposes. The script emits the necessary fEndTb = true and
630 * similar statements as this reduces compile time a tiny bit.
631 *
632 * @{ */
633/** Flag set if direct branch, clear if absolute or indirect. */
634#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
635/** Flag set if indirect branch, clear if direct or relative.
636 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
637 * as well as for return instructions (RET, IRET, RETF). */
638#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
639/** Flag set if relative branch, clear if absolute or indirect. */
640#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
641/** Flag set if conditional branch, clear if unconditional. */
642#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
643/** Flag set if it's a far branch (changes CS). */
644#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
645/** Convenience: Testing any kind of branch. */
646#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
647
648/** Execution flags may change (IEMCPU::fExec). */
649#define IEM_CIMPL_F_MODE RT_BIT_32(5)
650/** May change significant portions of RFLAGS. */
651#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
652/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
653#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
654/** May trigger interrupt shadowing. */
655#define IEM_CIMPL_F_INHIBIT_SHADOW RT_BIT_32(8)
656/** May enable interrupts, so recheck IRQ immediately after executing
657 * the instruction. */
658#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(9)
659/** May disable interrupts, so recheck IRQ immediately before executing the
660 * instruction. */
661#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(10)
662/** Convenience: Check for IRQ both before and after an instruction. */
663#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
664/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
665#define IEM_CIMPL_F_VMEXIT RT_BIT_32(11)
666/** May modify FPU state.
667 * @todo Not sure if this is useful yet. */
668#define IEM_CIMPL_F_FPU RT_BIT_32(12)
669/** REP prefixed instruction which may yield before updating PC.
670 * @todo Not sure if this is useful, REP functions now return non-zero
671 * status if they don't update the PC. */
672#define IEM_CIMPL_F_REP RT_BIT_32(13)
673/** I/O instruction.
674 * @todo Not sure if this is useful yet. */
675#define IEM_CIMPL_F_IO RT_BIT_32(14)
676/** Force end of TB after the instruction. */
677#define IEM_CIMPL_F_END_TB RT_BIT_32(15)
678/** Flag set if a branch may also modify the stack (push/pop return address). */
679#define IEM_CIMPL_F_BRANCH_STACK RT_BIT_32(16)
680/** Flag set if a branch may also modify the stack (push/pop return address)
681 * and switch it (load/restore SS:RSP). */
682#define IEM_CIMPL_F_BRANCH_STACK_FAR RT_BIT_32(17)
683/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
684#define IEM_CIMPL_F_XCPT \
685 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
686 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
687
688/** The block calls a C-implementation instruction function with two implicit arguments.
689 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
690 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
691 * @note The python scripts will add this if missing. */
692#define IEM_CIMPL_F_CALLS_CIMPL RT_BIT_32(18)
693/** The block calls an ASM-implementation instruction function.
694 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
695 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
696 * @note The python scripts will add this if missing. */
697#define IEM_CIMPL_F_CALLS_AIMPL RT_BIT_32(19)
698/** The block calls an ASM-implementation instruction function with an implicit
699 * X86FXSTATE pointer argument.
700 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
701 * IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE.
702 * @note The python scripts will add this if missing. */
703#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(20)
704/** The block calls an ASM-implementation instruction function with an implicit
705 * X86XSAVEAREA pointer argument.
706 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
707 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
708 * @note No different from IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE, so same value.
709 * @note The python scripts will add this if missing. */
710#define IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE
711/** @} */
712
713
714/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
715 *
716 * These flags are set when entering IEM and adjusted as code is executed, such
717 * that they will always contain the current values as instructions are
718 * finished.
719 *
720 * In recompiled execution mode, (most of) these flags are included in the
721 * translation block selection key and stored in IEMTB::fFlags alongside the
722 * IEMTB_F_XXX flags. The latter flags uses bits 31 thru 24, which are all zero
723 * in IEMCPU::fExec.
724 *
725 * @{ */
726/** Mode: The block target mode mask. */
727#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
728/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
729#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
730/** X86 Mode: Bit used to indicate a pre-386 CPU in 16-bit mode (for eliminating
731 * conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
732 * 32-bit mode (for simplifying most memory accesses). */
733#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
734/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
735#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
736/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
737#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
738
739/** X86 Mode: 16-bit on 386 or later. */
740#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
741/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
742#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
743/** X86 Mode: 16-bit protected mode on 386 or later. */
744#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
745/** X86 Mode: 16-bit protected mode on a pre-386 CPU (80286). */
746#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
747/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
748#define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)
749
750/** X86 Mode: 32-bit on 386 or later. */
751#define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
752/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
753#define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
754/** X86 Mode: 32-bit protected mode. */
755#define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
756/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
757#define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)
758
759/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
760#define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)
761
762/** X86 Mode: Checks if @a a_fExec represent a FLAT mode. */
763#define IEM_F_MODE_X86_IS_FLAT(a_fExec) ( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
764 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
765 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)
766
767/** Bypass access handlers when set. */
768#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
769/** Have pending hardware instruction breakpoints. */
770#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
771/** Have pending hardware data breakpoints. */
772#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)
773
774/** X86: Have pending hardware I/O breakpoints. */
775#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
776/** X86: Disregard the lock prefix (implied or not) when set. */
777#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)
778
779/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
780#define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
781
782/** Caller configurable options. */
783#define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
784
785/** X86: The current protection level (CPL) shift factor. */
786#define IEM_F_X86_CPL_SHIFT 8
787/** X86: The current protection level (CPL) mask. */
788#define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
789/** X86: The current protection level (CPL) shifted mask. */
790#define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
791
792/** X86 execution context.
793 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
794 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
795 * mode. */
796#define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
797/** X86 context: Plain regular execution context. */
798#define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
799/** X86 context: VT-x enabled. */
800#define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
801/** X86 context: AMD-V enabled. */
802#define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
803/** X86 context: In AMD-V or VT-x guest mode. */
804#define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
805/** X86 context: System management mode (SMM). */
806#define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)
807
808/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
809 * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
810 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
811 * already). */
812
818/** @} */
819
820
821/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
822 *
823 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
824 * translation block flags. The combined flag mask (subject to
825 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
826 *
827 * @{ */
828/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
829#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)
830
831/** Type: The block type mask. */
832#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
833/** Type: Purely threaded recompiler (via tables). */
834#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
835/** Type: Native recompilation. */
836#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)
837
838/** Set when we're starting the block in an "interrupt shadow".
839 * We don't need to distinguish between the two types of this mask, thus the one.
840 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
841#define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000)
842/** Set when we're currently inhibiting NMIs
843 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
844#define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000)
845
846/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
847 * we're close to the limit before starting a TB, as determined by
848 * iemGetTbFlagsForCurrentPc(). */
849#define IEMTB_F_CS_LIM_CHECKS UINT32_C(0x10000000)
850
851/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
852 *
853 * @note We skip all of IEM_F_X86_CTX_MASK, with the exception of SMM (which we
854 * don't implement), because we don't currently generate any context
855 * specific code - that's all handled in CIMPL functions.
856 *
857 * For the threaded recompiler we don't generate any CPL specific code
858 * either, but the native recompiler does for memory access (saves getting
859 * the CPL from fExec and turning it into IEMTLBE_F_PT_NO_USER).
860 * Since most OSes will not share code between rings, this shouldn't
861 * have any real effect on TB/memory/recompiling load.
862 */
863#define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM)
864/** @} */
865
866AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
867AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
868AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
869AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
870AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
871AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
872AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
873AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
874AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
875AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
876AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
877AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
878AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
879AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
880AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
881AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
882AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
883AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
884AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);
885
886AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
887AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
888AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
889AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
890AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
891AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
892AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
893AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
894AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
895AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
896AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
897AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);
898
899AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
900AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
901AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
902
903/** Native instruction type for use with the native code generator.
904 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
905#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
906typedef uint8_t IEMNATIVEINSTR;
907#else
908typedef uint32_t IEMNATIVEINSTR;
909#endif
910/** Pointer to a native instruction unit. */
911typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
912/** Pointer to a const native instruction unit. */
913typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
914
915/**
916 * A call for the threaded call table.
917 */
918typedef struct IEMTHRDEDCALLENTRY
919{
920 /** The function to call (IEMTHREADEDFUNCS). */
921 uint16_t enmFunction;
922 /** Instruction number in the TB (for statistics). */
923 uint8_t idxInstr;
924 uint8_t uUnused0;
925
926 /** Offset into IEMTB::pabOpcodes. */
927 uint16_t offOpcode;
928 /** The opcode length. */
929 uint8_t cbOpcode;
930 /** Index into IEMTB::aRanges. */
931 uint8_t idxRange;
932
933 /** Generic parameters. */
934 uint64_t auParams[3];
935} IEMTHRDEDCALLENTRY;
936AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
937/** Pointer to a threaded call entry. */
938typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
939/** Pointer to a const threaded call entry. */
940typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
941
942/**
943 * Native IEM TB 'function' typedef.
944 *
945 * This will throw/longjmp on occasion.
946 *
947 * @note AMD64 doesn't have that many non-volatile registers and does sport
948 * 32-bit address displacements, so we don't need pCtx.
949 *
950 * On ARM64 pCtx allows us to directly address the whole register
951 * context without requiring a separate indexing register holding the
952 * offset. This saves an instruction loading the offset for each guest
953 * CPU context access, at the cost of a non-volatile register.
954 * Fortunately, ARM64 has quite a lot more registers.
955 */
956typedef
957#ifdef RT_ARCH_AMD64
958int FNIEMTBNATIVE(PVMCPUCC pVCpu)
959#else
960int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
961#endif
962#if RT_CPLUSPLUS_PREREQ(201700)
963 IEM_NOEXCEPT_MAY_LONGJMP
964#endif
965 ;
966/** Pointer to a native IEM TB entry point function.
967 * This will throw/longjmp on occasion. */
968typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
969
970
971/**
972 * Translation block debug info entry type.
973 */
974typedef enum IEMTBDBGENTRYTYPE
975{
976 kIemTbDbgEntryType_Invalid = 0,
977 /** The entry is for marking a native code position.
978 * Entries following this all apply to this position. */
979 kIemTbDbgEntryType_NativeOffset,
980 /** The entry is for a new guest instruction. */
981 kIemTbDbgEntryType_GuestInstruction,
982 /** Marks the start of a threaded call. */
983 kIemTbDbgEntryType_ThreadedCall,
984 /** Marks the location of a label. */
985 kIemTbDbgEntryType_Label,
986 /** Info about a host register shadowing a guest register. */
987 kIemTbDbgEntryType_GuestRegShadowing,
988#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
989 /** Info about a host SIMD register shadowing a guest SIMD register. */
990 kIemTbDbgEntryType_GuestSimdRegShadowing,
991#endif
992#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
993 /** Info about a delayed RIP update. */
994 kIemTbDbgEntryType_DelayedPcUpdate,
995#endif
996#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
997 /** Info about a shadowed guest register becoming dirty. */
998 kIemTbDbgEntryType_GuestRegDirty,
999 /** Info about register writeback/flush operation. */
1000 kIemTbDbgEntryType_GuestRegWriteback,
1001#endif
1002 kIemTbDbgEntryType_End
1003} IEMTBDBGENTRYTYPE;
1004
1005/**
1006 * Translation block debug info entry.
1007 */
1008typedef union IEMTBDBGENTRY
1009{
1010 /** Plain 32-bit view. */
1011 uint32_t u;
1012
1013 /** Generic view for getting at the type field. */
1014 struct
1015 {
1016 /** IEMTBDBGENTRYTYPE */
1017 uint32_t uType : 4;
1018 uint32_t uTypeSpecific : 28;
1019 } Gen;
1020
1021 struct
1022 {
1023 /** kIemTbDbgEntryType_NativeOffset. */
1024 uint32_t uType : 4;
1025 /** Native code offset. */
1026 uint32_t offNative : 28;
1027 } NativeOffset;
1028
1029 struct
1030 {
1031 /** kIemTbDbgEntryType_GuestInstruction. */
1032 uint32_t uType : 4;
1033 uint32_t uUnused : 4;
1034 /** The IEM_F_XXX flags. */
1035 uint32_t fExec : 24;
1036 } GuestInstruction;
1037
1038 struct
1039 {
1040 /* kIemTbDbgEntryType_ThreadedCall. */
1041 uint32_t uType : 4;
1042 /** Set if the call was recompiled to native code, clear if just calling
1043 * threaded function. */
1044 uint32_t fRecompiled : 1;
1045 uint32_t uUnused : 11;
1046 /** The threaded call number (IEMTHREADEDFUNCS). */
1047 uint32_t enmCall : 16;
1048 } ThreadedCall;
1049
1050 struct
1051 {
1052 /* kIemTbDbgEntryType_Label. */
1053 uint32_t uType : 4;
1054 uint32_t uUnused : 4;
1055 /** The label type (IEMNATIVELABELTYPE). */
1056 uint32_t enmLabel : 8;
1057 /** The label data. */
1058 uint32_t uData : 16;
1059 } Label;
1060
1061 struct
1062 {
1063 /* kIemTbDbgEntryType_GuestRegShadowing. */
1064 uint32_t uType : 4;
1065 uint32_t uUnused : 4;
1066 /** The guest register being shadowed (IEMNATIVEGSTREG). */
1067 uint32_t idxGstReg : 8;
1068 /** The host new register number, UINT8_MAX if dropped. */
1069 uint32_t idxHstReg : 8;
1070 /** The previous host register number, UINT8_MAX if new. */
1071 uint32_t idxHstRegPrev : 8;
1072 } GuestRegShadowing;
1073
1074#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
1075 struct
1076 {
1077 /* kIemTbDbgEntryType_GuestSimdRegShadowing. */
1078 uint32_t uType : 4;
1079 uint32_t uUnused : 4;
1080 /** The guest register being shadowed (IEMNATIVEGSTSIMDREG). */
1081 uint32_t idxGstSimdReg : 8;
1082 /** The host new register number, UINT8_MAX if dropped. */
1083 uint32_t idxHstSimdReg : 8;
1084 /** The previous host register number, UINT8_MAX if new. */
1085 uint32_t idxHstSimdRegPrev : 8;
1086 } GuestSimdRegShadowing;
1087#endif
1088
1089#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
1090 struct
1091 {
1092 /* kIemTbDbgEntryType_DelayedPcUpdate. */
1093 uint32_t uType : 4;
1094 /* The instruction offset added to the program counter. */
1095 uint32_t offPc : 14;
1096 /** Number of instructions skipped. */
1097 uint32_t cInstrSkipped : 14;
1098 } DelayedPcUpdate;
1099#endif
1100
1101#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
1102 struct
1103 {
1104 /* kIemTbDbgEntryType_GuestRegDirty. */
1105 uint32_t uType : 4;
1106 uint32_t uUnused : 11;
1107 /** Flag whether this is about a SIMD (true) or general (false) register. */
1108 uint32_t fSimdReg : 1;
1109 /** The guest register index being marked as dirty. */
1110 uint32_t idxGstReg : 8;
1111 /** The host register number this register is shadowed in. */
1112 uint32_t idxHstReg : 8;
1113 } GuestRegDirty;
1114
1115 struct
1116 {
1117 /* kIemTbDbgEntryType_GuestRegWriteback. */
1118 uint32_t uType : 4;
1119 /** Flag whether this is about a SIMD (true) or general (false) register flush. */
1120 uint32_t fSimdReg : 1;
1121 /** The mask shift. */
1122 uint32_t cShift : 2;
1123 /** The guest register mask being written back. */
1124 uint32_t fGstReg : 25;
1125 } GuestRegWriteback;
1126#endif
1127
1128} IEMTBDBGENTRY;
1129AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
1130/** Pointer to a debug info entry. */
1131typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
1132/** Pointer to a const debug info entry. */
1133typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;
1134
1135/**
1136 * Translation block debug info.
1137 */
1138typedef struct IEMTBDBG
1139{
1140 /** Number of entries in aEntries. */
1141 uint32_t cEntries;
1142 /** The offset of the last kIemTbDbgEntryType_NativeOffset record. */
1143 uint32_t offNativeLast;
1144 /** Debug info entries. */
1145 RT_FLEXIBLE_ARRAY_EXTENSION
1146 IEMTBDBGENTRY aEntries[RT_FLEXIBLE_ARRAY];
1147} IEMTBDBG;
1148/** Pointer to TB debug info. */
1149typedef IEMTBDBG *PIEMTBDBG;
1150/** Pointer to const TB debug info. */
1151typedef IEMTBDBG const *PCIEMTBDBG;
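/* Sketch of walking the debug info and dispatching on the entry type field
 * (illustrative only; pDbgInfo stands for a valid IEMTB::pDbgInfo pointer):
 * @code
 *  uint32_t offNative = 0;
 *  for (uint32_t i = 0; i < pDbgInfo->cEntries; i++)
 *  {
 *      IEMTBDBGENTRY const Entry = pDbgInfo->aEntries[i];
 *      switch ((IEMTBDBGENTRYTYPE)Entry.Gen.uType)
 *      {
 *          case kIemTbDbgEntryType_NativeOffset:
 *              offNative = Entry.NativeOffset.offNative; // applies to the entries that follow
 *              break;
 *          case kIemTbDbgEntryType_Label:
 *              // Entry.Label.enmLabel and Entry.Label.uData describe the label.
 *              break;
 *          default:
 *              break;
 *      }
 *  }
 * @endcode
 */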
1152
1153
1154/**
1155 * Translation block.
1156 *
1157 * The current plan is to just keep TBs and associated lookup hash table private
1158 * to each VCpu as that simplifies TB removal greatly (no races) and generally
1159 * avoids using expensive atomic primitives for updating lists and stuff.
1160 */
1161#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
1162typedef struct IEMTB
1163{
1164 /** Next block with the same hash table entry. */
1165 struct IEMTB *pNext;
1166 /** Usage counter. */
1167 uint32_t cUsed;
1168 /** The IEMCPU::msRecompilerPollNow last time it was used. */
1169 uint32_t msLastUsed;
1170
1171 /** @name What uniquely identifies the block.
1172 * @{ */
1173 RTGCPHYS GCPhysPc;
1174 /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
1175 uint32_t fFlags;
1176 union
1177 {
1178 struct
1179 {
1180 /**< Relevant CS X86DESCATTR_XXX bits. */
1181 uint16_t fAttr;
1182 } x86;
1183 };
1184 /** @} */
1185
1186 /** Number of opcode ranges. */
1187 uint8_t cRanges;
1188 /** Statistics: Number of instructions in the block. */
1189 uint8_t cInstructions;
1190
1191 /** Type specific info. */
1192 union
1193 {
1194 struct
1195 {
1196 /** The call sequence table. */
1197 PIEMTHRDEDCALLENTRY paCalls;
1198 /** Number of calls in paCalls. */
1199 uint16_t cCalls;
1200 /** Number of calls allocated. */
1201 uint16_t cAllocated;
1202 } Thrd;
1203 struct
1204 {
1205 /** The native instructions (PFNIEMTBNATIVE). */
1206 PIEMNATIVEINSTR paInstructions;
1207 /** Number of instructions pointed to by paInstructions. */
1208 uint32_t cInstructions;
1209 } Native;
1210 /** Generic view for zeroing when freeing. */
1211 struct
1212 {
1213 uintptr_t uPtr;
1214 uint32_t uData;
1215 } Gen;
1216 };
1217
1218 /** The allocation chunk this TB belongs to. */
1219 uint8_t idxAllocChunk;
1220 uint8_t bUnused;
1221
1222 /** Number of bytes of opcodes stored in pabOpcodes.
1223 * @todo this field isn't really needed, aRanges keeps the actual info. */
1224 uint16_t cbOpcodes;
1225 /** Pointer to the opcode bytes this block was recompiled from. */
1226 uint8_t *pabOpcodes;
1227
1228 /** Debug info if enabled.
1229 * This is only generated by the native recompiler. */
1230 PIEMTBDBG pDbgInfo;
1231
1232 /* --- 64 byte cache line end --- */
1233
1234 /** Opcode ranges.
1235 *
1236 * The opcode checkers and maybe TLB loading functions will use this to figure
1237 * out what to do. The parameter will specify an entry and the opcode offset to
1238 * start at and the minimum number of bytes to verify (instruction length).
1239 *
1240 * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
1241 * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
1242 * code TLB (must have a valid entry for that address) and scan the ranges to
1243 * locate the corresponding opcodes. Probably.
1244 */
1245 struct IEMTBOPCODERANGE
1246 {
1247 /** Offset within pabOpcodes. */
1248 uint16_t offOpcodes;
1249 /** Number of bytes. */
1250 uint16_t cbOpcodes;
1251 /** The page offset. */
1252 RT_GCC_EXTENSION
1253 uint16_t offPhysPage : 12;
1254 /** Unused bits. */
1255 RT_GCC_EXTENSION
1256 uint16_t u2Unused : 2;
1257 /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
1258 RT_GCC_EXTENSION
1259 uint16_t idxPhysPage : 2;
1260 } aRanges[8];
1261
1262 /** Physical pages that this TB covers.
1263 * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
1264 RTGCPHYS aGCPhysPages[2];
1265} IEMTB;
1266#pragma pack()
1267AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
1268AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
1269AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
1270AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
1271AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
1272AssertCompileMemberOffset(IEMTB, aRanges, 64);
1273AssertCompileMemberSize(IEMTB, aRanges[0], 6);
1274#if 1
1275AssertCompileSize(IEMTB, 128);
1276# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
1277#else
1278AssertCompileSize(IEMTB, 168);
1279# undef IEMTB_SIZE_IS_POWER_OF_TWO
1280#endif
1281
1282/** Pointer to a translation block. */
1283typedef IEMTB *PIEMTB;
1284/** Pointer to a const translation block. */
1285typedef IEMTB const *PCIEMTB;
1286
1287/**
1288 * A chunk of memory in the TB allocator.
1289 */
1290typedef struct IEMTBCHUNK
1291{
1292 /** Pointer to the translation blocks in this chunk. */
1293 PIEMTB paTbs;
1294#ifdef IN_RING0
1295 /** Allocation handle. */
1296 RTR0MEMOBJ hMemObj;
1297#endif
1298} IEMTBCHUNK;
1299
1300/**
1301 * A per-CPU translation block allocator.
1302 *
1303 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1304 * the length of the collision list, and of course also for cache line alignment
1305 * reasons, the TBs must be allocated with at least 64-byte alignment.
1306 * Memory is therefore allocated using one of the page aligned allocators.
1307 *
1308 *
1309 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1310 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1311 * that enables us to quickly calculate the allocation bitmap position when
1312 * freeing the translation block.
1313 */
1314typedef struct IEMTBALLOCATOR
1315{
1316 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1317 uint32_t uMagic;
1318
1319#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1320 /** Mask corresponding to cTbsPerChunk - 1. */
1321 uint32_t fChunkMask;
1322 /** Shift count corresponding to cTbsPerChunk. */
1323 uint8_t cChunkShift;
1324#else
1325 uint32_t uUnused;
1326 uint8_t bUnused;
1327#endif
1328 /** Number of chunks we're allowed to allocate. */
1329 uint8_t cMaxChunks;
1330 /** Number of chunks currently populated. */
1331 uint16_t cAllocatedChunks;
1332 /** Number of translation blocks per chunk. */
1333 uint32_t cTbsPerChunk;
1334 /** Chunk size. */
1335 uint32_t cbPerChunk;
1336
1337 /** The maximum number of TBs. */
1338 uint32_t cMaxTbs;
1339 /** Total number of TBs in the populated chunks.
1340 * (cAllocatedChunks * cTbsPerChunk) */
1341 uint32_t cTotalTbs;
1342 /** The current number of TBs in use.
1343 * The number of free TBs: cTotalTbs - cInUseTbs; */
1344 uint32_t cInUseTbs;
1345 /** Statistics: Number of the cInUseTbs that are native ones. */
1346 uint32_t cNativeTbs;
1347 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1348 uint32_t cThreadedTbs;
1349
1350 /** Where to start pruning TBs from when we're out.
1351 * See iemTbAllocatorAllocSlow for details. */
1352 uint32_t iPruneFrom;
1353 /** Hint about which bit to start scanning the bitmap from. */
1354 uint32_t iStartHint;
1355 /** Where to start pruning native TBs from when we're out of executable memory.
1356 * See iemTbAllocatorFreeupNativeSpace for details. */
1357 uint32_t iPruneNativeFrom;
1358 uint32_t uPadding;
1359
1360 /** Statistics: Number of TB allocation calls. */
1361 STAMCOUNTER StatAllocs;
1362 /** Statistics: Number of TB free calls. */
1363 STAMCOUNTER StatFrees;
1364 /** Statistics: Time spent pruning. */
1365 STAMPROFILE StatPrune;
1366 /** Statistics: Time spent pruning native TBs. */
1367 STAMPROFILE StatPruneNative;
1368
1369 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1370 PIEMTB pDelayedFreeHead;
1371
1372 /** Allocation chunks. */
1373 IEMTBCHUNK aChunks[256];
1374
1375 /** Allocation bitmap for all possible chunks. */
1376 RT_FLEXIBLE_ARRAY_EXTENSION
1377 uint64_t bmAllocated[RT_FLEXIBLE_ARRAY];
1378} IEMTBALLOCATOR;
1379/** Pointer to a TB allocator. */
1380typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1381
1382/** Magic value for the TB allocator (Emmet Harley Cohen). */
1383#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
1384
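/*
 * Illustrative sketch only (not part of the interface): how the 8-bit chunk
 * index described above can be turned back into an allocation bitmap position
 * when freeing a TB.  The IEMTB member name 'idxAllocChunk' and the helper
 * itself are assumptions made for this example.
 */
#if 0
DECLINLINE(uint32_t) iemTbAllocatorSketchBitmapIdx(PIEMTBALLOCATOR pTbAllocator, PIEMTB pTb)
{
    /* Locate the chunk the TB lives in and its index within that chunk. */
    IEMTBCHUNK const *pChunk     = &pTbAllocator->aChunks[pTb->idxAllocChunk];
    uint32_t const    idxInChunk = (uint32_t)(pTb - pChunk->paTbs);
    /* Each chunk contributes cTbsPerChunk consecutive bits to bmAllocated. */
    return pTb->idxAllocChunk * pTbAllocator->cTbsPerChunk + idxInChunk;
}
#endif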
1385
1386/**
1387 * A per-CPU translation block cache (hash table).
1388 *
1389 * The hash table is allocated once during IEM initialization and sized to double
1390 * the max TB count, rounded up to the nearest power of two (so we can use an
1391 * AND mask rather than a modulo operation when hashing).
1392 */
1393typedef struct IEMTBCACHE
1394{
1395 /** Magic value (IEMTBCACHE_MAGIC). */
1396 uint32_t uMagic;
1397 /** Size of the hash table. This is a power of two. */
1398 uint32_t cHash;
1399 /** The mask corresponding to cHash. */
1400 uint32_t uHashMask;
1401 uint32_t uPadding;
1402
1403 /** @name Statistics
1404 * @{ */
1405 /** Number of collisions ever. */
1406 STAMCOUNTER cCollisions;
1407
1408 /** Statistics: Number of TB lookup misses. */
1409 STAMCOUNTER cLookupMisses;
1410 /** Statistics: Number of TB lookup hits (debug only). */
1411 STAMCOUNTER cLookupHits;
1412 STAMCOUNTER auPadding2[3];
1413 /** Statistics: Collision list length pruning. */
1414 STAMPROFILE StatPrune;
1415 /** @} */
1416
1417 /** The hash table itself.
1418 * @note The lower 6 bits of the pointer is used for keeping the collision
1419 * list length, so we can take action when it grows too long.
1420 * This works because TBs are allocated using a 64 byte (or
1421 * higher) alignment from page aligned chunks of memory, so the lower
1422 * 6 bits of the address will always be zero.
1423 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1424 */
1425 RT_FLEXIBLE_ARRAY_EXTENSION
1426 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1427} IEMTBCACHE;
1428/** Pointer to a per-CPU translation block cache. */
1429typedef IEMTBCACHE *PIEMTBCACHE;
1430
1431/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1432#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1433
1434/** The collision count mask for IEMTBCACHE::apHash entries. */
1435#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1436/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1437#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1438/** Combine a TB pointer and a collision list length into a value for an
1439 * IEMTBCACHE::apHash entry. */
1440#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1441/** Extracts the TB pointer from a value stored in an
1442 * IEMTBCACHE::apHash entry. */
1443#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1444/** Extracts the collision list length from a value stored in an
1445 * IEMTBCACHE::apHash entry. */
1446#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
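
/*
 * Illustrative sketch only (not part of the interface): inserting a TB at the
 * head of a hash bucket with the packing macros above.  This relies on the
 * 64-byte TB alignment guaranteed by the allocator; the 'pNext' member name
 * is an assumption made for this example.
 */
#if 0
DECLINLINE(void) iemTbCacheSketchInsert(PIEMTBCACHE pTbCache, uint32_t idxHash, PIEMTB pTb)
{
    PIEMTB const    pOldHead = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
    uintptr_t const cCount   = IEMTBCACHE_PTR_GET_COUNT(pTbCache->apHash[idxHash]);
    pTb->pNext = pOldHead;                      /* chain the old head behind the new TB */
    pTbCache->apHash[idxHash] = IEMTBCACHE_PTR_MAKE(pTb, cCount + 1);
}
#endif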
1447
1448/**
1449 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1450 */
1451#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1452 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1453
1454/**
1455 * Calculates the hash table slot for a TB from physical PC address and TB
1456 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1457 */
1458#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1459 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
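
/*
 * Illustrative sketch only (not part of the interface): looking up the hash
 * bucket for a TB with the macro above.  Walking the collision list and
 * comparing flags/physical PC is omitted for brevity.
 */
#if 0
DECLINLINE(PIEMTB) iemTbCacheSketchLookup(PIEMTBCACHE pTbCache, uint32_t fTbFlags, RTGCPHYS GCPhysPc)
{
    uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fTbFlags, GCPhysPc);
    return IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
}
#endif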
1460
1461
1462/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1463 *
1464 * These flags parallel the main IEM_CIMPL_F_BRANCH_XXX flags.
1465 *
1466 * @{ */
1467/** Value if no branching happened recently. */
1468#define IEMBRANCHED_F_NO UINT8_C(0x00)
1469/** Flag set if direct branch, clear if absolute or indirect. */
1470#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1471/** Flag set if indirect branch, clear if direct or relative. */
1472#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1473/** Flag set if relative branch, clear if absolute or indirect. */
1474#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1475/** Flag set if conditional branch, clear if unconditional. */
1476#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1477/** Flag set if it's a far branch. */
1478#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1479/** Flag set if the stack pointer is modified. */
1480#define IEMBRANCHED_F_STACK UINT8_C(0x20)
1481/** Flag set if the stack pointer and (maybe) the stack segment are modified. */
1482#define IEMBRANCHED_F_STACK_FAR UINT8_C(0x40)
1483/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */
1484#define IEMBRANCHED_F_ZERO UINT8_C(0x80)
1485/** @} */
1486
1487
1488/**
1489 * The per-CPU IEM state.
1490 */
1491typedef struct IEMCPU
1492{
1493 /** Info status code that needs to be propagated to the IEM caller.
1494 * This cannot be passed internally, as it would complicate all success
1495 * checks within the interpreter making the code larger and almost impossible
1496 * to get right. Instead, we'll store status codes to pass on here. Each
1497 * source of these codes will perform appropriate sanity checks. */
1498 int32_t rcPassUp; /* 0x00 */
1499 /** Execution flag, IEM_F_XXX. */
1500 uint32_t fExec; /* 0x04 */
1501
1502 /** @name Decoder state.
1503 * @{ */
1504#ifdef IEM_WITH_CODE_TLB
1505 /** The offset of the next instruction byte. */
1506 uint32_t offInstrNextByte; /* 0x08 */
1507 /** The number of bytes available at pbInstrBuf for the current instruction.
1508 * This takes the max opcode length into account so that it doesn't need to be
1509 * checked separately. */
1510 uint32_t cbInstrBuf; /* 0x0c */
1511 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1512 * This can be NULL if the page isn't mappable for some reason, in which
1513 * case we'll do fallback stuff.
1514 *
1515 * If we're executing an instruction from a user specified buffer,
1516 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1517 * aligned pointer but pointer to the user data.
1518 *
1519 * For instructions crossing pages, this will start on the first page and be
1520 * advanced to the next page by the time we've decoded the instruction. This
1521 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1522 */
1523 uint8_t const *pbInstrBuf; /* 0x10 */
1524# if ARCH_BITS == 32
1525 uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
1526# endif
1527 /** The program counter corresponding to pbInstrBuf.
1528 * This is set to a non-canonical address when we need to invalidate it. */
1529 uint64_t uInstrBufPc; /* 0x18 */
1530 /** The guest physical address corresponding to pbInstrBuf. */
1531 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1532 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1533 * This takes the CS segment limit into account.
1534 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1535 uint16_t cbInstrBufTotal; /* 0x28 */
1536# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1537 /** Offset into pbInstrBuf of the first byte of the current instruction.
1538 * Can be negative to efficiently handle cross page instructions. */
1539 int16_t offCurInstrStart; /* 0x2a */
1540
1541 /** The prefix mask (IEM_OP_PRF_XXX). */
1542 uint32_t fPrefixes; /* 0x2c */
1543 /** The extra REX ModR/M register field bit (REX.R << 3). */
1544 uint8_t uRexReg; /* 0x30 */
1545 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1546 * (REX.B << 3). */
1547 uint8_t uRexB; /* 0x31 */
1548 /** The extra REX SIB index field bit (REX.X << 3). */
1549 uint8_t uRexIndex; /* 0x32 */
1550
1551 /** The effective segment register (X86_SREG_XXX). */
1552 uint8_t iEffSeg; /* 0x33 */
1553
1554 /** The offset of the ModR/M byte relative to the start of the instruction. */
1555 uint8_t offModRm; /* 0x34 */
1556
1557# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1558 /** The current offset into abOpcode. */
1559 uint8_t offOpcode; /* 0x35 */
1560# else
1561 uint8_t bUnused; /* 0x35 */
1562# endif
1563# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1564 uint8_t abOpaqueDecoderPart1[0x36 - 0x2a];
1565# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1566
1567#else /* !IEM_WITH_CODE_TLB */
1568# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1569 /** The size of what has currently been fetched into abOpcode. */
1570 uint8_t cbOpcode; /* 0x08 */
1571 /** The current offset into abOpcode. */
1572 uint8_t offOpcode; /* 0x09 */
1573 /** The offset of the ModR/M byte relative to the start of the instruction. */
1574 uint8_t offModRm; /* 0x0a */
1575
1576 /** The effective segment register (X86_SREG_XXX). */
1577 uint8_t iEffSeg; /* 0x0b */
1578
1579 /** The prefix mask (IEM_OP_PRF_XXX). */
1580 uint32_t fPrefixes; /* 0x0c */
1581 /** The extra REX ModR/M register field bit (REX.R << 3). */
1582 uint8_t uRexReg; /* 0x10 */
1583 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1584 * (REX.B << 3). */
1585 uint8_t uRexB; /* 0x11 */
1586 /** The extra REX SIB index field bit (REX.X << 3). */
1587 uint8_t uRexIndex; /* 0x12 */
1588
1589# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1590 uint8_t abOpaqueDecoderPart1[0x13 - 0x08];
1591# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1592#endif /* !IEM_WITH_CODE_TLB */
1593
1594#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1595 /** The effective operand mode. */
1596 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1597 /** The default addressing mode. */
1598 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1599 /** The effective addressing mode. */
1600 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1601 /** The default operand mode. */
1602 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1603
1604 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1605 uint8_t idxPrefix; /* 0x3a, 0x17 */
1606 /** 3rd VEX/EVEX/XOP register.
1607 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1608 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1609 /** The VEX/EVEX/XOP length field. */
1610 uint8_t uVexLength; /* 0x3c, 0x19 */
1611 /** Additional EVEX stuff. */
1612 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1613
1614# ifndef IEM_WITH_CODE_TLB
1615 /** Explicit alignment padding. */
1616 uint8_t abAlignment2a[1]; /* 0x1b */
1617# endif
1618 /** The FPU opcode (FOP). */
1619 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1620# ifndef IEM_WITH_CODE_TLB
1621 /** Explicit alignment padding. */
1622 uint8_t abAlignment2b[2]; /* 0x1e */
1623# endif
1624
1625 /** The opcode bytes. */
1626 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1627 /** Explicit alignment padding. */
1628# ifdef IEM_WITH_CODE_TLB
1629 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1630# else
1631 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1632# endif
1633
1634#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1635# ifdef IEM_WITH_CODE_TLB
1636 uint8_t abOpaqueDecoderPart2[0x4f - 0x36];
1637# else
1638 uint8_t abOpaqueDecoderPart2[0x4f - 0x13];
1639# endif
1640#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1641 /** @} */
1642
1643
1644 /** The number of active guest memory mappings. */
1645 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1646
1647 /** Records for tracking guest memory mappings. */
1648 struct
1649 {
1650 /** The address of the mapped bytes. */
1651 R3R0PTRTYPE(void *) pv;
1652 /** The access flags (IEM_ACCESS_XXX).
1653 * IEM_ACCESS_INVALID if the entry is unused. */
1654 uint32_t fAccess;
1655#if HC_ARCH_BITS == 64
1656 uint32_t u32Alignment4; /**< Alignment padding. */
1657#endif
1658 } aMemMappings[3]; /* 0x50 LB 0x30 */
1659
1660 /** Locking records for the mapped memory. */
1661 union
1662 {
1663 PGMPAGEMAPLOCK Lock;
1664 uint64_t au64Padding[2];
1665 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1666
1667 /** Bounce buffer info.
1668 * This runs in parallel to aMemMappings. */
1669 struct
1670 {
1671 /** The physical address of the first byte. */
1672 RTGCPHYS GCPhysFirst;
1673 /** The physical address of the second page. */
1674 RTGCPHYS GCPhysSecond;
1675 /** The number of bytes in the first page. */
1676 uint16_t cbFirst;
1677 /** The number of bytes in the second page. */
1678 uint16_t cbSecond;
1679 /** Whether it's unassigned memory. */
1680 bool fUnassigned;
1681 /** Explicit alignment padding. */
1682 bool afAlignment5[3];
1683 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1684
1685 /** The flags of the current exception / interrupt. */
1686 uint32_t fCurXcpt; /* 0xf8 */
1687 /** The current exception / interrupt. */
1688 uint8_t uCurXcpt; /* 0xfc */
1689 /** Exception / interrupt recursion depth. */
1690 int8_t cXcptRecursions; /* 0xfb */
1691
1692 /** The next unused mapping index.
1693 * @todo try find room for this up with cActiveMappings. */
1694 uint8_t iNextMapping; /* 0xfd */
1695 uint8_t abAlignment7[1];
1696
1697 /** Bounce buffer storage.
1698 * This runs in parallel to aMemMappings and aMemBbMappings. */
1699 struct
1700 {
1701 uint8_t ab[512];
1702 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1703
1704
1705 /** Pointer set jump buffer - ring-3 context. */
1706 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1707 /** Pointer set jump buffer - ring-0 context. */
1708 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1709
1710 /** @todo Should move this near @a fCurXcpt later. */
1711 /** The CR2 for the current exception / interrupt. */
1712 uint64_t uCurXcptCr2;
1713 /** The error code for the current exception / interrupt. */
1714 uint32_t uCurXcptErr;
1715
1716 /** @name Statistics
1717 * @{ */
1718 /** The number of instructions we've executed. */
1719 uint32_t cInstructions;
1720 /** The number of potential exits. */
1721 uint32_t cPotentialExits;
1722 /** The number of bytes data or stack written (mostly for IEMExecOneEx).
1723 * This may contain uncommitted writes. */
1724 uint32_t cbWritten;
1725 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1726 uint32_t cRetInstrNotImplemented;
1727 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1728 uint32_t cRetAspectNotImplemented;
1729 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1730 uint32_t cRetInfStatuses;
1731 /** Counts other error statuses returned. */
1732 uint32_t cRetErrStatuses;
1733 /** Number of times rcPassUp has been used. */
1734 uint32_t cRetPassUpStatus;
1735 /** Number of times RZ left with instruction commit pending for ring-3. */
1736 uint32_t cPendingCommit;
1737 /** Number of misaligned (host sense) atomic instruction accesses. */
1738 uint32_t cMisalignedAtomics;
1739 /** Number of long jumps. */
1740 uint32_t cLongJumps;
1741 /** @} */
1742
1743 /** @name Target CPU information.
1744 * @{ */
1745#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1746 /** The target CPU. */
1747 uint8_t uTargetCpu;
1748#else
1749 uint8_t bTargetCpuPadding;
1750#endif
1751 /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
1752 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry used when there is
1753 * no native host support and the 2nd when there is.
1754 *
1755 * The two values are typically indexed by a g_CpumHostFeatures bit.
1756 *
1757 * This is for instance used for the BSF & BSR instructions where AMD and
1758 * Intel CPUs produce different EFLAGS. */
1759 uint8_t aidxTargetCpuEflFlavour[2];
1760
1761 /** The CPU vendor. */
1762 CPUMCPUVENDOR enmCpuVendor;
1763 /** @} */
1764
1765 /** @name Host CPU information.
1766 * @{ */
1767 /** The CPU vendor. */
1768 CPUMCPUVENDOR enmHostCpuVendor;
1769 /** @} */
1770
1771 /** Counts RDMSR \#GP(0) LogRel(). */
1772 uint8_t cLogRelRdMsr;
1773 /** Counts WRMSR \#GP(0) LogRel(). */
1774 uint8_t cLogRelWrMsr;
1775 /** Alignment padding. */
1776 uint8_t abAlignment9[42];
1777
1778 /** @name Recompilation
1779 * @{ */
1780 /** Pointer to the current translation block.
1781 * This can either be one being executed or one being compiled. */
1782 R3PTRTYPE(PIEMTB) pCurTbR3;
1783#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
1784 /** Frame pointer for the last native TB to execute. */
1785 R3PTRTYPE(void *) pvTbFramePointerR3;
1786#else
1787 R3PTRTYPE(void *) pvUnusedR3;
1788#endif
1789 /** Fixed TB used for threaded recompilation.
1790 * This is allocated once with maxed-out sizes and re-used afterwards. */
1791 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1792 /** Pointer to the ring-3 TB cache for this EMT. */
1793 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
1794 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
1795 * The TBs are based on physical addresses, so this is needed to correlate
1796 * RIP to opcode bytes stored in the TB (AMD-V / VT-x). */
1797 uint64_t uCurTbStartPc;
1798 /** Number of threaded TBs executed. */
1799 uint64_t cTbExecThreaded;
1800 /** Number of native TBs executed. */
1801 uint64_t cTbExecNative;
1802 /** Whether we need to check the opcode bytes for the current instruction.
1803 * This is set by a previous instruction if it modified memory or similar. */
1804 bool fTbCheckOpcodes;
1805 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1806 uint8_t fTbBranched;
1807 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1808 bool fTbCrossedPage;
1809 /** Whether to end the current TB. */
1810 bool fEndTb;
1811 /** Number of instructions before we need to emit an IRQ check call again.
1812 * This helps make sure we don't execute too long w/o checking for
1813 * interrupts and immediately following instructions that may enable
1814 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1815 * required to make sure we check following the next instruction as well, see
1816 * fTbCurInstrIsSti. */
1817 uint8_t cInstrTillIrqCheck;
1818 /** Indicates that the current instruction is an STI. This is set by the
1819 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1820 bool fTbCurInstrIsSti;
1821 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1822 uint16_t cbOpcodesAllocated;
1823 /** The current instruction number in a native TB.
1824 * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
1825 * and will be picked up by the TB execution loop. Only used when
1826 * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
1827 uint8_t idxTbCurInstr;
1828 /** Space reserved for recompiler data / alignment. */
1829 bool afRecompilerStuff1[3];
1830 /** The virtual sync time at the last timer poll call. */
1831 uint32_t msRecompilerPollNow;
1832 /** The IEMTB::cUsed value when to attempt native recompilation of a TB. */
1833 uint32_t uTbNativeRecompileAtUsedCount;
1834 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1835 uint32_t fTbCurInstr;
1836 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1837 uint32_t fTbPrevInstr;
1838 /** Strict: Tracking skipped EFLAGS calculations. Any bits set here are
1839 * currently not up to date in EFLAGS. */
1840 uint32_t fSkippingEFlags;
1841 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1842 RTGCPHYS GCPhysInstrBufPrev;
1843 /** Pointer to the ring-3 TB allocator for this EMT. */
1844 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1845 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1846 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1847 /** Pointer to the native recompiler state for ring-3. */
1848 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1849
1850 /** Statistics: Times TB execution was broken off before reaching the end. */
1851 STAMCOUNTER StatTbExecBreaks;
1852 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1853 STAMCOUNTER StatCheckIrqBreaks;
1854 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1855 STAMCOUNTER StatCheckModeBreaks;
1856 /** Statistics: Times a post jump target check missed and had to find new TB. */
1857 STAMCOUNTER StatCheckBranchMisses;
1858 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1859 STAMCOUNTER StatCheckNeedCsLimChecking;
1860 /** Exec memory allocator statistics: Number of times allocating executable memory failed. */
1861 STAMCOUNTER StatNativeExecMemInstrBufAllocFailed;
1862 /** Native TB statistics: Number of fully recompiled TBs. */
1863 STAMCOUNTER StatNativeFullyRecompiledTbs;
1864 /** Threaded TB statistics: Number of instructions per TB. */
1865 STAMPROFILE StatTbThreadedInstr;
1866 /** Threaded TB statistics: Number of calls per TB. */
1867 STAMPROFILE StatTbThreadedCalls;
1868 /** Native TB statistics: Native code size per TB. */
1869 STAMPROFILE StatTbNativeCode;
1870 /** Native TB statistics: Profiling native recompilation. */
1871 STAMPROFILE StatNativeRecompilation;
1872 /** Native TB statistics: Number of calls per TB that were recompiled properly. */
1873 STAMPROFILE StatNativeCallsRecompiled;
1874 /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
1875 STAMPROFILE StatNativeCallsThreaded;
1876 /** Native recompiled execution: TLB hits for data fetches. */
1877 STAMCOUNTER StatNativeTlbHitsForFetch;
1878 /** Native recompiled execution: TLB hits for data stores. */
1879 STAMCOUNTER StatNativeTlbHitsForStore;
1880 /** Native recompiled execution: TLB hits for stack accesses. */
1881 STAMCOUNTER StatNativeTlbHitsForStack;
1882 /** Native recompiled execution: TLB hits for mapped accesses. */
1883 STAMCOUNTER StatNativeTlbHitsForMapped;
1884 /** Native recompiled execution: Code TLB misses for new page. */
1885 STAMCOUNTER StatNativeCodeTlbMissesNewPage;
1886 /** Native recompiled execution: Code TLB hits for new page. */
1887 STAMCOUNTER StatNativeCodeTlbHitsForNewPage;
1888 /** Native recompiled execution: Code TLB misses for new page with offset. */
1889 STAMCOUNTER StatNativeCodeTlbMissesNewPageWithOffset;
1890 /** Native recompiled execution: Code TLB hits for new page with offset. */
1891 STAMCOUNTER StatNativeCodeTlbHitsForNewPageWithOffset;
1892
1893 /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
1894 STAMCOUNTER StatNativeRegFindFree;
1895 /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
1896 * to free a variable. */
1897 STAMCOUNTER StatNativeRegFindFreeVar;
1898 /** Native recompiler: Number of times iemNativeRegAllocFindFree did
1899 * not need to free any variables. */
1900 STAMCOUNTER StatNativeRegFindFreeNoVar;
1901 /** Native recompiler: Liveness info freed shadowed guest registers in
1902 * iemNativeRegAllocFindFree. */
1903 STAMCOUNTER StatNativeRegFindFreeLivenessUnshadowed;
1904 /** Native recompiler: Liveness info helped with the allocation in
1905 * iemNativeRegAllocFindFree. */
1906 STAMCOUNTER StatNativeRegFindFreeLivenessHelped;
1907
1908 /** Native recompiler: Number of times status flags calc has been skipped. */
1909 STAMCOUNTER StatNativeEflSkippedArithmetic;
1910 /** Native recompiler: Number of times status flags calc has been skipped. */
1911 STAMCOUNTER StatNativeEflSkippedLogical;
1912
1913 /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
1914 STAMCOUNTER StatNativeLivenessEflCfSkippable;
1915 /** Native recompiler: Number of opportunities to skip EFLAGS.PF updating. */
1916 STAMCOUNTER StatNativeLivenessEflPfSkippable;
1917 /** Native recompiler: Number of opportunities to skip EFLAGS.AF updating. */
1918 STAMCOUNTER StatNativeLivenessEflAfSkippable;
1919 /** Native recompiler: Number of opportunities to skip EFLAGS.ZF updating. */
1920 STAMCOUNTER StatNativeLivenessEflZfSkippable;
1921 /** Native recompiler: Number of opportunities to skip EFLAGS.SF updating. */
1922 STAMCOUNTER StatNativeLivenessEflSfSkippable;
1923 /** Native recompiler: Number of opportunities to skip EFLAGS.OF updating. */
1924 STAMCOUNTER StatNativeLivenessEflOfSkippable;
1925 /** Native recompiler: Number of required EFLAGS.CF updates. */
1926 STAMCOUNTER StatNativeLivenessEflCfRequired;
1927 /** Native recompiler: Number of required EFLAGS.PF updates. */
1928 STAMCOUNTER StatNativeLivenessEflPfRequired;
1929 /** Native recompiler: Number of required EFLAGS.AF updates. */
1930 STAMCOUNTER StatNativeLivenessEflAfRequired;
1931 /** Native recompiler: Number of required EFLAGS.ZF updates. */
1932 STAMCOUNTER StatNativeLivenessEflZfRequired;
1933 /** Native recompiler: Number of required EFLAGS.SF updates. */
1934 STAMCOUNTER StatNativeLivenessEflSfRequired;
1935 /** Native recompiler: Number of required EFLAGS.OF updates. */
1936 STAMCOUNTER StatNativeLivenessEflOfRequired;
1937 /** Native recompiler: Number of potentially delayable EFLAGS.CF updates. */
1938 STAMCOUNTER StatNativeLivenessEflCfDelayable;
1939 /** Native recompiler: Number of potentially delayable EFLAGS.PF updates. */
1940 STAMCOUNTER StatNativeLivenessEflPfDelayable;
1941 /** Native recompiler: Number of potentially delayable EFLAGS.AF updates. */
1942 STAMCOUNTER StatNativeLivenessEflAfDelayable;
1943 /** Native recompiler: Number of potentially delayable EFLAGS.ZF updates. */
1944 STAMCOUNTER StatNativeLivenessEflZfDelayable;
1945 /** Native recompiler: Number of potentially delayable EFLAGS.SF updates. */
1946 STAMCOUNTER StatNativeLivenessEflSfDelayable;
1947 /** Native recompiler: Number of potentially delayable EFLAGS.OF updates. */
1948 STAMCOUNTER StatNativeLivenessEflOfDelayable;
1949
1950 /** Native recompiler: Number of potential PC updates in total. */
1951 STAMCOUNTER StatNativePcUpdateTotal;
1952 /** Native recompiler: Number of PC updates which could be delayed. */
1953 STAMCOUNTER StatNativePcUpdateDelayed;
1954
1955#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
1956 /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
1957 STAMCOUNTER StatNativeSimdRegFindFree;
1958 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree needed
1959 * to free a variable. */
1960 STAMCOUNTER StatNativeSimdRegFindFreeVar;
1961 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree did
1962 * not need to free any variables. */
1963 STAMCOUNTER StatNativeSimdRegFindFreeNoVar;
1964 /** Native recompiler: Liveness info freed shadowed guest registers in
1965 * iemNativeSimdRegAllocFindFree. */
1966 STAMCOUNTER StatNativeSimdRegFindFreeLivenessUnshadowed;
1967 /** Native recompiler: Liveness info helped with the allocation in
1968 * iemNativeSimdRegAllocFindFree. */
1969 STAMCOUNTER StatNativeSimdRegFindFreeLivenessHelped;
1970
1971 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks. */
1972 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckPotential;
1973 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks. */
1974 STAMCOUNTER StatNativeMaybeSseXcptCheckPotential;
1975 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks. */
1976 STAMCOUNTER StatNativeMaybeAvxXcptCheckPotential;
1977
1978 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted. */
1979 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckOmitted;
1980 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted. */
1981 STAMCOUNTER StatNativeMaybeSseXcptCheckOmitted;
1982 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted. */
1983 STAMCOUNTER StatNativeMaybeAvxXcptCheckOmitted;
1984#endif
1985
1986 uint64_t au64Padding[4];
1987 /** @} */
1988
1989 /** Data TLB.
1990 * @remarks Must be 64-byte aligned. */
1991 IEMTLB DataTlb;
1992 /** Instruction TLB.
1993 * @remarks Must be 64-byte aligned. */
1994 IEMTLB CodeTlb;
1995
1996 /** Exception statistics. */
1997 STAMCOUNTER aStatXcpts[32];
1998 /** Interrupt statistics. */
1999 uint32_t aStatInts[256];
2000
2001#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
2002 /** Instruction statistics for ring-0/raw-mode. */
2003 IEMINSTRSTATS StatsRZ;
2004 /** Instruction statistics for ring-3. */
2005 IEMINSTRSTATS StatsR3;
2006# ifdef VBOX_WITH_IEM_RECOMPILER
2007 /** Statistics per threaded function call.
2008 * Updated by both the threaded and native recompilers. */
2009 uint32_t acThreadedFuncStats[0x5100 /*20736*/];
2010# endif
2011#endif
2012} IEMCPU;
2013AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
2014AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
2015AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
2016AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
2017AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
2018AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
2019
2020/** Pointer to the per-CPU IEM state. */
2021typedef IEMCPU *PIEMCPU;
2022/** Pointer to the const per-CPU IEM state. */
2023typedef IEMCPU const *PCIEMCPU;
2024
2025
2026/** @def IEM_GET_CTX
2027 * Gets the guest CPU context for the calling EMT.
2028 * @returns PCPUMCTX
2029 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2030 */
2031#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
2032
2033/** @def IEM_CTX_ASSERT
2034 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
2035 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2036 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
2037 */
2038#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
2039 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
2040 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
2041 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
2042
2043/** @def IEM_CTX_IMPORT_RET
2044 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2045 *
2046 * Will call CPUM to import the bits as needed.
2047 *
2048 * Returns on import failure.
2049 *
2050 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2051 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2052 */
2053#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
2054 do { \
2055 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2056 { /* likely */ } \
2057 else \
2058 { \
2059 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2060 AssertRCReturn(rcCtxImport, rcCtxImport); \
2061 } \
2062 } while (0)
2063
2064/** @def IEM_CTX_IMPORT_NORET
2065 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2066 *
2067 * Will call CPUM to import the bits as needed.
2068 *
2069 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2070 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2071 */
2072#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
2073 do { \
2074 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2075 { /* likely */ } \
2076 else \
2077 { \
2078 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2079 AssertLogRelRC(rcCtxImport); \
2080 } \
2081 } while (0)
2082
2083/** @def IEM_CTX_IMPORT_JMP
2084 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2085 *
2086 * Will call CPUM to import the bits as needed.
2087 *
2088 * Jumps on import failure.
2089 *
2090 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2091 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2092 */
2093#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
2094 do { \
2095 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2096 { /* likely */ } \
2097 else \
2098 { \
2099 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2100 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
2101 } \
2102 } while (0)
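
/*
 * Illustrative sketch only (not part of the interface): typical use of the
 * import macros above before reading guest state.  CPUMCTX_EXTRN_CR0 is a
 * real flag; the function itself is made up for this example.
 */
#if 0
static VBOXSTRICTRC iemSketchReadCr0(PVMCPUCC pVCpu, uint64_t *puValue)
{
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR0); /* returns the status code on import failure */
    *puValue = pVCpu->cpum.GstCtx.cr0;
    return VINF_SUCCESS;
}
#endif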
2103
2104
2105
2106/** @def IEM_GET_TARGET_CPU
2107 * Gets the current IEMTARGETCPU value.
2108 * @returns IEMTARGETCPU value.
2109 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2110 */
2111#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
2112# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
2113#else
2114# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
2115#endif
2116
2117/** @def IEM_GET_INSTR_LEN
2118 * Gets the instruction length. */
2119#ifdef IEM_WITH_CODE_TLB
2120# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
2121#else
2122# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
2123#endif
2124
2125/** @def IEM_TRY_SETJMP
2126 * Wrapper around setjmp / try, hiding all the ugly differences.
2127 *
2128 * @note Use with extreme care as this is a fragile macro.
2129 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2130 * @param a_rcTarget The variable that should receive the status code in case
2131 * of a longjmp/throw.
2132 */
2133/** @def IEM_TRY_SETJMP_AGAIN
2134 * For when setjmp / try is used again in the same variable scope as a previous
2135 * IEM_TRY_SETJMP invocation.
2136 */
2137/** @def IEM_CATCH_LONGJMP_BEGIN
2138 * Start wrapper for catch / setjmp-else.
2139 *
2140 * This will set up a scope.
2141 *
2142 * @note Use with extreme care as this is a fragile macro.
2143 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2144 * @param a_rcTarget The variable that should receive the status code in case
2145 * of a longjmp/throw.
2146 */
2147/** @def IEM_CATCH_LONGJMP_END
2148 * End wrapper for catch / setjmp-else.
2149 *
2150 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
2151 * state.
2152 *
2153 * @note Use with extreme care as this is a fragile macro.
2154 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2155 */
2156#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
2157# ifdef IEM_WITH_THROW_CATCH
2158# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2159 a_rcTarget = VINF_SUCCESS; \
2160 try
2161# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2162 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
2163# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2164 catch (int rcThrown) \
2165 { \
2166 a_rcTarget = rcThrown
2167# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2168 } \
2169 ((void)0)
2170# else /* !IEM_WITH_THROW_CATCH */
2171# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2172 jmp_buf JmpBuf; \
2173 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2174 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2175 if ((rcStrict = setjmp(JmpBuf)) == 0)
2176# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2177 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2178 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2179 if ((rcStrict = setjmp(JmpBuf)) == 0)
2180# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2181 else \
2182 { \
2183 ((void)0)
2184# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2185 } \
2186 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
2187# endif /* !IEM_WITH_THROW_CATCH */
2188#endif /* IEM_WITH_SETJMP */
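
/*
 * Illustrative sketch only (not part of the interface): rough usage pattern
 * for the setjmp/try wrappers above.  The guarded work is made up for this
 * example; note that IEM_CATCH_LONGJMP_BEGIN opens the scope which
 * IEM_CATCH_LONGJMP_END closes.
 */
#if 0
static VBOXSTRICTRC iemSketchGuardedWork(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    IEM_TRY_SETJMP(pVCpu, rcStrict)
    {
        /* Work that may bail out via IEM_DO_LONGJMP (or a C++ throw). */
    }
    IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
    /* rcStrict now holds the status passed to the longjmp/throw. */
    IEM_CATCH_LONGJMP_END(pVCpu);
    return rcStrict;
}
#endif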
2189
2190
2191/**
2192 * Shared per-VM IEM data.
2193 */
2194typedef struct IEM
2195{
2196 /** The VMX APIC-access page handler type. */
2197 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
2198#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
2199 /** Set if the CPUID host call functionality is enabled. */
2200 bool fCpuIdHostCall;
2201#endif
2202} IEM;
2203
2204
2205
2206/** @name IEM_ACCESS_XXX - Access details.
2207 * @{ */
2208#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
2209#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
2210#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
2211#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
2212#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
2213#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
2214#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
2215#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
2216#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
2217#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
2218/** The writes are partial, so initialize the bounce buffer with the
2219 * original RAM content. */
2220#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
2221/** Used in aMemMappings to indicate that the entry is bounce buffered. */
2222#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
2223/** Bounce buffer with ring-3 write pending, first page. */
2224#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
2225/** Bounce buffer with ring-3 write pending, second page. */
2226#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
2227/** Not locked, accessed via the TLB. */
2228#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
2229/** Atomic access.
2230 * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK
2231 * fallback for misaligned stuff. See @bugref{10547}. */
2232#define IEM_ACCESS_ATOMIC UINT32_C(0x00002000)
2233/** Valid bit mask. */
2234#define IEM_ACCESS_VALID_MASK UINT32_C(0x00003fff)
2235/** Shift count for the TLB flags (upper word). */
2236#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
2237
2238/** Atomic read+write data alias. */
2239#define IEM_ACCESS_DATA_ATOMIC (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC)
2240/** Read+write data alias. */
2241#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2242/** Write data alias. */
2243#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2244/** Read data alias. */
2245#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
2246/** Instruction fetch alias. */
2247#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
2248/** Stack write alias. */
2249#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2250/** Stack read alias. */
2251#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
2252/** Stack read+write alias. */
2253#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2254/** Read system table alias. */
2255#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
2256/** Read+write system table alias. */
2257#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
2258/** @} */
2259
2260/** @name Prefix constants (IEMCPU::fPrefixes)
2261 * @{ */
2262#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
2263#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
2264#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
2265#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
2266#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
2267#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
2268#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
2269
2270#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
2271#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
2272#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
2273
2274#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
2275#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
2276#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
2277
2278#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
2279#define IEM_OP_PRF_REX_B RT_BIT_32(25) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
2280#define IEM_OP_PRF_REX_X RT_BIT_32(26) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
2281#define IEM_OP_PRF_REX_R RT_BIT_32(27) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
2282/** Mask with all the REX prefix flags.
2283 * This is generally for use when needing to undo the REX prefixes when they
2284 * are followed by legacy prefixes and therefore do not immediately precede
2285 * the first opcode byte.
2286 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
2287#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
2288
2289#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indicates VEX prefix. */
2290#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indicates EVEX prefix. */
2291#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indicates XOP prefix. */
2292/** @} */
2293
2294/** @name IEMOPFORM_XXX - Opcode forms
2295 * @note These are ORed together with IEMOPHINT_XXX.
2296 * @{ */
2297/** ModR/M: reg, r/m */
2298#define IEMOPFORM_RM 0
2299/** ModR/M: reg, r/m (register) */
2300#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2301/** ModR/M: reg, r/m (memory) */
2302#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2303/** ModR/M: reg, r/m, imm */
2304#define IEMOPFORM_RMI 1
2305/** ModR/M: reg, r/m (register), imm */
2306#define IEMOPFORM_RMI_REG (IEMOPFORM_RMI | IEMOPFORM_MOD3)
2307/** ModR/M: reg, r/m (memory), imm */
2308#define IEMOPFORM_RMI_MEM (IEMOPFORM_RMI | IEMOPFORM_NOT_MOD3)
2309/** ModR/M: reg, r/m, xmm0 */
2310#define IEMOPFORM_RM0 2
2311/** ModR/M: reg, r/m (register), xmm0 */
2312#define IEMOPFORM_RM0_REG (IEMOPFORM_RM0 | IEMOPFORM_MOD3)
2313/** ModR/M: reg, r/m (memory), xmm0 */
2314#define IEMOPFORM_RM0_MEM (IEMOPFORM_RM0 | IEMOPFORM_NOT_MOD3)
2315/** ModR/M: r/m, reg */
2316#define IEMOPFORM_MR 3
2317/** ModR/M: r/m (register), reg */
2318#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2319/** ModR/M: r/m (memory), reg */
2320#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2321/** ModR/M: r/m, reg, imm */
2322#define IEMOPFORM_MRI 4
2323/** ModR/M: r/m (register), reg, imm */
2324#define IEMOPFORM_MRI_REG (IEMOPFORM_MRI | IEMOPFORM_MOD3)
2325/** ModR/M: r/m (memory), reg, imm */
2326#define IEMOPFORM_MRI_MEM (IEMOPFORM_MRI | IEMOPFORM_NOT_MOD3)
2327/** ModR/M: r/m only */
2328#define IEMOPFORM_M 5
2329/** ModR/M: r/m only (register). */
2330#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
2331/** ModR/M: r/m only (memory). */
2332#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
2333/** ModR/M: r/m, imm */
2334#define IEMOPFORM_MI 6
2335/** ModR/M: r/m (register), imm */
2336#define IEMOPFORM_MI_REG (IEMOPFORM_MI | IEMOPFORM_MOD3)
2337/** ModR/M: r/m (memory), imm */
2338#define IEMOPFORM_MI_MEM (IEMOPFORM_MI | IEMOPFORM_NOT_MOD3)
2339/** ModR/M: r/m, 1 (shift and rotate instructions) */
2340#define IEMOPFORM_M1 7
2341/** ModR/M: r/m (register), 1. */
2342#define IEMOPFORM_M1_REG (IEMOPFORM_M1 | IEMOPFORM_MOD3)
2343/** ModR/M: r/m (memory), 1. */
2344#define IEMOPFORM_M1_MEM (IEMOPFORM_M1 | IEMOPFORM_NOT_MOD3)
2345/** ModR/M: r/m, CL (shift and rotate instructions)
2346 * @todo This should just've been a generic fixed register. But the python
2347 * code needs more convincing. */
2348#define IEMOPFORM_M_CL 8
2349/** ModR/M: r/m (register), CL. */
2350#define IEMOPFORM_M_CL_REG (IEMOPFORM_M_CL | IEMOPFORM_MOD3)
2351/** ModR/M: r/m (memory), CL. */
2352#define IEMOPFORM_M_CL_MEM (IEMOPFORM_M_CL | IEMOPFORM_NOT_MOD3)
2353/** ModR/M: reg only */
2354#define IEMOPFORM_R 9
2355
2356/** VEX+ModR/M: reg, r/m */
2357#define IEMOPFORM_VEX_RM 16
2358/** VEX+ModR/M: reg, r/m (register) */
2359#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
2360/** VEX+ModR/M: reg, r/m (memory) */
2361#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
2362/** VEX+ModR/M: r/m, reg */
2363#define IEMOPFORM_VEX_MR 17
2364/** VEX+ModR/M: r/m (register), reg */
2365#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
2366/** VEX+ModR/M: r/m (memory), reg */
2367#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
2368/** VEX+ModR/M: r/m, reg, imm8 */
2369#define IEMOPFORM_VEX_MRI 18
2370/** VEX+ModR/M: r/m (register), reg, imm8 */
2371#define IEMOPFORM_VEX_MRI_REG (IEMOPFORM_VEX_MRI | IEMOPFORM_MOD3)
2372/** VEX+ModR/M: r/m (memory), reg, imm8 */
2373#define IEMOPFORM_VEX_MRI_MEM (IEMOPFORM_VEX_MRI | IEMOPFORM_NOT_MOD3)
2374/** VEX+ModR/M: r/m only */
2375#define IEMOPFORM_VEX_M 19
2376/** VEX+ModR/M: r/m only (register). */
2377#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
2378/** VEX+ModR/M: r/m only (memory). */
2379#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
2380/** VEX+ModR/M: reg only */
2381#define IEMOPFORM_VEX_R 20
2382/** VEX+ModR/M: reg, vvvv, r/m */
2383#define IEMOPFORM_VEX_RVM 21
2384/** VEX+ModR/M: reg, vvvv, r/m (register). */
2385#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
2386/** VEX+ModR/M: reg, vvvv, r/m (memory). */
2387#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
2388/** VEX+ModR/M: reg, vvvv, r/m, imm */
2389#define IEMOPFORM_VEX_RVMI 22
2390/** VEX+ModR/M: reg, vvvv, r/m (register), imm. */
2391#define IEMOPFORM_VEX_RVMI_REG (IEMOPFORM_VEX_RVMI | IEMOPFORM_MOD3)
2392/** VEX+ModR/M: reg, vvvv, r/m (memory), imm. */
2393#define IEMOPFORM_VEX_RVMI_MEM (IEMOPFORM_VEX_RVMI | IEMOPFORM_NOT_MOD3)
2394/** VEX+ModR/M: reg, vvvv, r/m, imm(reg) */
2395#define IEMOPFORM_VEX_RVMR 23
2396/** VEX+ModR/M: reg, vvvv, r/m (register), imm(reg). */
2397#define IEMOPFORM_VEX_RVMR_REG (IEMOPFORM_VEX_RVMI | IEMOPFORM_MOD3)
2398/** VEX+ModR/M: reg, vvvv, r/m (memory), imm(reg). */
2399#define IEMOPFORM_VEX_RVMR_MEM (IEMOPFORM_VEX_RVMI | IEMOPFORM_NOT_MOD3)
2400/** VEX+ModR/M: reg, r/m, vvvv */
2401#define IEMOPFORM_VEX_RMV 24
2402/** VEX+ModR/M: reg, r/m, vvvv (register). */
2403#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
2404/** VEX+ModR/M: reg, r/m, vvvv (memory). */
2405#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
2406/** VEX+ModR/M: reg, r/m, imm8 */
2407#define IEMOPFORM_VEX_RMI 25
2408/** VEX+ModR/M: reg, r/m, imm8 (register). */
2409#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
2410/** VEX+ModR/M: reg, r/m, imm8 (memory). */
2411#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
2412/** VEX+ModR/M: r/m, vvvv, reg */
2413#define IEMOPFORM_VEX_MVR 26
2414/** VEX+ModR/M: r/m, vvvv, reg (register) */
2415#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
2416/** VEX+ModR/M: r/m, vvvv, reg (memory) */
2417#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
2418/** VEX+ModR/M+/n: vvvv, r/m */
2419#define IEMOPFORM_VEX_VM 27
2420/** VEX+ModR/M+/n: vvvv, r/m (register) */
2421#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
2422/** VEX+ModR/M+/n: vvvv, r/m (memory) */
2423#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
2424/** VEX+ModR/M+/n: vvvv, r/m, imm8 */
2425#define IEMOPFORM_VEX_VMI 28
2426/** VEX+ModR/M+/n: vvvv, r/m, imm8 (register) */
2427#define IEMOPFORM_VEX_VMI_REG (IEMOPFORM_VEX_VMI | IEMOPFORM_MOD3)
2428/** VEX+ModR/M+/n: vvvv, r/m, imm8 (memory) */
2429#define IEMOPFORM_VEX_VMI_MEM (IEMOPFORM_VEX_VMI | IEMOPFORM_NOT_MOD3)
2430
2431/** Fixed register instruction, no R/M. */
2432#define IEMOPFORM_FIXED 32
2433
2434/** The r/m is a register. */
2435#define IEMOPFORM_MOD3 RT_BIT_32(8)
2436/** The r/m is a memory access. */
2437#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
2438/** @} */
2439
2440/** @name IEMOPHINT_XXX - Additional Opcode Hints
2441 * @note These are ORed together with IEMOPFORM_XXX.
2442 * @{ */
2443/** Ignores the operand size prefix (66h). */
2444#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
2445/** Ignores REX.W (aka WIG). */
2446#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
2447/** Both the operand size prefixes (66h + REX.W) are ignored. */
2448#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
2449/** Allowed with the lock prefix. */
2450#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
2451/** The VEX.L value is ignored (aka LIG). */
2452#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
2453/** The VEX.L value must be zero (i.e. 128-bit width only). */
2454#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
2455/** The VEX.L value must be one (i.e. 256-bit width only). */
2456#define IEMOPHINT_VEX_L_ONE RT_BIT_32(14)
2457/** The VEX.V value must be zero. */
2458#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(15)
2459/** The REX.W/VEX.W value must be zero. */
2460#define IEMOPHINT_REX_W_ZERO RT_BIT_32(16)
2461#define IEMOPHINT_VEX_W_ZERO IEMOPHINT_REX_W_ZERO
2462/** The REX.W/VEX.W value must be one. */
2463#define IEMOPHINT_REX_W_ONE RT_BIT_32(17)
2464#define IEMOPHINT_VEX_W_ONE IEMOPHINT_REX_W_ONE
2465
2466/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
2467#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
2468/** @} */
2469
2470/**
2471 * Possible hardware task switch sources.
2472 */
2473typedef enum IEMTASKSWITCH
2474{
2475 /** Task switch caused by an interrupt/exception. */
2476 IEMTASKSWITCH_INT_XCPT = 1,
2477 /** Task switch caused by a far CALL. */
2478 IEMTASKSWITCH_CALL,
2479 /** Task switch caused by a far JMP. */
2480 IEMTASKSWITCH_JUMP,
2481 /** Task switch caused by an IRET. */
2482 IEMTASKSWITCH_IRET
2483} IEMTASKSWITCH;
2484AssertCompileSize(IEMTASKSWITCH, 4);
2485
2486/**
2487 * Possible CrX load (write) sources.
2488 */
2489typedef enum IEMACCESSCRX
2490{
2491 /** CrX access caused by 'mov crX' instruction. */
2492 IEMACCESSCRX_MOV_CRX,
2493 /** CrX (CR0) write caused by 'lmsw' instruction. */
2494 IEMACCESSCRX_LMSW,
2495 /** CrX (CR0) write caused by 'clts' instruction. */
2496 IEMACCESSCRX_CLTS,
2497 /** CrX (CR0) read caused by 'smsw' instruction. */
2498 IEMACCESSCRX_SMSW
2499} IEMACCESSCRX;
2500
2501#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2502/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
2503 *
2504 * These flags provide further context to SLAT page-walk failures that could not be
2505 * determined by PGM (e.g., PGM is not privy to memory access permissions).
2506 *
2507 * @{
2508 */
2509/** Translating a nested-guest linear address failed accessing a nested-guest
2510 * physical address. */
2511# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
2512/** Translating a nested-guest linear address failed accessing a
2513 * paging-structure entry or updating accessed/dirty bits. */
2514# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
2515/** @} */
2516
2517DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
2518# ifndef IN_RING3
2519DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
2520# endif
2521#endif
2522
2523/**
2524 * Indicates to the verifier that the given flag set is undefined.
2525 *
2526 * Can be invoked again to add more flags.
2527 *
2528 * This is a NOOP if the verifier isn't compiled in.
2529 *
2530 * @note We're temporarily keeping this until code is converted to new
2531 * disassembler style opcode handling.
2532 */
2533#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2534
2535
2536/** @def IEM_DECL_IMPL_TYPE
2537 * For typedef'ing an instruction implementation function.
2538 *
2539 * @param a_RetType The return type.
2540 * @param a_Name The name of the type.
2541 * @param a_ArgList The argument list enclosed in parentheses.
2542 */
2543
2544/** @def IEM_DECL_IMPL_DEF
2545 * For defining an instruction implementation function.
2546 *
2547 * @param a_RetType The return type.
2548 * @param a_Name The name of the function.
2549 * @param a_ArgList The argument list enclosed in parentheses.
2550 */
2551
2552#if defined(__GNUC__) && defined(RT_ARCH_X86)
2553# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2554 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
2555# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2556 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2557# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2558 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2559
2560#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
2561# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2562 a_RetType (__fastcall a_Name) a_ArgList
2563# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2564 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2565# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2566 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2567
2568#elif __cplusplus >= 201700 /* P0012R1 support */
2569# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2570 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
2571# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2572 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2573# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2574 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2575
2576#else
2577# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2578 a_RetType (VBOXCALL a_Name) a_ArgList
2579# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2580 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2581# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2582 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2583
2584#endif
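
/*
 * Illustrative sketch only (not part of the interface): how a helper matching
 * the FNIEMAIMPLBINU8 typedef below could be defined with IEM_DECL_IMPL_DEF.
 * The function name and body are made up for this example.
 */
#if 0
IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_sketch_or_u8, (uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t u8Src))
{
    *pu8Dst |= u8Src;
    return fEFlagsIn; /* a real helper recalculates the status flags here */
}
#endif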
2585
2586/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
2587RT_C_DECLS_BEGIN
2588extern uint8_t const g_afParity[256];
2589RT_C_DECLS_END
2590
2591
2592/** @name Arithmetic assignment operations on bytes (binary).
2593 * @{ */
2594typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU8, (uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t u8Src));
2595typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
2596FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
2597FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
2598FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
2599FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
2600FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
2601FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
2602FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
2603
2604typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2605typedef FNIEMAIMPLBINTODOU8 *PFNIEMAIMPLBINTODOU8;
2606/** @} */
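
/*
 * Illustrative sketch only (not part of the interface): calling one of the
 * byte helpers above.  Per the FNIEMAIMPLBINU8 typedef, the EFLAGS value goes
 * in as the first parameter and the updated value comes back as the return
 * value; the enclosing function is made up for this example.
 */
#if 0
static void iemSketchAddU8(void)
{
    uint8_t  u8Dst   = 0x7f;
    uint32_t fEFlags = X86_EFL_1;                  /* the reserved EFLAGS bit 1 is always set */
    fEFlags = iemAImpl_add_u8(fEFlags, &u8Dst, 1); /* u8Dst becomes 0x80; OF, SF and AF end up set */
}
#endif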
2607
2608/** @name Arithmetic assignment operations on words (binary).
2609 * @{ */
2610typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU16, (uint32_t fEFlagsIn, uint16_t *pu16Dst, uint16_t u16Src));
2611typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
2612FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
2613FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
2614FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
2615FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
2616FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
2617FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
2618FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
2619
2620typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2621typedef FNIEMAIMPLBINTODOU16 *PFNIEMAIMPLBINTODOU16;
2622/** @} */
2623
2624
2625/** @name Arithmetic assignment operations on double words (binary).
2626 * @{ */
2627typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU32, (uint32_t fEFlagsIn, uint32_t *pu32Dst, uint32_t u32Src));
2628typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
2629FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
2630FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
2631FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
2632FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
2633FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
2634FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
2635FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
2636
2637typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2638typedef FNIEMAIMPLBINTODOU32 *PFNIEMAIMPLBINTODOU32;
2639FNIEMAIMPLBINTODOU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
2640FNIEMAIMPLBINTODOU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
2641FNIEMAIMPLBINTODOU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
2642/** @} */
2643
2644/** @name Arithmetic assignment operations on quad words (binary).
2645 * @{ */
2646typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU64, (uint32_t fEFlagsIn, uint64_t *pu64Dst, uint64_t u64Src));
2647typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
2648FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
2649FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
2650FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
2651FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
2652FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
2653FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
2654FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
2655
2656typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2657typedef FNIEMAIMPLBINTODOU64 *PFNIEMAIMPLBINTODOU64;
2658FNIEMAIMPLBINTODOU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
2659FNIEMAIMPLBINTODOU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
2660FNIEMAIMPLBINTODOU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
2661/** @} */
2662
2663typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU8, (uint32_t fEFlagsIn, uint8_t const *pu8Dst, uint8_t u8Src));
2664typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
2665typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU16,(uint32_t fEFlagsIn, uint16_t const *pu16Dst, uint16_t u16Src));
2666typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
2667typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU32,(uint32_t fEFlagsIn, uint32_t const *pu32Dst, uint32_t u32Src));
2668typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
2669typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU64,(uint32_t fEFlagsIn, uint64_t const *pu64Dst, uint64_t u64Src));
2670typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
2671
2672/** @name Compare operations (thrown in with the binary ops).
2673 * @{ */
2674FNIEMAIMPLBINROU8 iemAImpl_cmp_u8;
2675FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
2676FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
2677FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
2678/** @} */
2679
2680/** @name Test operations (thrown in with the binary ops).
2681 * @{ */
2682FNIEMAIMPLBINROU8 iemAImpl_test_u8;
2683FNIEMAIMPLBINROU16 iemAImpl_test_u16;
2684FNIEMAIMPLBINROU32 iemAImpl_test_u32;
2685FNIEMAIMPLBINROU64 iemAImpl_test_u64;
2686/** @} */
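
/*
 * Sketch: the read-only variants (FNIEMAIMPLBINROU*) never modify the destination,
 * they only recompute EFLAGS, hence the const destination pointer.  Hypothetical use:
 *
 *     uint8_t const u8Left  = 2;
 *     uint32_t      fEFlags = 0;
 *     fEFlags = iemAImpl_cmp_u8(fEFlags, &u8Left, 3);     // borrow: CF and SF set, u8Left untouched
 */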
2687
2688typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOROU16,(uint16_t const *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2689typedef FNIEMAIMPLBINTODOROU16 *PFNIEMAIMPLBINTODOROU16;
2690typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOROU32,(uint32_t const *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2691typedef FNIEMAIMPLBINTODOROU32 *PFNIEMAIMPLBINTODOROU32;
2692typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINTODOROU64,(uint64_t const *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2693typedef FNIEMAIMPLBINTODOROU64 *PFNIEMAIMPLBINTODOROU64;
2694
2695/** @name Bit operations (thrown in with the binary ops).
2696 * @{ */
2697FNIEMAIMPLBINTODOROU16 iemAImpl_bt_u16;
2698FNIEMAIMPLBINTODOROU32 iemAImpl_bt_u32;
2699FNIEMAIMPLBINTODOROU64 iemAImpl_bt_u64;
2700FNIEMAIMPLBINTODOU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
2701FNIEMAIMPLBINTODOU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
2702FNIEMAIMPLBINTODOU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
2703FNIEMAIMPLBINTODOU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
2704FNIEMAIMPLBINTODOU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
2705FNIEMAIMPLBINTODOU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
2706FNIEMAIMPLBINTODOU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
2707FNIEMAIMPLBINTODOU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
2708FNIEMAIMPLBINTODOU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
2709/** @} */
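
/*
 * Sketch: the bit test workers still take EFLAGS by reference (the *TODO* typedefs
 * above have not been converted to the by-value convention yet).  Hypothetical use:
 *
 *     uint16_t u16Bits = 0x0004;
 *     uint32_t fEFlags = 0;
 *     iemAImpl_bt_u16(&u16Bits, 2, &fEFlags);             // CF set, u16Bits unchanged
 *     iemAImpl_bts_u16(&u16Bits, 0, &fEFlags);            // CF clear, bit 0 now set (0x0005)
 */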
2710
2711/** @name Arithmetic three operand operations on double words (binary).
2712 * @{ */
2713typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
2714typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
2715FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
2716FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
2717FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
2718/** @} */
2719
2720/** @name Arithmetic three operand operations on quad words (binary).
2721 * @{ */
2722typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
2723typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
2724FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
2725FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
2726FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
2727/** @} */
2728
2729/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
2730 * @{ */
2731typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
2732typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
2733FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
2734FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
2735FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
2736FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
2737FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
2738FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
2739/** @} */
2740
2741/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
2742 * @{ */
2743typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
2744typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
2745FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
2746FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
2747FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
2748FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
2749FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
2750FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
2751/** @} */
2752
2753/** @name MULX 32-bit and 64-bit.
2754 * @{ */
2755typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
2756typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
2757FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
2758
2759typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
2760typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
2761FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
2762/** @} */
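
/*
 * Sketch: MULX produces a full-width product without touching EFLAGS, hence the two
 * destination pointers and no pEFlags parameter.  Which destination receives the
 * high half and which the low half is assumed here purely for illustration.
 *
 *     uint32_t uHi = 0, uLo = 0;
 *     iemAImpl_mulx_u32(&uHi, &uLo, UINT32_C(0x80000000), 4);  // full product 0x200000000
 */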
2763
2764
2765/** @name Exchange memory with register operations.
2766 * @{ */
2767IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2768IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2769IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2770IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2771IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2772IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2773IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2774IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2775/** @} */
2776
2777/** @name Exchange and add operations.
2778 * @{ */
2779IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2780IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2781IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2782IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2783IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2784IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2785IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2786IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2787/** @} */
2788
2789/** @name Compare and exchange.
2790 * @{ */
2791IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2792IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2793IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2794IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2795IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2796IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2797#if ARCH_BITS == 32
2798IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2799IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2800#else
2801IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2802IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2803#endif
2804IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2805 uint32_t *pEFlags));
2806IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2807 uint32_t *pEFlags));
2808IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2809 uint32_t *pEFlags));
2810IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2811 uint32_t *pEFlags));
2812#ifndef RT_ARCH_ARM64
2813IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
2814 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
2815#endif
2816/** @} */
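
/*
 * Sketch of the CMPXCHG worker contract (mirrors the instruction): if *pu32Dst equals
 * *puEax the destination is replaced by uSrcReg and ZF is set, otherwise *puEax is
 * loaded with the current destination value.  Hypothetical use:
 *
 *     uint32_t u32Mem = 42, u32Eax = 42, fEFlags = 0;
 *     iemAImpl_cmpxchg_u32(&u32Mem, &u32Eax, 99, &fEFlags);   // u32Mem becomes 99, ZF set
 */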
2817
2818/** @name Memory ordering
2819 * @{ */
2820typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
2821typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
2822IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
2823IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
2824IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
2825#ifndef RT_ARCH_ARM64
2826IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
2827#endif
2828/** @} */
2829
2830/** @name Double precision shifts
2831 * @{ */
2832typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
2833typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
2834typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
2835typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
2836typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
2837typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
2838FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
2839FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
2840FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
2841FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
2842FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
2843FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
2844/** @} */
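
/*
 * Sketch: SHLD shifts the destination left, filling the vacated low bits from the
 * top of the source operand.  Hypothetical use:
 *
 *     uint16_t u16Dst  = 0x1234;
 *     uint32_t fEFlags = 0;
 *     iemAImpl_shld_u16(&u16Dst, 0xabcd, 4, &fEFlags);    // u16Dst becomes 0x234a
 */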
2845
2846
2847/** @name Bit search operations (thrown in with the binary ops).
2848 * @{ */
2849FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
2850FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
2851FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
2852FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
2853FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
2854FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
2855FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
2856FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
2857FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
2858FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
2859FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
2860FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
2861FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
2862FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
2863FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
2864/** @} */
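
/*
 * Sketch: after the refactoring these share the EFLAGS-by-value convention of the
 * binary ops; the destination receives the bit index or population count.
 * Hypothetical use:
 *
 *     uint16_t u16Dst  = 0;
 *     uint32_t fEFlags = 0;
 *     fEFlags = iemAImpl_bsf_u16(fEFlags, &u16Dst, 0x0010);     // u16Dst = 4, ZF clear
 *     fEFlags = iemAImpl_popcnt_u16(fEFlags, &u16Dst, 0x00f0);  // u16Dst = 4
 */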
2865
2866/** @name Signed multiplication operations (thrown in with the binary ops).
2867 * @{ */
2868FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
2869FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
2870FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
2871/** @} */
2872
2873/** @name Arithmetic assignment operations on bytes (unary).
2874 * @{ */
2875typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
2876typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
2877FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
2878FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
2879FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
2880FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
2881/** @} */
2882
2883/** @name Arithmetic assignment operations on words (unary).
2884 * @{ */
2885typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
2886typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
2887FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
2888FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
2889FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
2890FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
2891/** @} */
2892
2893/** @name Arithmetic assignment operations on double words (unary).
2894 * @{ */
2895typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
2896typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
2897FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
2898FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
2899FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
2900FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
2901/** @} */
2902
2903/** @name Arithmetic assignment operations on quad words (unary).
2904 * @{ */
2905typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
2906typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
2907FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
2908FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
2909FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
2910FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
2911/** @} */
2912
2913
2914/** @name Shift operations on bytes (Group 2).
2915 * @{ */
2916typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
2917typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
2918FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
2919FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
2920FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
2921FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
2922FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
2923FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
2924FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
2925/** @} */
2926
2927/** @name Shift operations on words (Group 2).
2928 * @{ */
2929typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
2930typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
2931FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
2932FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
2933FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
2934FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
2935FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
2936FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
2937FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
2938/** @} */
2939
2940/** @name Shift operations on double words (Group 2).
2941 * @{ */
2942typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
2943typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
2944FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
2945FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
2946FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
2947FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
2948FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
2949FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
2950FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
2951/** @} */
2952
2953/** @name Shift operations on quad words (Group 2).
2954 * @{ */
2955typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
2956typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
2957FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
2958FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
2959FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
2960FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
2961FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
2962FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
2963FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
2964/** @} */
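
/*
 * Sketch: the Group 2 shift/rotate workers update EFLAGS by reference; the _amd and
 * _intel variants exist because the vendors differ in how they set the flags left
 * undefined by the architecture.  Hypothetical use:
 *
 *     uint8_t  u8Dst   = 0x81;
 *     uint32_t fEFlags = 0;
 *     iemAImpl_rol_u8(&u8Dst, 1, &fEFlags);               // u8Dst becomes 0x03, CF set
 */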
2965
2966/** @name Multiplication and division operations.
2967 * @{ */
2968typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
2969typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
2970FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
2971FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
2972FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
2973FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
2974
2975typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
2976typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
2977FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
2978FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
2979FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
2980FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
2981
2982typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
2983typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
2984FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
2985FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
2986FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
2987FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
2988
2989typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
2990typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
2991FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
2992FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
2993FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
2994FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
2995/** @} */
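
/*
 * Sketch: unlike the other groups these return an int so the caller can raise \#DE;
 * the convention is assumed here to be zero for success and non-zero for a divide
 * error (zero divisor or quotient overflow).  Hypothetical use:
 *
 *     uint16_t u16Ax = 100, u16Dx = 0;
 *     uint32_t fEFlags = 0;
 *     int rc = iemAImpl_div_u16(&u16Ax, &u16Dx, 7, &fEFlags);   // u16Ax=14, u16Dx=2, rc assumed 0
 */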
2996
2997/** @name Byte Swap.
2998 * @{ */
2999IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
3000IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
3001IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
3002/** @} */
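
/*
 * Sketch: the 16-bit variant takes a 32-bit pointer because BSWAP always operates on
 * the full 32-bit register (the 16-bit encoding yields undefined results on real CPUs).
 * Hypothetical use of the well-defined 32-bit form:
 *
 *     uint32_t u32Reg = UINT32_C(0x11223344);
 *     iemAImpl_bswap_u32(&u32Reg);                        // u32Reg becomes 0x44332211
 */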
3003
3004/** @name Misc.
3005 * @{ */
3006FNIEMAIMPLBINTODOU16 iemAImpl_arpl;
3007/** @} */
3008
3009/** @name RDRAND and RDSEED
3010 * @{ */
3011typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
3012typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
3013typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
3014typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
3015typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
3016typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;
3017
3018FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
3019FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
3020FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
3021FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
3022FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
3023FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
3024/** @} */
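
/*
 * Sketch: the worker stores a (pseudo) random value and reports availability through
 * CF in *pEFlags, mirroring the instruction.  Hypothetical use:
 *
 *     uint32_t u32Val = 0, fEFlags = 0;
 *     iemAImpl_rdrand_u32(&u32Val, &fEFlags);
 *     bool const fValid = RT_BOOL(fEFlags & X86_EFL_CF);  // CF set = u32Val is valid
 */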
3025
3026/** @name ADOX and ADCX
3027 * @{ */
3028FNIEMAIMPLBINTODOU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
3029FNIEMAIMPLBINTODOU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
3030FNIEMAIMPLBINTODOU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
3031FNIEMAIMPLBINTODOU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
3032/** @} */
3033
3034/** @name FPU operations taking a 32-bit float argument
3035 * @{ */
3036typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3037 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
3038typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
3039
3040typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3041 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
3042typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
3043
3044FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
3045FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
3046FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
3047FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
3048FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
3049FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
3050FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
3051
3052IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
3053IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3054 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
3055/** @} */
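
/*
 * Sketch: the FPU workers take the guest FXSTATE for control word / rounding state
 * and return the 80-bit result together with the FSW through an IEMFPURESULT.  The
 * guest-context expression and the local names are assumptions for illustration.
 *
 *     IEMFPURESULT Res;
 *     RTFLOAT80U   r80St0;    // ST(0) as fetched by the caller (hypothetical)
 *     RTFLOAT32U   r32Val;    // the 32-bit memory operand (hypothetical)
 *     iemAImpl_fadd_r80_by_r32(&pVCpu->cpum.GstCtx.XState.x87, &Res, &r80St0, &r32Val);
 */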
3056
3057/** @name FPU operations taking a 64-bit float argument
3058 * @{ */
3059typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3060 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
3061typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
3062
3063typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3064 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
3065typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
3066
3067FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
3068FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
3069FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
3070FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
3071FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
3072FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
3073FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
3074
3075IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
3076IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3077                                                 PRTFLOAT64U pr64Val, PCRTFLOAT80U pr80Val));
3078/** @} */
3079
3080/** @name FPU operations taking an 80-bit float argument
3081 * @{ */
3082typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3083 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3084typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
3085FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
3086FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
3087FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
3088FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
3089FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
3090FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
3091FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
3092FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
3093FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
3094
3095FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80, iemAImpl_fpatan_r80_by_r80_amd, iemAImpl_fpatan_r80_by_r80_intel;
3096FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80, iemAImpl_fyl2x_r80_by_r80_amd, iemAImpl_fyl2x_r80_by_r80_intel;
3097FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
3098
3099typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3100 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3101typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
3102FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
3103FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
3104
3105typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
3106 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3107typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
3108FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
3109FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
3110
3111typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
3112typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
3113FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
3114FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
3115FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
3116FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
3117FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
3118FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
3119FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
3120
3121typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
3122typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
3123FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
3124FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
3125
3126typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
3127typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
3128FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
3129FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
3130FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
3131FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
3132FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
3133FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
3134FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
3135
3136typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
3137 PCRTFLOAT80U pr80Val));
3138typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
3139FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
3140FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
3141FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
3142
3143IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
3144IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3145 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
3146
3147IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
3148IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3149 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
3150
3151/** @} */
3152
3153/** @name FPU operations taking a 16-bit signed integer argument
3154 * @{ */
3155typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3156 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3157typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
3158typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3159 int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
3160typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
3161
3162FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
3163FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
3164FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
3165FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
3166FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
3167FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
3168
3169typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3170 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3171typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
3172FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;
3173
3174IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
3175FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
3176FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
3177/** @} */
3178
3179/** @name FPU operations taking a 32-bit signed integer argument
3180 * @{ */
3181typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3182 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3183typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
3184typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3185 int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
3186typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
3187
3188FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
3189FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
3190FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
3191FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
3192FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
3193FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
3194
3195typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3196 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3197typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
3198FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;
3199
3200IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
3201FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
3202FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
3203/** @} */
3204
3205/** @name FPU operations taking a 64-bit signed integer argument
3206 * @{ */
3207typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3208 int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
3209typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
3210
3211IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
3212FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
3213FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
3214/** @} */
3215
3216
3217/** Temporary type representing a 256-bit vector register. */
3218typedef struct { uint64_t au64[4]; } IEMVMM256;
3219/** Temporary type pointing to a 256-bit vector register. */
3220typedef IEMVMM256 *PIEMVMM256;
3221/** Temporary type pointing to a const 256-bit vector register. */
3222typedef IEMVMM256 const *PCIEMVMM256;
3223
3224
3225/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
3226 * @{ */
3227typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
3228typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
3229typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
3230typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
3231typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3232typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
3233typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3234typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
3235typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
3236typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
3237typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
3238typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
3239typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3240typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
3241typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3242typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
3243typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
3244typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
3245FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
3246FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
3247FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
3248FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
3249FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
3250FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
3251FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddd_u64;
3252FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddq_u64;
3253FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
3254FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
3255FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubd_u64;
3256FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubq_u64;
3257FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmaddwd_u64, iemAImpl_pmaddwd_u64_fallback;
3258FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
3259FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
3260FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
3261FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
3262FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
3263FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
3264FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
3265FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
3266FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
3267FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
3268FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
3269FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
3270FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
3271FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
3272FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
3273FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
3274FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
3275FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmuludq_u64;
3276FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
3277FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
3278FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
3279FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
3280FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
3281FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
3282FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
3283FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
3284
3285FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
3286FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
3287FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
3288FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
3289FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
3290FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
3291FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
3292FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
3293FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddd_u128;
3294FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddq_u128;
3295FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
3296FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
3297FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubd_u128;
3298FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubq_u128;
3299FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
3300FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhw_u128;
3301FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
3302FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddwd_u128, iemAImpl_pmaddwd_u128_fallback;
3303FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminub_u128;
3304FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
3305FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
3306FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
3307FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
3308FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
3309FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxub_u128;
3310FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
3311FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
3312FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
3313FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsw_u128;
3314FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
3315FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
3316FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
3317FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
3318FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
3319FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
3320FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
3321FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
3322FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
3323FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
3324FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
3325FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
3326FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
3327FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
3328FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
3329FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuludq_u128;
3331FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
3332FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
3333FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
3334FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
3335FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
3336FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
3337FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
3338FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
3339FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
3340FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
3341FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
3342FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
3343
3344FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
3345FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
3346FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
3347FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
3348FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
3349FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
3350FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
3351FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
3352FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
3353FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
3354FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
3355FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
3356FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
3357FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
3358FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
3359FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
3360FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
3361FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
3362FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
3363FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
3364FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
3365FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
3366FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
3367FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
3368FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
3369FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
3370FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
3371FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
3372FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
3373FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
3374FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
3375FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
3376FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
3377FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
3378FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
3379FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
3380FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
3381FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
3382FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
3383FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
3384FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
3385FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
3386FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
3387FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
3388FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
3389FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
3390FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
3391FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
3392FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
3393FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
3394FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
3395FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
3396FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
3397FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
3398FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
3399FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
3400FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
3401FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
3402FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
3403FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
3404FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
3405FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
3406FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
3407FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
3408FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;
3409FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllw_u128, iemAImpl_vpsllw_u128_fallback;
3410FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpslld_u128, iemAImpl_vpslld_u128_fallback;
3411FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllq_u128, iemAImpl_vpsllq_u128_fallback;
3412FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsraw_u128, iemAImpl_vpsraw_u128_fallback;
3413FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrad_u128, iemAImpl_vpsrad_u128_fallback;
3414FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlw_u128, iemAImpl_vpsrlw_u128_fallback;
3415FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrld_u128, iemAImpl_vpsrld_u128_fallback;
3416FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlq_u128, iemAImpl_vpsrlq_u128_fallback;
3417FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddwd_u128, iemAImpl_vpmaddwd_u128_fallback;
3418
3419FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
3420FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsw_u128_fallback;
3421FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsd_u128_fallback;
3422FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
3423
3424FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
3425FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
3426FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
3427FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
3428FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
3429FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
3430FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
3431FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
3432FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
3433FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
3434FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
3435FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
3436FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
3437FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
3438FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
3439FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
3440FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
3441FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
3442FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
3443FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
3444FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
3445FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
3446FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
3447FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
3448FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
3449FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
3450FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
3451FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
3452FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
3453FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
3454FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
3455FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
3456FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
3457FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
3458FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
3459FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
3460FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
3461FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
3462FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
3463FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
3464FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
3465FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
3466FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
3467FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
3468FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
3469FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
3470FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
3471FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
3472FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
3473FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
3474FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
3475FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
3476FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
3477FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
3478FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
3479FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
3480FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
3481FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
3482FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
3483FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
3484FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
3485FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
3486FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
3487FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
3488FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;
3489FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllw_u256, iemAImpl_vpsllw_u256_fallback;
3490FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpslld_u256, iemAImpl_vpslld_u256_fallback;
3491FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllq_u256, iemAImpl_vpsllq_u256_fallback;
3492FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsraw_u256, iemAImpl_vpsraw_u256_fallback;
3493FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrad_u256, iemAImpl_vpsrad_u256_fallback;
3494FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlw_u256, iemAImpl_vpsrlw_u256_fallback;
3495FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrld_u256, iemAImpl_vpsrld_u256_fallback;
3496FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlq_u256, iemAImpl_vpsrlq_u256_fallback;
3497FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddwd_u256, iemAImpl_vpmaddwd_u256_fallback;
3498
3499FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
3500FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
3501FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
3502/** @} */
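
/*
 * Sketch: the MMX/SSE forms are two-operand (the destination doubles as first source),
 * while the VEX forms are non-destructive three-operand.  Hypothetical use of the
 * 128-bit AVX byte add:
 *
 *     RTUINT128U uDst, uSrc1, uSrc2;
 *     uSrc1.au64[0] = uSrc1.au64[1] = UINT64_C(0x0101010101010101);
 *     uSrc2.au64[0] = uSrc2.au64[1] = UINT64_C(0x0202020202020202);
 *     iemAImpl_vpaddb_u128(&uDst, &uSrc1, &uSrc2);        // every byte of uDst becomes 0x03
 */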
3503
3504/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
3505 * @{ */
3506FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
3507FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
3508FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
3509 iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
3510 iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
3511 iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
3512 iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
3513 iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
3514 iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
3515 iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
3516
3517FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
3518 iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
3519 iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
3520 iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
3521 iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
3522 iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
3523 iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
3524 iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
3525/** @} */
3526
3527/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
3528 * @{ */
3529FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
3530FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
3531FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
3532 iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
3533 iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
3534 iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
3535FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
3536 iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
3537 iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
3538 iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
3539/** @} */
3540
3541/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
3542 * @{ */
3543typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3544typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
3545typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
3546typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
3547IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
3548FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
3549#ifndef IEM_WITHOUT_ASSEMBLY
3550FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
3551#endif
3552FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
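/*
 * Note: bEvil is simply the instruction's 8-bit immediate, i.e. the shuffle
 * order/control byte.  As an illustration of the semantics (not the actual
 * implementation), a plain C fallback for PSHUFD could look roughly like:
 *
 * @code
 *  static void iemAImpl_pshufd_u128_sketch(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil)
 *  {
 *      RTUINT128U const uSrc = *puSrc;   // copy first in case puDst and puSrc alias
 *      puDst->au32[0] = uSrc.au32[ bEvil       & 3];
 *      puDst->au32[1] = uSrc.au32[(bEvil >> 2) & 3];
 *      puDst->au32[2] = uSrc.au32[(bEvil >> 4) & 3];
 *      puDst->au32[3] = uSrc.au32[(bEvil >> 6) & 3];
 *  }
 * @endcode
 */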
3553/** @} */
3554
3555/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
3556 * @{ */
3557typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
3558typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
3559typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
3560typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
3561typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
3562typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
3563FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
3564FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
3565FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
3566FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
3567FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
3568FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
3569FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
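/*
 * Here bShift is the instruction's 8-bit immediate.  The word/dword/qword
 * variants shift each element, while PSLLDQ/PSRLDQ shift the whole register
 * by bytes.  For the logical shifts a count larger than the element width
 * clears the destination; the arithmetic shifts instead behave as if
 * shifting by the maximum in-range count.  A rough C sketch of the word
 * variant (illustration only, using the usual in-place destination):
 *
 * @code
 *  static void iemAImpl_psrlw_imm_u64_sketch(uint64_t *puDst, uint8_t bShift)
 *  {
 *      if (bShift > 15)
 *          *puDst = 0;
 *      else
 *      {
 *          uint64_t const uSrc    = *puDst;
 *          uint64_t       uResult = 0;
 *          for (unsigned i = 0; i < 4; i++)
 *              uResult |= (uint64_t)((uint16_t)(uSrc >> (i * 16)) >> bShift) << (i * 16);
 *          *puDst = uResult;
 *      }
 *  }
 * @endcode
 */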
3570/** @} */
3571
3572/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
3573 * @{ */
3574IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
3575IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
3576#ifndef IEM_WITHOUT_ASSEMBLY
3577IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
3578#endif
3579IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
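/*
 * PMOVMSKB gathers the most significant bit of each source byte into the low
 * bits of the destination and clears the remaining bits.  Illustrative C
 * sketch of the 128-bit variant (not the actual implementation):
 *
 * @code
 *  static void iemAImpl_pmovmskb_u128_sketch(uint64_t *pu64Dst, PCRTUINT128U puSrc)
 *  {
 *      uint64_t fMask = 0;
 *      for (unsigned i = 0; i < 16; i++)
 *          fMask |= (uint64_t)(puSrc->au8[i] >> 7) << i;
 *      *pu64Dst = fMask;
 *  }
 * @endcode
 */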
3580/** @} */
3581
3582/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
3583 * @{ */
3584typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
3585typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
3586typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
3587typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
3588typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
3589typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;
3590
3591FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
3592FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
3593FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
3594FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
3595FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
3596FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;
3597
3598FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
3599FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
3600FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
3601FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
3602FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
3603FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;
3604
3605FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
3606FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
3607FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
3608FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
3609FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
3610FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
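/*
 * For the variable blends the mask operand selects per element: if the most
 * significant bit of a mask byte (or of a mask dword/qword for the PS/PD
 * forms) is set, the corresponding source element is taken, otherwise the
 * destination element is kept.  Illustrative sketch (not the actual code):
 *
 * @code
 *  static void iemAImpl_pblendvb_u128_sketch(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask)
 *  {
 *      for (unsigned i = 0; i < 16; i++)
 *          if (puMask->au8[i] & 0x80)
 *              puDst->au8[i] = puSrc->au8[i];
 *  }
 * @endcode
 */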
3611/** @} */
3612
3613
3614/** @name Media (SSE/MMX/AVX) operation: Sort this later
3615 * @{ */
3616IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3617IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3618IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3619IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3620IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3621
3622IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3623IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3624IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3625IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3626IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3627
3628IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3629IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3630IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3631IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3632IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3633
3634IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3635IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3636IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3637IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3638IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3639
3640IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3641IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3642IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3643IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3644IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3645
3646IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3647IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3648IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3649IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3650IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3651
3652IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3653IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3654IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3655IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3656IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3657
3658IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3659IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3660IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3661IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3662IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3663
3664IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3665IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3666IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3667IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3668IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3669
3670IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3671IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3672IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3673IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3674IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3675
3676IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3677IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3678IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3679IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3680IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3681
3682IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3683IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3684IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3685IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3686IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
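/*
 * The PMOVSX/PMOVZX workers widen the packed source elements; note that the
 * narrow sources are passed by value (uint64_t/uint32_t/uint16_t) rather than
 * by pointer.  Illustrative sketch of the byte-to-word zero extension (the
 * sign-extending variants cast through the signed type instead):
 *
 * @code
 *  static void iemAImpl_pmovzxbw_u128_sketch(PRTUINT128U puDst, uint64_t uSrc)
 *  {
 *      for (unsigned i = 0; i < 8; i++)
 *          puDst->au16[i] = (uint8_t)(uSrc >> (i * 8));
 *  }
 * @endcode
 */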
3687
3688IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3689IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3690IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3691IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3692IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3693
3694IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3695IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3696IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3697IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3698IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3699
3700IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3701IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3702
3703IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3704IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3705IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3706IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3707IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3708
3709IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3710IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3711IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3712IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3713IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3714
3715
3716typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3717typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
3718typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
3719typedef FNIEMAIMPLMEDIAOPTF2U256IMM8 *PFNIEMAIMPLMEDIAOPTF2U256IMM8;
3720typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3721typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
3722typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3723typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
3724
3725FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
3726FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
3727FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
3728FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
3729
3730FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
3731FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
3732FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendd_u128, iemAImpl_vpblendd_u128_fallback;
3733FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
3734FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
3735
3736FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
3737FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
3738FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendd_u256, iemAImpl_vpblendd_u256_fallback;
3739FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
3740FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
3741FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
3742FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;
3743
3744FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
3745FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
3746FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
3747FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
3748FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
3749
3750FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
3751FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
3752FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
3753FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
3754FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
3755
3756FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
3757
3758FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
3759
3760FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
3761FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
3762FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
3763FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
3764FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
3765FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
3766IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3767IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3768
3769typedef struct IEMPCMPISTRXSRC
3770{
3771 RTUINT128U uSrc1;
3772 RTUINT128U uSrc2;
3773} IEMPCMPISTRXSRC;
3774typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
3775typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;
3776
3777typedef struct IEMPCMPESTRXSRC
3778{
3779 RTUINT128U uSrc1;
3780 RTUINT128U uSrc2;
3781 uint64_t u64Rax;
3782 uint64_t u64Rdx;
3783} IEMPCMPESTRXSRC;
3784typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
3785typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;
3786
3787typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3788typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
3789typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3790typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;
3791
3792typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3793typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
3794typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3795typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;
3796
3797FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
3798FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
3799FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
3800FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;
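/*
 * PCMPESTRI/PCMPESTRM take explicit string lengths in RAX/RDX; these reach
 * the workers via the IEMPCMPESTRXSRC bundle together with the two XMM
 * operands.  A hedged usage sketch, where uDstReg, uSrcReg, uGuestRax,
 * uGuestRdx and bImm stand in for whatever the caller actually has at hand:
 *
 * @code
 *  IEMPCMPESTRXSRC Src;
 *  Src.uSrc1  = uDstReg;       // first XMM operand
 *  Src.uSrc2  = uSrcReg;       // second XMM operand
 *  Src.u64Rax = uGuestRax;     // explicit length of the first operand
 *  Src.u64Rdx = uGuestRdx;     // explicit length of the second operand
 *  uint32_t u32Ecx  = 0;
 *  uint32_t fEFlags = 0;       // presumably seeded with the live EFLAGS by real callers
 *  iemAImpl_pcmpestri_u128(&u32Ecx, &fEFlags, &Src, bImm);
 *  // u32Ecx now holds the index result, fEFlags the updated flags.
 * @endcode
 */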
3801
3802FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
3803FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
3804
3805FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
3806FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
3807FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
3808
3809FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllw_imm_u128, iemAImpl_vpsllw_imm_u128_fallback;
3810FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllw_imm_u256, iemAImpl_vpsllw_imm_u256_fallback;
3811FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpslld_imm_u128, iemAImpl_vpslld_imm_u128_fallback;
3812FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpslld_imm_u256, iemAImpl_vpslld_imm_u256_fallback;
3813FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllq_imm_u128, iemAImpl_vpsllq_imm_u128_fallback;
3814FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllq_imm_u256, iemAImpl_vpsllq_imm_u256_fallback;
3815IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
3816IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
3817IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
3818IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
3819
3820FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsraw_imm_u128, iemAImpl_vpsraw_imm_u128_fallback;
3821FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsraw_imm_u256, iemAImpl_vpsraw_imm_u256_fallback;
3822FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrad_imm_u128, iemAImpl_vpsrad_imm_u128_fallback;
3823FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrad_imm_u256, iemAImpl_vpsrad_imm_u256_fallback;
3824
3825FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlw_imm_u128, iemAImpl_vpsrlw_imm_u128_fallback;
3826FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlw_imm_u256, iemAImpl_vpsrlw_imm_u256_fallback;
3827FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrld_imm_u128, iemAImpl_vpsrld_imm_u128_fallback;
3828FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrld_imm_u256, iemAImpl_vpsrld_imm_u256_fallback;
3829FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlq_imm_u128, iemAImpl_vpsrlq_imm_u128_fallback;
3830FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlq_imm_u256, iemAImpl_vpsrlq_imm_u256_fallback;
3831IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
3832IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
3833IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
3834IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
3835
3836FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilps_u128, iemAImpl_vpermilps_u128_fallback;
3837FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilps_imm_u128, iemAImpl_vpermilps_imm_u128_fallback;
3838FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilps_u256, iemAImpl_vpermilps_u256_fallback;
3839FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilps_imm_u256, iemAImpl_vpermilps_imm_u256_fallback;
3840
3841FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilpd_u128, iemAImpl_vpermilpd_u128_fallback;
3842FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilpd_imm_u128, iemAImpl_vpermilpd_imm_u128_fallback;
3843FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilpd_u256, iemAImpl_vpermilpd_u256_fallback;
3844FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilpd_imm_u256, iemAImpl_vpermilpd_imm_u256_fallback;
3845
3846FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvd_u128, iemAImpl_vpsllvd_u128_fallback;
3847FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvd_u256, iemAImpl_vpsllvd_u256_fallback;
3848FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvq_u128, iemAImpl_vpsllvq_u128_fallback;
3849FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvq_u256, iemAImpl_vpsllvq_u256_fallback;
3850FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsravd_u128, iemAImpl_vpsravd_u128_fallback;
3851FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsravd_u256, iemAImpl_vpsravd_u256_fallback;
3852FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvd_u128, iemAImpl_vpsrlvd_u128_fallback;
3853FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvd_u256, iemAImpl_vpsrlvd_u256_fallback;
3854FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvq_u128, iemAImpl_vpsrlvq_u128_fallback;
3855FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvq_u256, iemAImpl_vpsrlvq_u256_fallback;
3856/** @} */
3857
3858/** @name Media Odds and Ends
3859 * @{ */
3860typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
3861typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
3862typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
3863typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
3864FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
3865FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
3866FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
3867FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
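/*
 * These implement the SSE4.2 CRC32 instruction, i.e. CRC-32C (Castagnoli,
 * polynomial 0x11EDC6F41) without any pre/post bit inversion.  For
 * illustration (not the actual implementation), a bit-at-a-time C fallback
 * for the byte variant could look like:
 *
 * @code
 *  static void iemAImpl_crc32_u8_sketch(uint32_t *puDst, uint8_t uSrc)
 *  {
 *      uint32_t uCrc = *puDst ^ uSrc;
 *      for (unsigned i = 0; i < 8; i++)
 *          uCrc = (uCrc >> 1) ^ (UINT32_C(0x82f63b78) & (0U - (uCrc & 1)));
 *      *puDst = uCrc;
 *  }
 * @endcode
 */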
3868
3869typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
3870typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
3871FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
3872FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
3873
3874typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U64,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point value. */
3875typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
3876typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U64,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point value. */
3877typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
3878typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U32,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point value. */
3879typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
3880typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U32,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point value. */
3881typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
3882
3883FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
3884FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
3885
3886FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
3887FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
3888
3889FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
3890FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
3891
3892FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
3893FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
3894
3895typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I32,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
3896typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
3897typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I64,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
3898typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
3899
3900FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
3901FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
3902
3903typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I32,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
3904typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
3905typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I64,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
3906typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
3907
3908FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
3909FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
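/*
 * All these scalar conversion workers follow the same convention: MXCSR is
 * passed in by value and the updated MXCSR (i.e. with any newly raised
 * exception flags) is returned.  Minimal usage sketch:
 *
 * @code
 *  uint32_t fMxCsr   = 0x1f80;   // power-on MXCSR: round to nearest, all exceptions masked
 *  int32_t  i32Result;
 *  uint64_t u64Value = UINT64_C(0x4037000000000000);  // 23.0 as raw IEEE-754 double bits
 *  fMxCsr = iemAImpl_cvttsd2si_i32_r64(fMxCsr, &i32Result, &u64Value);
 *  // i32Result == 23; fMxCsr carries the resulting status flags, if any.
 * @endcode
 */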
3910
3911
3912typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR32R32,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT32U uSrc1, RTFLOAT32U uSrc2));
3913typedef FNIEMAIMPLF2EFLMXCSRR32R32 *PFNIEMAIMPLF2EFLMXCSRR32R32;
3914
3915typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR64R64,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT64U uSrc1, RTFLOAT64U uSrc2));
3916typedef FNIEMAIMPLF2EFLMXCSRR64R64 *PFNIEMAIMPLF2EFLMXCSRR64R64;
3917
3918FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_ucomiss_u128;
3919FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
3920
3921FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_ucomisd_u128;
3922FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
3923
3924FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_comiss_u128;
3925FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
3926
3927FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_comisd_u128;
3928FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
3929
3930
3931typedef struct IEMMEDIAF2XMMSRC
3932{
3933 X86XMMREG uSrc1;
3934 X86XMMREG uSrc2;
3935} IEMMEDIAF2XMMSRC;
3936typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
3937typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
3938
3939typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
3940typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
3941
3942FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
3943FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
3944FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
3945FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
3946FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
3947FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
3948
3949FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
3950FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
3951
3952FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
3953FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;
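/*
 * These workers get both packed inputs bundled in an IEMMEDIAF2XMMSRC (the
 * first source typically being the destination register's old value) and
 * return the updated MXCSR.  Usage sketch with hypothetical locals uDstReg,
 * uSrcReg and bImm:
 *
 * @code
 *  IEMMEDIAF2XMMSRC Src;
 *  Src.uSrc1 = uDstReg;            // first operand
 *  Src.uSrc2 = uSrcReg;            // second operand
 *  uint32_t fMxCsr = 0x1f80;       // current MXCSR (power-on value used here)
 *  fMxCsr = iemAImpl_cmpps_u128(fMxCsr, &uDstReg, &Src, bImm);  // bImm = comparison predicate
 * @endcode
 */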
3954
3955typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U128,(uint32_t fMxCsrIn, uint64_t *pu64Dst, PCX86XMMREG pSrc));
3956typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
3957
3958FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
3959FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
3960
3961typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU128U64,(uint32_t fMxCsrIn, PX86XMMREG pDst, uint64_t u64Src));
3962typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
3963
3964FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
3965FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
3966
3967typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U64,(uint32_t fMxCsrIn, uint64_t *pu64Dst, uint64_t u64Src));
3968typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
3969
3970FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
3971FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
3972
3973/** @} */
3974
3975
3976/** @name Function tables.
3977 * @{
3978 */
3979
3980/**
3981 * Function table for a binary operator providing implementation based on
3982 * operand size.
3983 */
3984typedef struct IEMOPBINSIZES
3985{
3986 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
3987 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
3988 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
3989 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
3990} IEMOPBINSIZES;
3991/** Pointer to a binary operator function table. */
3992typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
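/*
 * Illustration only (hypothetical instruction "myop"): such a table bundles
 * the four size variants together with their locked forms, and the decoder
 * then picks the member matching the effective operand size:
 *
 * @code
 *  static IEMOPBINSIZES const s_iemAImpl_myop =
 *  {
 *      iemAImpl_myop_u8,  iemAImpl_myop_u8_locked,
 *      iemAImpl_myop_u16, iemAImpl_myop_u16_locked,
 *      iemAImpl_myop_u32, iemAImpl_myop_u32_locked,
 *      iemAImpl_myop_u64, iemAImpl_myop_u64_locked,
 *  };
 *  PCIEMOPBINSIZES const pImpl = &s_iemAImpl_myop;
 *  // ... pImpl->pfnNormalU32, pImpl->pfnLockedU32, etc. as appropriate.
 * @endcode
 */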
3993
3994
3995/**
3996 * Function table for a binary operator providing implementation based on
3997 * operand size.
3998 */
3999typedef struct IEMOPBINTODOSIZES
4000{
4001 PFNIEMAIMPLBINTODOU8 pfnNormalU8, pfnLockedU8;
4002 PFNIEMAIMPLBINTODOU16 pfnNormalU16, pfnLockedU16;
4003 PFNIEMAIMPLBINTODOU32 pfnNormalU32, pfnLockedU32;
4004 PFNIEMAIMPLBINTODOU64 pfnNormalU64, pfnLockedU64;
4005} IEMOPBINTODOSIZES;
4006/** Pointer to a binary operator function table. */
4007typedef IEMOPBINTODOSIZES const *PCIEMOPBINTODOSIZES;
4008
4009
4010/**
4011 * Function table for a unary operator providing implementation based on
4012 * operand size.
4013 */
4014typedef struct IEMOPUNARYSIZES
4015{
4016 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
4017 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
4018 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
4019 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
4020} IEMOPUNARYSIZES;
4021/** Pointer to a unary operator function table. */
4022typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
4023
4024
4025/**
4026 * Function table for a shift operator providing implementation based on
4027 * operand size.
4028 */
4029typedef struct IEMOPSHIFTSIZES
4030{
4031 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
4032 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
4033 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
4034 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
4035} IEMOPSHIFTSIZES;
4036/** Pointer to a shift operator function table. */
4037typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
4038
4039
4040/**
4041 * Function table for a multiplication or division operation.
4042 */
4043typedef struct IEMOPMULDIVSIZES
4044{
4045 PFNIEMAIMPLMULDIVU8 pfnU8;
4046 PFNIEMAIMPLMULDIVU16 pfnU16;
4047 PFNIEMAIMPLMULDIVU32 pfnU32;
4048 PFNIEMAIMPLMULDIVU64 pfnU64;
4049} IEMOPMULDIVSIZES;
4050/** Pointer to a multiplication or division operation function table. */
4051typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
4052
4053
4054/**
4055 * Function table for a double precision shift operator providing implementation
4056 * based on operand size.
4057 */
4058typedef struct IEMOPSHIFTDBLSIZES
4059{
4060 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
4061 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
4062 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
4063} IEMOPSHIFTDBLSIZES;
4064/** Pointer to a double precision shift function table. */
4065typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
4066
4067
4068/**
4069 * Function table for media instruction taking two full sized media source
4070 * registers and one full sized destination register (AVX).
4071 */
4072typedef struct IEMOPMEDIAF3
4073{
4074 PFNIEMAIMPLMEDIAF3U128 pfnU128;
4075 PFNIEMAIMPLMEDIAF3U256 pfnU256;
4076} IEMOPMEDIAF3;
4077/** Pointer to a media operation function table for 3 full sized ops (AVX). */
4078typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
4079
4080/** @def IEMOPMEDIAF3_INIT_VARS_EX
4081 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4082 * given functions as initializers. For use in AVX functions where a pair of
4083 * functions are only used once and the function table need not be public. */
4084#ifndef TST_IEM_CHECK_MC
4085# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4086# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4087 static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4088 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4089# else
4090# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4091 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4092# endif
4093#else
4094# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4095#endif
4096/** @def IEMOPMEDIAF3_INIT_VARS
4097 * Generate AVX function tables for the @a a_InstrNm instruction.
4098 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
4099#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
4100 IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4101 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
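/*
 * Usage sketch with a made-up instruction name ("vmyop"); the feature test
 * below is only a stand-in for whatever check the caller really performs:
 *
 * @code
 *  IEMOPMEDIAF3_INIT_VARS(vmyop);   // declares s_Host (x86/amd64 w/ assembly) and s_Fallback
 *  PCIEMOPMEDIAF3 const pImpl = fHostCanDoIt ? &s_Host : &s_Fallback;
 *  // pImpl->pfnU128 / pImpl->pfnU256 can then be handed on to the MC blocks.
 * @endcode
 *
 * Keep in mind that on other hosts (or with IEM_WITHOUT_ASSEMBLY) only
 * s_Fallback is declared, so real code guards the s_Host reference.
 */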
4102
4103/**
4104 * Function table for media instruction taking two full sized media source
4105 * registers and one full sized destination register, but no additional state
4106 * (AVX).
4107 */
4108typedef struct IEMOPMEDIAOPTF3
4109{
4110 PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
4111 PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
4112} IEMOPMEDIAOPTF3;
4113/** Pointer to a media operation function table for 3 full sized ops (AVX). */
4114typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
4115
4116/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
4117 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4118 * given functions as initializers. For use in AVX functions where a pair of
4119 * functions are only used once and the function table need not be public. */
4120#ifndef TST_IEM_CHECK_MC
4121# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4122# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4123 static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4124 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4125# else
4126# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4127 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4128# endif
4129#else
4130# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4131#endif
4132/** @def IEMOPMEDIAOPTF3_INIT_VARS
4133 * Generate AVX function tables for the @a a_InstrNm instruction.
4134 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
4135#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
4136 IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4137 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4138
4139/**
4140 * Function table for media instruction taking one full sized media source
4141 * register and one full sized destination register, but no additional state
4142 * (AVX).
4143 */
4144typedef struct IEMOPMEDIAOPTF2
4145{
4146 PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
4147 PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
4148} IEMOPMEDIAOPTF2;
4149/** Pointer to a media operation function table for 2 full sized ops (AVX). */
4150typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
4151
4152/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
4153 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4154 * given functions as initializers. For use in AVX functions where a pair of
4155 * functions are only used once and the function table need not be public. */
4156#ifndef TST_IEM_CHECK_MC
4157# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4158# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4159 static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4160 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4161# else
4162# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4163 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4164# endif
4165#else
4166# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4167#endif
4168/** @def IEMOPMEDIAOPTF2_INIT_VARS
4169 * Generate AVX function tables for the @a a_InstrNm instruction.
4170 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
4171#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
4172 IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4173 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4174
4175/**
4176 * Function table for media instruction taking one full sized media source
4177 * register and one full sized destination register and an 8-bit immediate, but no additional state
4178 * (AVX).
4179 */
4180typedef struct IEMOPMEDIAOPTF2IMM8
4181{
4182 PFNIEMAIMPLMEDIAOPTF2U128IMM8 pfnU128;
4183 PFNIEMAIMPLMEDIAOPTF2U256IMM8 pfnU256;
4184} IEMOPMEDIAOPTF2IMM8;
4185/** Pointer to a media operation function table for 2 full sized ops (AVX). */
4186typedef IEMOPMEDIAOPTF2IMM8 const *PCIEMOPMEDIAOPTF2IMM8;
4187
4188/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX
4189 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4190 * given functions as initializers. For use in AVX functions where a pair of
4191 * functions are only used once and the function table need not be public. */
4192#ifndef TST_IEM_CHECK_MC
4193# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4194# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4195 static IEMOPMEDIAOPTF2IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4196 static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4197# else
4198# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4199 static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4200# endif
4201#else
4202# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4203#endif
4204/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS
4205 * Generate AVX function tables for the @a a_InstrNm instruction.
4206 * @sa IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX */
4207#define IEMOPMEDIAOPTF2IMM8_INIT_VARS(a_InstrNm) \
4208 IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256),\
4209 RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256_fallback))
4210
4211/**
4212 * Function table for media instruction taking two full sized media source
4213 * registers and one full sized destination register and an 8-bit immediate, but no additional state
4214 * (AVX).
4215 */
4216typedef struct IEMOPMEDIAOPTF3IMM8
4217{
4218 PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
4219 PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
4220} IEMOPMEDIAOPTF3IMM8;
4221/** Pointer to a media operation function table for 3 full sized ops (AVX). */
4222typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
4223
4224/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
4225 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4226 * given functions as initializers. For use in AVX functions where a pair of
4227 * functions are only used once and the function table need not be public. */
4228#ifndef TST_IEM_CHECK_MC
4229# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4230# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4231 static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4232 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4233# else
4234# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4235 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4236# endif
4237#else
4238# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4239#endif
4240/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
4241 * Generate AVX function tables for the @a a_InstrNm instruction.
4242 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
4243#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
4244 IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4245 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4246/** @} */
4247
4248
4249/**
4250 * Function table for blend type instruction taking three full sized media source
4251 * registers and one full sized destination register, but no additional state
4252 * (AVX).
4253 */
4254typedef struct IEMOPBLENDOP
4255{
4256 PFNIEMAIMPLAVXBLENDU128 pfnU128;
4257 PFNIEMAIMPLAVXBLENDU256 pfnU256;
4258} IEMOPBLENDOP;
4259/** Pointer to a media operation function table for 4 full sized ops (AVX). */
4260typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
4261
4262/** @def IEMOPBLENDOP_INIT_VARS_EX
4263 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
4264 * given functions as initializers. For use in AVX functions where a pair of
4265 * functions are only used once and the function table need not be public. */
4266#ifndef TST_IEM_CHECK_MC
4267# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
4268# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4269 static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
4270 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4271# else
4272# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
4273 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
4274# endif
4275#else
4276# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
4277#endif
4278/** @def IEMOPBLENDOP_INIT_VARS
4279 * Generate AVX function tables for the @a a_InstrNm instruction.
4280 * @sa IEMOPBLENDOP_INIT_VARS_EX */
4281#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
4282 IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
4283 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
4284
4285
4286/** @name SSE/AVX single/double precision floating point operations.
4287 * @{ */
4288typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
4289typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
4290typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R32,(uint32_t uMxCsrIn, PX86XMMREG Result, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
4291typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
4292typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
4293typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
4294
4295typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
4296typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
4297typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R32,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
4298typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
4299typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
4300typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
4301
4302typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U256,(uint32_t uMxCsrIn, PX86YMMREG pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
4303typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
4304
4305FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
4306FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
4307FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
4308FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
4309FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
4310FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
4311FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
4312FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
4313FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
4314FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
4315FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
4316FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
4317FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
4318FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
4319FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
4320FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
4321FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
4322FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
4323FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
4324FNIEMAIMPLFPSSEF2U128 iemAImpl_rcpps_u128;
4325FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
4326FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
4327FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
4328FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2pd_u128;
4329
4330FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
4331FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
4332FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
4333FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
4334FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
4335FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
4336
4337FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
4338FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
4339FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
4340FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
4341FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
4342FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
4343FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
4344FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
4345FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
4346FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
4347FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
4348FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
4349FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
4350FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
4351FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
4352FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
4353FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
4354FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rcpss_u128_r32;
4355
4356FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
4357FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
4358FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
4359FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
4360FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
4361FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
4362FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
4363FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
4364FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
4365FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
4366FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
4367FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
4368FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
4369FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
4370FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
4371FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
4372FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
4373FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
4374FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
4375FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
4376FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
4377FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;
4378
4379FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
4380FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
4381FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
4382FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
4383FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
4384FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
4385FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
4386FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
4387FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
4388FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
4389FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
4390FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
4391FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
4392FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
4393
4394FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
4395FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
4396FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
4397FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
4398FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
4399FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
4400FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
4401FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
4402FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
4403FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
4404FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
4405FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
4406FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
4407FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
4408FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
4409FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
4410FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
4411FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
4412FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
4413FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
4414/** @} */
4415
4416/** @name C instruction implementations for anything slightly complicated.
4417 * @{ */
4418
4419/**
4420 * For typedef'ing or declaring a C instruction implementation function taking
4421 * no extra arguments.
4422 *
4423 * @param a_Name The name of the type.
4424 */
4425# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
4426 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4427/**
4428 * For defining a C instruction implementation function taking no extra
4429 * arguments.
4430 *
4431 * @param a_Name The name of the function
4432 */
4433# define IEM_CIMPL_DEF_0(a_Name) \
4434 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4435/**
4436 * Prototype version of IEM_CIMPL_DEF_0.
4437 */
4438# define IEM_CIMPL_PROTO_0(a_Name) \
4439 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4440/**
4441 * For calling a C instruction implementation function taking no extra
4442 * arguments.
4443 *
4444 * This special call macro adds default arguments to the call and allows us to
4445 * change these later.
4446 *
4447 * @param a_fn The name of the function.
4448 */
4449# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
4450
4451/** Type for a C instruction implementation function taking no extra
4452 * arguments. */
4453typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
4454/** Function pointer type for a C instruction implementation function taking
4455 * no extra arguments. */
4456typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
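/*
 * Minimal sketch of how these macros fit together (the function name is made
 * up for illustration; a real implementation would also advance RIP etc.):
 *
 * @code
 *  IEM_CIMPL_DEF_0(iemCImpl_MyHypotheticalOp)
 *  {
 *      RT_NOREF(cbInstr);
 *      // ... emulate the instruction using pVCpu ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 *
 * IEM_CIMPL_CALL_0(iemCImpl_MyHypotheticalOp) then simply expands to
 * iemCImpl_MyHypotheticalOp(pVCpu, cbInstr), so the implicit arguments can be
 * changed in one place later.
 */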
4457
4458/**
4459 * For typedef'ing or declaring a C instruction implementation function taking
4460 * one extra argument.
4461 *
4462 * @param a_Name The name of the type.
4463 * @param a_Type0 The argument type.
4464 * @param a_Arg0 The argument name.
4465 */
4466# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
4467 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4468/**
4469 * For defining a C instruction implementation function taking one extra
4470 * argument.
4471 *
4472 * @param a_Name The name of the function
4473 * @param a_Type0 The argument type.
4474 * @param a_Arg0 The argument name.
4475 */
4476# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
4477 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4478/**
4479 * Prototype version of IEM_CIMPL_DEF_1.
4480 */
4481# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
4482 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4483/**
4484 * For calling a C instruction implementation function taking one extra
4485 * argument.
4486 *
4487 * This special call macro adds default arguments to the call and allows us to
4488 * change these later.
4489 *
4490 * @param a_fn The name of the function.
4491 * @param a0 The name of the 1st argument.
4492 */
4493# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4494
4495/**
4496 * For typedef'ing or declaring a C instruction implementation function taking
4497 * two extra arguments.
4498 *
4499 * @param a_Name The name of the type.
4500 * @param a_Type0 The type of the 1st argument
4501 * @param a_Arg0 The name of the 1st argument.
4502 * @param a_Type1 The type of the 2nd argument.
4503 * @param a_Arg1 The name of the 2nd argument.
4504 */
4505# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4506 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4507/**
4508 * For defining a C instruction implementation function taking two extra
4509 * arguments.
4510 *
4511 * @param a_Name The name of the function.
4512 * @param a_Type0 The type of the 1st argument
4513 * @param a_Arg0 The name of the 1st argument.
4514 * @param a_Type1 The type of the 2nd argument.
4515 * @param a_Arg1 The name of the 2nd argument.
4516 */
4517# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4518 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4519/**
4520 * Prototype version of IEM_CIMPL_DEF_2.
4521 */
4522# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4523 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4524/**
4525 * For calling a C instruction implementation function taking two extra
4526 * arguments.
4527 *
4528 * This special call macro adds default arguments to the call and allows us to
4529 * change them later.
4530 *
4531 * @param a_fn The name of the function.
4532 * @param a0 The name of the 1st argument.
4533 * @param a1 The name of the 2nd argument.
4534 */
4535# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4536
4537/**
4538 * For typedef'ing or declaring a C instruction implementation function taking
4539 * three extra arguments.
4540 *
4541 * @param a_Name The name of the type.
4542 * @param a_Type0 The type of the 1st argument
4543 * @param a_Arg0 The name of the 1st argument.
4544 * @param a_Type1 The type of the 2nd argument.
4545 * @param a_Arg1 The name of the 2nd argument.
4546 * @param a_Type2 The type of the 3rd argument.
4547 * @param a_Arg2 The name of the 3rd argument.
4548 */
4549# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4550 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4551/**
4552 * For defining a C instruction implementation function taking three extra
4553 * arguments.
4554 *
4555 * @param a_Name The name of the function.
4556 * @param a_Type0 The type of the 1st argument
4557 * @param a_Arg0 The name of the 1st argument.
4558 * @param a_Type1 The type of the 2nd argument.
4559 * @param a_Arg1 The name of the 2nd argument.
4560 * @param a_Type2 The type of the 3rd argument.
4561 * @param a_Arg2 The name of the 3rd argument.
4562 */
4563# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4564 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4565/**
4566 * Prototype version of IEM_CIMPL_DEF_3.
4567 */
4568# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4569 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4570/**
4571 * For calling a C instruction implementation function taking three extra
4572 * arguments.
4573 *
4574 * This special call macro adds default arguments to the call and allows us to
4575 * change them later.
4576 *
4577 * @param a_fn The name of the function.
4578 * @param a0 The name of the 1st argument.
4579 * @param a1 The name of the 2nd argument.
4580 * @param a2 The name of the 3rd argument.
4581 */
4582# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4583
4584
4585/**
4586 * For typedef'ing or declaring a C instruction implementation function taking
4587 * four extra arguments.
4588 *
4589 * @param a_Name The name of the type.
4590 * @param a_Type0 The type of the 1st argument
4591 * @param a_Arg0 The name of the 1st argument.
4592 * @param a_Type1 The type of the 2nd argument.
4593 * @param a_Arg1 The name of the 2nd argument.
4594 * @param a_Type2 The type of the 3rd argument.
4595 * @param a_Arg2 The name of the 3rd argument.
4596 * @param a_Type3 The type of the 4th argument.
4597 * @param a_Arg3 The name of the 4th argument.
4598 */
4599# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4600 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
4601/**
4602 * For defining a C instruction implementation function taking four extra
4603 * arguments.
4604 *
4605 * @param a_Name The name of the function.
4606 * @param a_Type0 The type of the 1st argument
4607 * @param a_Arg0 The name of the 1st argument.
4608 * @param a_Type1 The type of the 2nd argument.
4609 * @param a_Arg1 The name of the 2nd argument.
4610 * @param a_Type2 The type of the 3rd argument.
4611 * @param a_Arg2 The name of the 3rd argument.
4612 * @param a_Type3 The type of the 4th argument.
4613 * @param a_Arg3 The name of the 4th argument.
4614 */
4615# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4616 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4617 a_Type2 a_Arg2, a_Type3 a_Arg3))
4618/**
4619 * Prototype version of IEM_CIMPL_DEF_4.
4620 */
4621# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4622 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4623 a_Type2 a_Arg2, a_Type3 a_Arg3))
4624/**
4625 * For calling a C instruction implementation function taking four extra
4626 * arguments.
4627 *
4628 * This special call macro adds default arguments to the call and allows us to
4629 * change them later.
4630 *
4631 * @param a_fn The name of the function.
4632 * @param a0 The name of the 1st argument.
4633 * @param a1 The name of the 2nd argument.
4634 * @param a2 The name of the 3rd argument.
4635 * @param a3 The name of the 4th argument.
4636 */
4637# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4638
4639
4640/**
4641 * For typedef'ing or declaring a C instruction implementation function taking
4642 * five extra arguments.
4643 *
4644 * @param a_Name The name of the type.
4645 * @param a_Type0 The type of the 1st argument
4646 * @param a_Arg0 The name of the 1st argument.
4647 * @param a_Type1 The type of the 2nd argument.
4648 * @param a_Arg1 The name of the 2nd argument.
4649 * @param a_Type2 The type of the 3rd argument.
4650 * @param a_Arg2 The name of the 3rd argument.
4651 * @param a_Type3 The type of the 4th argument.
4652 * @param a_Arg3 The name of the 4th argument.
4653 * @param a_Type4 The type of the 5th argument.
4654 * @param a_Arg4 The name of the 5th argument.
4655 */
4656# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4657 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
4658 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
4659 a_Type3 a_Arg3, a_Type4 a_Arg4))
4660/**
4661 * For defining a C instruction implementation function taking five extra
4662 * arguments.
4663 *
4664 * @param a_Name The name of the function.
4665 * @param a_Type0 The type of the 1st argument
4666 * @param a_Arg0 The name of the 1st argument.
4667 * @param a_Type1 The type of the 2nd argument.
4668 * @param a_Arg1 The name of the 2nd argument.
4669 * @param a_Type2 The type of the 3rd argument.
4670 * @param a_Arg2 The name of the 3rd argument.
4671 * @param a_Type3 The type of the 4th argument.
4672 * @param a_Arg3 The name of the 4th argument.
4673 * @param a_Type4 The type of the 5th argument.
4674 * @param a_Arg4 The name of the 5th argument.
4675 */
4676# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4677 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4678 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4679/**
4680 * Prototype version of IEM_CIMPL_DEF_5.
4681 */
4682# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4683 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4684 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4685/**
4686 * For calling a C instruction implementation function taking five extra
4687 * arguments.
4688 *
4689 * This special call macro adds default arguments to the call and allows us to
4690 * change them later.
4691 *
4692 * @param a_fn The name of the function.
4693 * @param a0 The name of the 1st argument.
4694 * @param a1 The name of the 2nd argument.
4695 * @param a2 The name of the 3rd argument.
4696 * @param a3 The name of the 4th argument.
4697 * @param a4 The name of the 5th argument.
4698 */
4699# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
4700
4701/** @} */
4702
4703
4704/** @name Opcode Decoder Function Types.
4705 * @{ */
4706
4707/** @typedef PFNIEMOP
4708 * Pointer to an opcode decoder function.
4709 */
4710
4711/** @def FNIEMOP_DEF
4712 * Define an opcode decoder function.
4713 *
4714 * We're using macros for this so that adding and removing parameters as well as
4715 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL.
4716 *
4717 * @param a_Name The function name.
4718 */
4719
4720/** @typedef PFNIEMOPRM
4721 * Pointer to an opcode decoder function with RM byte.
4722 */
4723
4724/** @def FNIEMOPRM_DEF
4725 * Define an opcode decoder function with RM byte.
4726 *
4727 * We're using macros for this so that adding and removing parameters as well as
4728 * tweaking compiler-specific attributes become easier. See FNIEMOP_CALL_1.
4729 *
4730 * @param a_Name The function name.
4731 */
4732
4733#if defined(__GNUC__) && defined(RT_ARCH_X86)
4734typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
4735typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4736# define FNIEMOP_DEF(a_Name) \
4737 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
4738# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4739 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4740# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4741 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4742
4743#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
4744typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
4745typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4746# define FNIEMOP_DEF(a_Name) \
4747 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4748# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4749 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4750# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4751 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4752
4753#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
4754typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4755typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4756# define FNIEMOP_DEF(a_Name) \
4757 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
4758# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4759 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4760# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4761 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4762
4763#else
4764typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4765typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4766# define FNIEMOP_DEF(a_Name) \
4767 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4768# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4769 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4770# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4771 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4772
4773#endif
4774#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4775
4776/**
4777 * Call an opcode decoder function.
4778 *
4779 * We're using macros for this so that adding and removing parameters can be
4780 * done as we please. See FNIEMOP_DEF.
4781 */
4782#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
4783
4784/**
4785 * Call a common opcode decoder function taking one extra argument.
4786 *
4787 * We're using macros for this so that adding and removing parameters can be
4788 * done as we please. See FNIEMOP_DEF_1.
4789 */
4790#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
4791
4792/**
4793 * Call a common opcode decoder function taking two extra arguments.
4794 *
4795 * We're using macros for this so that adding and removing parameters can be
4796 * done as we please. See FNIEMOP_DEF_2.
4797 */
4798#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
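/**
 * Illustrative sketch only: how an opcode decoder function is defined and
 * dispatched with the macros above.  The function names are hypothetical and
 * the opcode fetch macro is assumed to be the usual IEM_OPCODE_GET_NEXT_U8.
 *
 * @code
 *  FNIEMOPRM_DEF(iemOp_example_rm)                 // hypothetical worker taking the ModR/M byte
 *  {
 *      RT_NOREF(pVCpu, bRm);
 *      // ... decode further and emulate ...
 *      return VINF_SUCCESS;
 *  }
 *
 *  FNIEMOP_DEF(iemOp_example)                      // hypothetical opcode table entry
 *  {
 *      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);  // assumption: standard opcode fetch macro
 *      return FNIEMOP_CALL_1(iemOp_example_rm, bRm);
 *  }
 * @endcode
 */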
4799/** @} */
4800
4801
4802/** @name Misc Helpers
4803 * @{ */
4804
4805/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
4806 * due to GCC lacking knowledge about the value range of a switch. */
4807#if RT_CPLUSPLUS_PREREQ(202000)
4808# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4809#else
4810# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4811#endif
4812
4813/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
4814#if RT_CPLUSPLUS_PREREQ(202000)
4815# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
4816#else
4817# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
4818#endif
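/**
 * Illustrative sketch only: typical use of the default-case macros above in a
 * switch over the effective operand size (the switch subject and the variable
 * are illustrative, not prescribed).
 *
 * @code
 *  uint8_t cbValue;
 *  switch (pVCpu->iem.s.enmEffOpSize)
 *  {
 *      case IEMMODE_16BIT: cbValue = 2; break;
 *      case IEMMODE_32BIT: cbValue = 4; break;
 *      case IEMMODE_64BIT: cbValue = 8; break;
 *      IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *  }
 * @endcode
 */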
4819
4820/**
4821 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4822 * occasion.
4823 */
4824#ifdef LOG_ENABLED
4825# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4826 do { \
4827 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
4828 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4829 } while (0)
4830#else
4831# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4832 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4833#endif
4834
4835/**
4836 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4837 * occasion using the supplied logger statement.
4838 *
4839 * @param a_LoggerArgs What to log on failure.
4840 */
4841#ifdef LOG_ENABLED
4842# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4843 do { \
4844 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
4845 /*LogFunc(a_LoggerArgs);*/ \
4846 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4847 } while (0)
4848#else
4849# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4850 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4851#endif
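/**
 * Illustrative sketch only: bailing out of an emulation path with the
 * not-implemented helpers above (the condition is hypothetical).
 *
 * @code
 *  if (fSomeUnhandledCase)     // hypothetical condition
 *      IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("unhandled case %#x\n", fSomeUnhandledCase));
 * @endcode
 */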
4852
4853/**
4854 * Gets the CPU mode (from fExec) as an IEMMODE value.
4855 *
4856 * @returns IEMMODE
4857 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4858 */
4859#define IEM_GET_CPU_MODE(a_pVCpu) ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
4860
4861/**
4862 * Check if we're currently executing in real or virtual 8086 mode.
4863 *
4864 * @returns @c true if it is, @c false if not.
4865 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4866 */
4867#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (( ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
4868 & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
4869
4870/**
4871 * Check if we're currently executing in virtual 8086 mode.
4872 *
4873 * @returns @c true if it is, @c false if not.
4874 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4875 */
4876#define IEM_IS_V86_MODE(a_pVCpu) (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
4877
4878/**
4879 * Check if we're currently executing in long mode.
4880 *
4881 * @returns @c true if it is, @c false if not.
4882 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4883 */
4884#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
4885
4886/**
4887 * Check if we're currently executing in a 16-bit code segment.
4888 *
4889 * @returns @c true if it is, @c false if not.
4890 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4891 */
4892#define IEM_IS_16BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
4893
4894/**
4895 * Check if we're currently executing in a 32-bit code segment.
4896 *
4897 * @returns @c true if it is, @c false if not.
4898 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4899 */
4900#define IEM_IS_32BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
4901
4902/**
4903 * Check if we're currently executing in a 64-bit code segment.
4904 *
4905 * @returns @c true if it is, @c false if not.
4906 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4907 */
4908#define IEM_IS_64BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
4909
4910/**
4911 * Check if we're currently executing in real mode.
4912 *
4913 * @returns @c true if it is, @c false if not.
4914 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4915 */
4916#define IEM_IS_REAL_MODE(a_pVCpu) (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
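/**
 * Illustrative sketch only: typical use of the mode queries above in an
 * instruction implementation (the surrounding logic is hypothetical).
 *
 * @code
 *  if (IEM_IS_REAL_OR_V86_MODE(pVCpu))     // e.g. instruction invalid outside protected mode
 *      return iemRaiseUndefinedOpcode(pVCpu);
 *  uint8_t const cbOp = IEM_IS_16BIT_CODE(pVCpu) ? 2 : IEM_IS_32BIT_CODE(pVCpu) ? 4 : 8;
 *  // ... use cbOp ...
 * @endcode
 */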
4917
4918/**
4919 * Gets the current protection level (CPL).
4920 *
4921 * @returns 0..3
4922 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4923 */
4924#define IEM_GET_CPL(a_pVCpu) (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
4925
4926/**
4927 * Sets the current protection level (CPL).
4928 *
4929 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4930 */
4931#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
4932 do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
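/**
 * Illustrative sketch only: a typical privilege check built on IEM_GET_CPL
 * (the instruction context is hypothetical).
 *
 * @code
 *  if (IEM_GET_CPL(pVCpu) != 0)    // e.g. privileged instruction
 *      return iemRaiseGeneralProtectionFault0(pVCpu);
 * @endcode
 */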
4933
4934/**
4935 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
4936 * @returns PCCPUMFEATURES
4937 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4938 */
4939#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
4940
4941/**
4942 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
4943 * @returns PCCPUMFEATURES
4944 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4945 */
4946#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
4947
4948/**
4949 * Evaluates to true if we're presenting an Intel CPU to the guest.
4950 */
4951#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
4952
4953/**
4954 * Evaluates to true if we're presenting an AMD CPU to the guest.
4955 */
4956#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
4957
4958/**
4959 * Check if the address is canonical.
4960 */
4961#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
4962
4963/** Checks if the ModR/M byte is in register mode or not. */
4964#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
4965/** Checks if the ModR/M byte is in memory mode or not. */
4966#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
4967
4968/**
4969 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
4970 *
4971 * For use during decoding.
4972 */
4973#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
4974/**
4975 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
4976 *
4977 * For use during decoding.
4978 */
4979#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
4980
4981/**
4982 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
4983 *
4984 * For use during decoding.
4985 */
4986#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
4987/**
4988 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
4989 *
4990 * For use during decoding.
4991 */
4992#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
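/**
 * Illustrative sketch only: decoding a register-form ModR/M byte with the
 * helpers above.  The opcode fetch macro is assumed to be the usual
 * IEM_OPCODE_GET_NEXT_U8 and the register usage is hypothetical.
 *
 * @code
 *  uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *  if (IEM_IS_MODRM_REG_MODE(bRm))
 *  {
 *      uint8_t const iGRegDst = IEM_GET_MODRM_REG(pVCpu, bRm);    // reg field with REX.R
 *      uint8_t const iGRegSrc = IEM_GET_MODRM_RM(pVCpu, bRm);     // r/m field with REX.B
 *      // ... operate on the two general purpose registers ...
 *  }
 * @endcode
 */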
4993
4994/**
4995 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
4996 * register index, with REX.R added in.
4997 *
4998 * For use during decoding.
4999 *
5000 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
5001 */
5002#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
5003 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
5004 || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
5005 ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
5006/**
5007 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
5008 * with REX.B added in.
5009 *
5010 * For use during decoding.
5011 *
5012 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
5013 */
5014#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
5015 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
5016 || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
5017 ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
5018
5019/**
5020 * Combines the prefix REX and ModR/M byte for passing to
5021 * iemOpHlpCalcRmEffAddrThreadedAddr64().
5022 *
5023 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
5024 * The two bits are part of the REG sub-field, which isn't needed in
5025 * iemOpHlpCalcRmEffAddrThreadedAddr64().
5026 *
5027 * For use during decoding/recompiling.
5028 */
5029#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
5030 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
5031 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (25 - 3) ) )
5032AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(25));
5033AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(26));
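/**
 * Illustrative worked example only for IEM_GET_MODRM_EX (values hypothetical):
 *
 * @code
 *  // bRm = 0x45 (mod=1, reg=0, r/m=5) and fPrefixes has IEM_OP_PRF_REX_B set:
 *  //   (0x45 & ~X86_MODRM_REG_MASK)          -> 0x45  (reg bits already zero)
 *  //   (RT_BIT_32(25) >> (25 - 3))           -> 0x08  (REX.B lands in bit 3)
 *  //   IEM_GET_MODRM_EX(pVCpu, 0x45)         -> 0x4d
 *  // A set IEM_OP_PRF_REX_X (bit 26) would likewise land in bit 4 (0x10).
 * @endcode
 */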
5034
5035/**
5036 * Gets the effective VEX.VVVV value.
5037 *
5038 * The 4th bit is ignored when not executing 64-bit code.
5039 * @returns effective V-register value.
5040 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5041 */
5042#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
5043 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
5044
5045
5046/**
5047 * Gets the register (reg) part of the special 4th register byte used by
5048 * vblendvps and vblendvpd.
5049 *
5050 * For use during decoding.
5051 */
5052#define IEM_GET_IMM8_REG(a_pVCpu, a_bRegImm8) \
5053 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_bRegImm8) >> 4 : ((a_bRegImm8) >> 4) & 7)
5054
5055
5056/**
5057 * Checks if we're executing inside an AMD-V or VT-x guest.
5058 */
5059#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
5060# define IEM_IS_IN_GUEST(a_pVCpu) RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
5061#else
5062# define IEM_IS_IN_GUEST(a_pVCpu) false
5063#endif
5064
5065
5066#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5067
5068/**
5069 * Check if the guest has entered VMX root operation.
5070 */
5071# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
5072
5073/**
5074 * Check if the guest has entered VMX non-root operation.
5075 */
5076# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
5077 == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
5078
5079/**
5080 * Check if the nested-guest has the given Pin-based VM-execution control set.
5081 */
5082# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
5083
5084/**
5085 * Check if the nested-guest has the given Processor-based VM-execution control set.
5086 */
5087# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
5088
5089/**
5090 * Check if the nested-guest has the given Secondary Processor-based VM-execution
5091 * control set.
5092 */
5093# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
5094
5095/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
5096# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
5097
5098/** Whether a shadow VMCS is present for the given VCPU. */
5099# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5100
5101/** Gets the VMXON region pointer. */
5102# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5103
5104/** Gets the guest-physical address of the current VMCS for the given VCPU. */
5105# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
5106
5107/** Whether a current VMCS is present for the given VCPU. */
5108# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5109
5110/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
5111# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
5112 do \
5113 { \
5114 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
5115 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
5116 } while (0)
5117
5118/** Clears any current VMCS for the given VCPU. */
5119# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
5120 do \
5121 { \
5122 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
5123 } while (0)
5124
5125/**
5126 * Invokes the VMX VM-exit handler for an instruction intercept.
5127 */
5128# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
5129 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
5130
5131/**
5132 * Invokes the VMX VM-exit handler for an instruction intercept where the
5133 * instruction provides additional VM-exit information.
5134 */
5135# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
5136 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
5137
5138/**
5139 * Invokes the VMX VM-exit handler for a task switch.
5140 */
5141# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
5142 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
5143
5144/**
5145 * Invokes the VMX VM-exit handler for MWAIT.
5146 */
5147# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
5148 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
5149
5150/**
5151 * Invokes the VMX VM-exit handler for EPT faults.
5152 */
5153# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
5154 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
5155
5156/**
5157 * Invokes the VMX VM-exit handler for a triple fault.
5158 */
5159# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
5160 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
5161
5162#else
5163# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
5164# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
5165# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
5166# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
5167# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
5168# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5169# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5170# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5171# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5172# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5173# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
5174
5175#endif
5176
5177#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5178/**
5179 * Checks if we're executing a guest using AMD-V.
5180 */
5181# define IEM_SVM_IS_IN_GUEST(a_pVCpu) ( (a_pVCpu->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
5182 == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
5183/**
5184 * Check if an SVM control/instruction intercept is set.
5185 */
5186# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
5187 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
5188
5189/**
5190 * Check if an SVM read CRx intercept is set.
5191 */
5192# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5193 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5194
5195/**
5196 * Check if an SVM write CRx intercept is set.
5197 */
5198# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5199 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5200
5201/**
5202 * Check if an SVM read DRx intercept is set.
5203 */
5204# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5205 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5206
5207/**
5208 * Check if an SVM write DRx intercept is set.
5209 */
5210# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5211 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5212
5213/**
5214 * Check if an SVM exception intercept is set.
5215 */
5216# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
5217 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
5218
5219/**
5220 * Invokes the SVM \#VMEXIT handler for the nested-guest.
5221 */
5222# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
5223 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
5224
5225/**
5226 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
5227 * corresponding decode assist information.
5228 */
5229# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
5230 do \
5231 { \
5232 uint64_t uExitInfo1; \
5233 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
5234 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
5235 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
5236 else \
5237 uExitInfo1 = 0; \
5238 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
5239 } while (0)
5240
5241/** Checks and handles an SVM nested-guest instruction intercept and updates
5242 * NRIP if needed.
5243 */
5244# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5245 do \
5246 { \
5247 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
5248 { \
5249 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5250 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
5251 } \
5252 } while (0)
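/**
 * Illustrative sketch only: typical use of IEM_SVM_CHECK_INSTR_INTERCEPT in a
 * C instruction implementation.  The function name and the intercept/exit-code
 * constants are placeholders for the real SVM definitions.
 *
 * @code
 *  IEM_CIMPL_DEF_0(iemCImpl_example_intercepted)   // hypothetical
 *  {
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_EXAMPLE, SVM_EXIT_EXAMPLE,
 *                                    0, 0, cbInstr);   // uExitInfo1 = uExitInfo2 = 0
 *      // ... emulate the instruction when it isn't intercepted ...
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */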
5253
5254/** Checks and handles SVM nested-guest CR0 read intercept. */
5255# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5256 do \
5257 { \
5258 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
5259 { /* probably likely */ } \
5260 else \
5261 { \
5262 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5263 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
5264 } \
5265 } while (0)
5266
5267/**
5268 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
5269 */
5270# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
5271 do { \
5272 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
5273 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
5274 } while (0)
5275
5276#else
5277# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
5278# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5279# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5280# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5281# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5282# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
5283# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
5284# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
5285# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
5286 a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5287# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5288# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) do { } while (0)
5289
5290#endif
5291
5292/** @} */
5293
5294uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
5295VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
5296
5297
5298/**
5299 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
5300 */
5301typedef union IEMSELDESC
5302{
5303 /** The legacy view. */
5304 X86DESC Legacy;
5305 /** The long mode view. */
5306 X86DESC64 Long;
5307} IEMSELDESC;
5308/** Pointer to a selector descriptor table entry. */
5309typedef IEMSELDESC *PIEMSELDESC;
5310
5311/** @name Raising Exceptions.
5312 * @{ */
5313VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
5314 uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
5315
5316VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
5317 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5318#ifdef IEM_WITH_SETJMP
5319DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
5320 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
5321#endif
5322VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
5323#ifdef IEM_WITH_SETJMP
5324DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5325#endif
5326VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5327VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
5328VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
5329#ifdef IEM_WITH_SETJMP
5330DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5331#endif
5332VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
5333#ifdef IEM_WITH_SETJMP
5334DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5335#endif
5336VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5337VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
5338VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5339VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5340/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
5341VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5342VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5343VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5344VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5345VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5346VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5347#ifdef IEM_WITH_SETJMP
5348DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5349#endif
5350VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5351VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
5352VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5353#ifdef IEM_WITH_SETJMP
5354DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5355#endif
5356VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5357#ifdef IEM_WITH_SETJMP
5358DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
5359#endif
5360VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5361#ifdef IEM_WITH_SETJMP
5362DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5363#endif
5364VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
5365#ifdef IEM_WITH_SETJMP
5366DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
5367#endif
5368VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5369#ifdef IEM_WITH_SETJMP
5370DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5371#endif
5372VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5373#ifdef IEM_WITH_SETJMP
5374DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5375#endif
5376VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5377#ifdef IEM_WITH_SETJMP
5378DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5379#endif
5380
5381void iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5382void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5383
5384IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
5385IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
5386IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
5387
5388/**
5389 * Macro for calling iemCImplRaiseDivideError().
5390 *
5391 * This is for things that will _always_ decode to an \#DE, taking the
5392 * recompiler into consideration and everything.
5393 *
5394 * @return Strict VBox status code.
5395 */
5396#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseDivideError)
5397
5398/**
5399 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5400 *
5401 * This is for things that will _always_ decode to an \#UD, taking the
5402 * recompiler into consideration and everything.
5403 *
5404 * @return Strict VBox status code.
5405 */
5406#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidLockPrefix)
5407
5408/**
5409 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
5410 *
5411 * This is for things that will _always_ decode to an \#UD, taking the
5412 * recompiler into consideration and everything.
5413 *
5414 * @return Strict VBox status code.
5415 */
5416#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
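/**
 * Illustrative sketch only: typical decoder-side use of the macros above (the
 * decode condition is hypothetical).
 *
 * @code
 *  if (IEM_IS_MODRM_REG_MODE(bRm))     // e.g. the encoding requires a memory operand
 *      IEMOP_RAISE_INVALID_OPCODE_RET();
 * @endcode
 */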
5417
5418/**
5419 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
5420 *
5421 * Using this macro means you've got _buggy_ _code_ and are doing things during
5422 * decoding that belong exclusively in IEMAllCImpl.cpp.
5423 *
5424 * @return Strict VBox status code.
5425 * @see IEMOP_RAISE_INVALID_OPCODE_RET
5426 */
5427#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
5428
5429/** @} */
5430
5431/** @name Register Access.
5432 * @{ */
5433VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5434 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5435VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
5436VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5437 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5438/** @} */
5439
5440/** @name FPU access and helpers.
5441 * @{ */
5442void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5443void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5444void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5445void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5446void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5447void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5448 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5449void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5450 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5451void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5452void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5453void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5454void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5455void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5456void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5457void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5458void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5459void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5460void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5461void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5462void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5463void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5464void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5465void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5466/** @} */
5467
5468/** @name SSE+AVX SIMD access and helpers.
5469 * @{ */
5470void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
5471/** @} */
5472
5473/** @name Memory access.
5474 * @{ */
5475
5476/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
5477#define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16)
5478/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1,
5479 * in which case it works like a normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
5480#define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17)
5481/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
5482 * Users include FXSAVE & FXRSTOR. */
5483#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
5484
5485VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5486 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
5487VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
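/**
 * Illustrative sketch only: the map/commit pattern for iemMemMap and
 * iemMemCommitAndUnmap.  The IEM_ACCESS_DATA_W flag and the 16-byte alignment
 * control (mask 15 plus IEM_MEMMAP_F_ALIGN_GP) are assumptions for the example.
 *
 * @code
 *  uint8_t      bUnmapInfo;
 *  void        *pvMem;
 *  VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, 16, iEffSeg, GCPtrMem,
 *                                    IEM_ACCESS_DATA_W, 15 | IEM_MEMMAP_F_ALIGN_GP);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      // ... write the 16 bytes at pvMem ...
 *      rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *  }
 *  return rcStrict;
 * @endcode
 */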
5488#ifndef IN_RING3
5489VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5490#endif
5491void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5492void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
5493VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
5494VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5495VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
5496
5497void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
5498void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
5499#ifdef IEM_WITH_CODE_TLB
5500void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
5501#else
5502VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
5503#endif
5504#ifdef IEM_WITH_SETJMP
5505uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5506uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5507uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5508uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5509#else
5510VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
5511VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5512VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5513VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5514VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5515VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5516VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5517VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5518VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5519VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5520VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5521#endif
5522
5523VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5524VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5525VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5526VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5527VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5528VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5529VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5530VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5531VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5532VBOXSTRICTRC iemMemFetchDataU128NoAc(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5533VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5534VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5535VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5536VBOXSTRICTRC iemMemFetchDataU256AlignedAvx(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5537VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
5538 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
5539#ifdef IEM_WITH_SETJMP
5540uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5541uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5542uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5543uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5544uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5545uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5546void iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5547void iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5548void iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5549void iemMemFetchDataU128NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5550void iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5551void iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5552void iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5553void iemMemFetchDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5554# if 0 /* these are inlined now */
5555uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5556uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5557uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5558uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5559uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5560uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5561void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5562void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5563void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5564void iemMemFetchDataU128NoAcJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5565void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5566void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5567void iemMemFetchDataU256AlignedAvxJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5568# endif
5569void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5570#endif
5571
5572VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5573VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5574VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5575VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5576VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
5577
5578VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
5579VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
5580VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
5581VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
5582VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5583VBOXSTRICTRC iemMemStoreDataU128NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5584VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5585VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5586VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5587VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5588VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5589#ifdef IEM_WITH_SETJMP
5590void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5591void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5592void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5593void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5594void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5595void iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5596void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5597void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5598void iemMemStoreDataU256NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5599void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5600void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5601void iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5602#if 0
5603void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5604void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5605void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5606void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5607void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5608void iemMemStoreDataNoAcU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5609void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5610void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5611#endif
5612void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5613void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5614#endif
5615
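/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * data store helpers above come in two flavours.  The VBOXSTRICTRC ones
 * return a status that the caller must propagate, while the SafeJmp ones
 * (available with IEM_WITH_SETJMP) longjmp back to the dispatcher on faults
 * instead of returning.  A hypothetical caller might look like:
 *
 *      VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, X86_SREG_DS, GCPtrMem, u32Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // SetJmp flavour: nothing to check, faults unwind via longjmp.
 *      iemMemStoreDataU32SafeJmp(pVCpu, X86_SREG_DS, GCPtrMem, u32Value);
 */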
5616#ifdef IEM_WITH_SETJMP
5617uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5618uint8_t *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5619uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5620uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5621uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5622uint16_t *iemMemMapDataU16AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5623uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5624uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5625uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5626uint32_t *iemMemMapDataU32AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5627uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5628uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5629uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5630uint64_t *iemMemMapDataU64AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5631uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5632uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5633PRTFLOAT80U iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5634PRTFLOAT80U iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5635PCRTFLOAT80U iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5636PRTPBCD80U iemMemMapDataD80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5637PRTPBCD80U iemMemMapDataD80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5638PCRTPBCD80U iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5639PRTUINT128U iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5640PRTUINT128U iemMemMapDataU128AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5641PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5642PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5643
5644void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5645void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5646void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5647void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5648void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5649void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5650#endif
5651
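/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * SafeJmp mapping helpers above pair with the commit/rollback helpers that
 * follow them.  The map call fills in an opaque unmap info byte which is
 * later handed to the matching commit-and-unmap call, e.g. for a
 * read-modify-write access:
 *
 *      uint8_t   bUnmapInfo;
 *      uint32_t *pu32Dst = iemMemMapDataU32RwSafeJmp(pVCpu, &bUnmapInfo, X86_SREG_DS, GCPtrMem);
 *      *pu32Dst += 1;
 *      iemMemCommitAndUnmapRwSafeJmp(pVCpu, bUnmapInfo);
 */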
5652VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5653 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5654VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
5655VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
5656VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5657VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
5658VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5659VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5660VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5661VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5662VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5663 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5664VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
5665 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
5666VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5667VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
5668VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
5669VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
5670VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5671VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5672VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5673
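/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * stack helpers above wrap the RSP/SP adjustment and the memory access in a
 * single call; a caller only needs to forward the strict status code:
 *
 *      VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, u16Value);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      uint16_t u16Popped;
 *      rcStrict = iemMemStackPopU16(pVCpu, &u16Popped);
 */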
5674#ifdef IEM_WITH_SETJMP
5675void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5676void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5677void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5678void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5679void iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5680void iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5681void iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5682
5683void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5684void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5685void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5686void iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5687void iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5688
5689void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5690void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5691void iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5692void iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5693
5694void iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5695void iemMemStoreStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5696void iemMemStoreStackU32SRegSafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5697void iemMemStoreStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5698
5699uint16_t iemMemFetchStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5700uint32_t iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5701uint64_t iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5702
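/*
 * Editor's note (illustrative sketch, not part of the original header): the
 * SafeJmp pop helpers above write the popped value straight into a general
 * purpose register identified by its index, and the Flat32/Flat64 variants
 * are specializations for flat stack segments (as the names suggest):
 *
 *      iemMemStackPopGRegU64SafeJmp(pVCpu, X86_GREG_xAX);  // pop a qword into RAX
 *      iemMemFlat64StackPushU64SafeJmp(pVCpu, uValue);     // push on a flat 64-bit stack
 */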
5703#endif
5704
5705/** @} */
5706
5707/** @name IEMAllCImpl.cpp
5708 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
5709 * @{ */
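/*
 * Editor's note (illustrative sketch): the prototypes below mirror the
 * IEM_CIMPL_DEF_* definitions in IEMAllCImpl.cpp; the sed one-liner above
 * simply renames DEF to PROTO and appends a semicolon, e.g.:
 *
 *      // definition in IEMAllCImpl.cpp (followed by the function body):
 *      IEM_CIMPL_DEF_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 *      // prototype generated for this header:
 *      IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
 */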
5710IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5711IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5712IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5713IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
5714IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
5715IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
5716IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
5717IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
5718IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
5719IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
5720IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
5721IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
5722IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
5723IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
5724IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
5725IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5726IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5727typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5728typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
5729IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
5730IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
5731IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
5732IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
5733IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
5734IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
5735IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
5736IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
5737IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
5738IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
5739IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
5740IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
5741IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
5742IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
5743IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
5744IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
5745IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
5746IEM_CIMPL_PROTO_0(iemCImpl_syscall);
5747IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
5748IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
5749IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
5750IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
5751IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
5752IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
5753IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
5754IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
5755IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
5756IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
5757IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5758IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5759IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5760IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5761IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
5762IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5763IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5764IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
5765IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5766IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5767IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
5768IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5769IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5770IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
5771IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
5772IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
5773IEM_CIMPL_PROTO_0(iemCImpl_clts);
5774IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
5775IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
5776IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
5777IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
5778IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
5779IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
5780IEM_CIMPL_PROTO_0(iemCImpl_invd);
5781IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
5782IEM_CIMPL_PROTO_0(iemCImpl_rsm);
5783IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
5784IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
5785IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
5786IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
5787IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
5788IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5789IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5790IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5791IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5792IEM_CIMPL_PROTO_0(iemCImpl_cli);
5793IEM_CIMPL_PROTO_0(iemCImpl_sti);
5794IEM_CIMPL_PROTO_0(iemCImpl_hlt);
5795IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
5796IEM_CIMPL_PROTO_0(iemCImpl_mwait);
5797IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
5798IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
5799IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
5800IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
5801IEM_CIMPL_PROTO_0(iemCImpl_daa);
5802IEM_CIMPL_PROTO_0(iemCImpl_das);
5803IEM_CIMPL_PROTO_0(iemCImpl_aaa);
5804IEM_CIMPL_PROTO_0(iemCImpl_aas);
5805IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
5806IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
5807IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
5808IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
5809IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
5810 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
5811IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5812IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
5813IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5814IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5815IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5816IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5817IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5818IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5819IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5820IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5821IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5822IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5823IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5824IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
5825IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
5826IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
5827IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
5828IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
5829/** @} */
5830
5831/** @name IEMAllCImplStrInstr.cpp.h
5832 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
5833 * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
5834 * @{ */
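/*
 * Editor's note (illustrative sketch, exact template spelling not verified):
 * IEMAllCImplStrInstr.cpp.h is included once per operand/address size
 * combination and pastes the function names together with RT_CONCAT4, roughly
 * along these lines, which the sed command above flattens into the prototype
 * names used below:
 *
 *      RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE)   // e.g. iemCImpl_repe_cmps_op8_addr16
 *      RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE)         // e.g. iemCImpl_repe_scas_al_m16
 */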
5835IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
5836IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
5837IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
5838IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
5839IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
5840IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
5841IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
5842IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
5843IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
5844IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5845IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5846
5847IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
5848IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
5849IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
5850IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
5851IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
5852IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
5853IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
5854IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
5855IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
5856IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5857IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5858
5859IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
5860IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
5861IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
5862IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
5863IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
5864IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
5865IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
5866IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
5867IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
5868IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5869IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5870
5871
5872IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
5873IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
5874IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
5875IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
5876IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
5877IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
5878IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
5879IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
5880IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
5881IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5882IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5883
5884IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
5885IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
5886IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
5887IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
5888IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
5889IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
5890IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
5891IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
5892IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
5893IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5894IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5895
5896IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
5897IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
5898IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
5899IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
5900IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
5901IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
5902IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
5903IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
5904IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
5905IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5906IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5907
5908IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
5909IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
5910IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
5911IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
5912IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
5913IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
5914IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
5915IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
5916IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
5917IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5918IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5919
5920
5921IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
5922IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
5923IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
5924IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
5925IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
5926IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
5927IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
5928IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
5929IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
5930IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5931IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5932
5933IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
5934IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
5935IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
5936IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
5937IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
5938IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
5939IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
5940IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
5941IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
5942IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5943IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5944
5945IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
5946IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
5947IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
5948IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
5949IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
5950IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
5951IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
5952IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
5953IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
5954IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5955IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5956
5957IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
5958IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
5959IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
5960IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
5961IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
5962IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
5963IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
5964IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
5965IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
5966IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5967IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5968/** @} */
5969
5970#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5971VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
5972VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
5973VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
5974VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
5975VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
5976VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5977VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
5978VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
5979VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
5980VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
5981 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
5982VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
5983 bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
5984VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5985VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5986VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5987VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5988VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5989VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5990VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
5991VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
5992 RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
5993VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
5994VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
5995VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
5996uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
5997void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
5998VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
5999 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
6000bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
6001IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
6002IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
6003IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
6004IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
6005IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
6006IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
6007IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
6008IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
6009IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
6010IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
6011IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
6012IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
6013IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
6014IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
6015IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
6016IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
6017#endif
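/*
 * Editor's note (illustrative sketch, hypothetical caller): an instruction
 * that must cause a VM-exit in VMX non-root mode (CPUID does so
 * unconditionally) can forward the exit reason and the instruction length to
 * iemVmxVmexitInstr above, e.g.:
 *
 *      return iemVmxVmexitInstr(pVCpu, VMX_EXIT_CPUID, cbInstr);
 */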
6018
6019#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
6020VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
6021VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
6022VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
6023 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
6024VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
6025IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
6026IEM_CIMPL_PROTO_0(iemCImpl_vmload);
6027IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
6028IEM_CIMPL_PROTO_0(iemCImpl_clgi);
6029IEM_CIMPL_PROTO_0(iemCImpl_stgi);
6030IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
6031IEM_CIMPL_PROTO_0(iemCImpl_skinit);
6032IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
6033#endif
6034
6035IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
6036IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
6037IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */
6038
6039extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
6040extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
6041extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
6042extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
6043extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
6044extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
6045extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
6046
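/*
 * Editor's note (illustrative sketch, hypothetical dispatch): the one byte
 * map above is indexed directly by the opcode byte; the larger 1024-entry
 * maps appear to fold a prefix index into the lookup as well.
 *
 *      PFNIEMOP const pfnOp = g_apfnIemInterpretOnlyOneByteMap[bOpcode];   // bOpcode = fetched opcode byte
 */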
6047/*
6048 * Recompiler related stuff: threaded and native recompiler tables, TB management and entry points.
6049 */
6050extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
6051extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
6052extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
6053extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
6054extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
6055extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
6056extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
6057
6058DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
6059 uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
6060void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
6061DECLHIDDEN(void) iemTbAllocatorFree(PVMCPUCC pVCpu, PIEMTB pTb);
6062void iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
6063void iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
6064DECLHIDDEN(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT;
6065DECLHIDDEN(void) iemThreadedDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
6066
6067
6068/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
6069#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
6070typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
6071typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
6072# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
6073 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
6074# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
6075 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
6076
6077#else
6078typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
6079typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
6080# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
6081 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
6082# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
6083 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
6084#endif
6085
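/*
 * Editor's note (illustrative sketch, hypothetical function, not one of the
 * real built-ins declared below): a threaded function body is defined with
 * the DEF macro above, taking the VCpu and three generic parameters:
 *
 *      IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_ExampleNop)
 *      {
 *          RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
 *          return VINF_SUCCESS;
 *      }
 */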
6086
6087IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
6088IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_LogCpuState);
6089
6090IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);
6091
6092IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
6093IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
6094IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
6095IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);
6096
6097IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
6098IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
6099IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);
6100
6101/* Branching: */
6102IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
6103IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
6104IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);
6105
6106IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
6107IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
6108IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);
6109
6110/* Natural page crossing: */
6111IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
6112IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
6113IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);
6114
6115IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
6116IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
6117IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);
6118
6119IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
6120IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
6121IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
6122
6123bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
6124bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);
6125
6126/* Native recompiler public bits: */
6127DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
6128DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
6129int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk) RT_NOEXCEPT;
6130DECLHIDDEN(void *) iemExecMemAllocatorAlloc(PVMCPU pVCpu, uint32_t cbReq, PIEMTB pTb) RT_NOEXCEPT;
6131DECLHIDDEN(void) iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
6132void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
6133DECLASM(DECL_NO_RETURN(void)) iemNativeTbLongJmp(void *pvFramePointer, int rc) RT_NOEXCEPT;
6134
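/*
 * Editor's note (illustrative sketch, variable names are hypothetical): the
 * executable memory allocator calls above appear to follow an
 * alloc / emit / ready-for-use / free life cycle for native TBs:
 *
 *      void *pvCode = iemExecMemAllocatorAlloc(pVCpu, cbReq, pTb);
 *      // ... emit native code into pvCode ...
 *      iemExecMemAllocatorReadyForUse(pVCpu, pvCode, cbUsed);
 *      // ... later, when the TB is freed:
 *      iemExecMemAllocatorFree(pVCpu, pvCode, cbUsed);
 */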
6135
6136/** @} */
6137
6138RT_C_DECLS_END
6139
6140#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
6141