VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@104858

Last change on this file since 104858 was 104858, checked in by vboxsync, 6 months ago

VMM/IEM: Optimize executable memory allocation on macOS by removing the need for calling RTMemProtect() to switch between RW and RX memory, bugref:10555.

On macOS it is impossible to allocate memory RWX, which is the reason for the current RTMemProtect() trickery and the additional overhead it induces. However, the Mach VM API allows remapping the physical memory backing a virtual address range into another region which can have different protection flags. This makes it possible to have one virtual memory region with read/write permissions and a second region with read/execute permissions, both backed by the same physical memory.

Before this optimization, a profiling build took 76 ticks on average when allocating executable memory (measured before any memory pruning started to take place because the maximum amount of executable memory was reached), which translates to 3166.7ns given the 24MHz frequency of CNTVCT_EL0 used as the time source. With the optimization in place the average is now 15 ticks, or 625ns.
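The dual-mapping trick looks roughly like this (a minimal sketch against the Mach VM API; error paths, the W^X/MAP_JIT details on Apple Silicon and the IEM allocator bookkeeping are omitted, and the helper name is made up for illustration):

    #include <mach/mach.h>
    #include <mach/mach_vm.h>

    /* Hypothetical helper: allocate a buffer and return a writable and an
       executable view backed by the same physical pages. */
    static kern_return_t iemExampleAllocRwRxPair(mach_vm_size_t cb, void **ppvRw, void **ppvRx)
    {
        /* The primary mapping is plain anonymous RW memory. */
        mach_vm_address_t AddrRw = 0;
        kern_return_t krc = mach_vm_allocate(mach_task_self(), &AddrRw, cb, VM_FLAGS_ANYWHERE);
        if (krc != KERN_SUCCESS)
            return krc;

        /* Remap the same physical memory at a second virtual address (copy=FALSE)... */
        mach_vm_address_t AddrRx = 0;
        vm_prot_t fCur = VM_PROT_NONE;
        vm_prot_t fMax = VM_PROT_NONE;
        krc = mach_vm_remap(mach_task_self(), &AddrRx, cb, 0 /*mask*/, VM_FLAGS_ANYWHERE,
                            mach_task_self(), AddrRw, FALSE /*copy*/, &fCur, &fMax, VM_INHERIT_NONE);
        if (krc == KERN_SUCCESS)
        {
            /* ...and flip the alias to R+X while the original view stays R+W. */
            krc = mach_vm_protect(mach_task_self(), AddrRx, cb, FALSE /*set_maximum*/,
                                  VM_PROT_READ | VM_PROT_EXECUTE);
            if (krc == KERN_SUCCESS)
            {
                *ppvRw = (void *)(uintptr_t)AddrRw; /* generate code into this view... */
                *ppvRx = (void *)(uintptr_t)AddrRx; /* ...and execute it from this one. */
                return KERN_SUCCESS;
            }
            mach_vm_deallocate(mach_task_self(), AddrRx, cb);
        }
        mach_vm_deallocate(mach_task_self(), AddrRw, cb);
        return krc;
    }

No RTMemProtect() round trip is needed afterwards: writes go through the RW view and are immediately visible through the RX view (modulo the usual instruction cache maintenance on ARM).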

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 324.2 KB
/* $Id: IEMInternal.h 104858 2024-06-05 18:10:20Z vboxsync $ */
/** @file
 * IEM - Internal header file.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
#define VMM_INCLUDED_SRC_include_IEMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#ifndef RT_IN_ASSEMBLER
# include <VBox/vmm/cpum.h>
# include <VBox/vmm/iem.h>
# include <VBox/vmm/pgm.h>
# include <VBox/vmm/stam.h>
# include <VBox/param.h>

# include <iprt/setjmp-without-sigmask.h>
# include <iprt/list.h>
#endif /* !RT_IN_ASSEMBLER */


RT_C_DECLS_BEGIN


/** @defgroup grp_iem_int Internals
 * @ingroup grp_iem
 * @internal
 * @{
 */

/* Make doxygen happy w/o overcomplicating the #if checks. */
#ifdef DOXYGEN_RUNNING
# define IEM_WITH_THROW_CATCH
# define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#endif

/** For expanding symbols in SlickEdit and other products when tagging and
 * cross-referencing IEM symbols. */
#ifndef IEM_STATIC
# define IEM_STATIC static
#endif

/** @def IEM_WITH_SETJMP
 * Enables alternative status code handling using setjmps.
 *
 * This adds a bit of expense via the setjmp() call since it saves all the
 * non-volatile registers. However, it eliminates return code checks and allows
 * for more optimal return value passing (return regs instead of stack buffer).
 */
#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
# define IEM_WITH_SETJMP
#endif

/** @def IEM_WITH_THROW_CATCH
 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
 * mode code when IEM_WITH_SETJMP is in effect.
 *
 * With GCC 11.3.1 and code TLB on Linux, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
 * test result value improving by more than 1%. (Best out of three.)
 *
 * With Visual C++ 2019 and code TLB on Windows, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster, and all but some
 * of the MMIO and CPUID tests ran noticeably faster. Variation is greater than
 * on Linux, but it should be quite a bit faster for normal code.
 */
#if defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */
# define IEM_WITH_THROW_CATCH
#endif /*ASM-NOINC-END*/

/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING
 * Enables the delayed PC updating optimization (see @bugref{10373}).
 */
#if defined(DOXYGEN_RUNNING) || 1
# define IEMNATIVE_WITH_DELAYED_PC_UPDATING
#endif

/** Enables the SIMD register allocator @bugref{10614}. */
#if defined(DOXYGEN_RUNNING) || 1
# define IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
#endif
/** Enables access to even callee saved registers. */
//# define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS

#if defined(DOXYGEN_RUNNING) || 1
/** @def IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
 * Delay the writeback of dirty registers as long as possible. */
# define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
#endif

/** @def VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
 * Enables a quicker alternative to throw/longjmp for IEM_DO_LONGJMP when
 * executing native translation blocks.
 *
 * This exploits the fact that we save all non-volatile registers in the TB
 * prologue and thus just need to do the same as the TB epilogue to get the
 * effect of a longjmp/throw. Since MSC marks XMM6 thru XMM15 as
 * non-volatile (and does something even more crazy for ARM), this probably
 * won't work reliably on Windows. */
#ifdef RT_ARCH_ARM64
# ifndef RT_OS_WINDOWS
#  define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# endif
#endif
/* ASM-NOINC-START */
#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# if !defined(IN_RING3) \
  || !defined(VBOX_WITH_IEM_RECOMPILER) \
  || !defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
#  undef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# elif defined(RT_OS_WINDOWS)
#  pragma message("VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is not safe to use on windows")
# endif
#endif



/** @def IEM_DO_LONGJMP
 *
 * Wrapper around longjmp / throw.
 *
 * @param a_pVCpu The CPU handle.
 * @param a_rc The status code to jump back with / throw.
 */
#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
# ifdef IEM_WITH_THROW_CATCH
#  ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#   define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
        if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
            iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
        throw int(a_rc); \
    } while (0)
#  else
#   define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
#  endif
# else
#  define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
# endif
#endif

/** For use with IEM functions that may do a longjmp (when enabled).
 *
 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
 * attribute. So, we indicate that functions that may be part of a longjmp may
 * throw "exceptions" and that the compiler should definitely not generate any
 * std::terminate calling unwind code.
 *
 * Here is one example of this ending in std::terminate:
 * @code{.txt}
00 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
01 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
02 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
03 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
04 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
05 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
06 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
07 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
08 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
09 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
0a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
0b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
0c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
0d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
0e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
0f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
10 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
 * @endcode
 *
 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
 */
#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
#else
# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
#endif
/* ASM-NOINC-END */
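/* Usage sketch (illustrative only, not code from this file): an inner helper
 * bails out via IEM_DO_LONGJMP while advertising this to the compiler with
 * IEM_NOEXCEPT_MAY_LONGJMP; the helper names here are made up.
 * @code{.c}
 *  uint8_t iemExampleFetchU8(PVMCPUCC pVCpu, RTGCPTR GCPtr) IEM_NOEXCEPT_MAY_LONGJMP
 *  {
 *      uint8_t bValue;
 *      VBOXSTRICTRC rcStrict = iemExampleTryFetchU8(pVCpu, GCPtr, &bValue);
 *      if (rcStrict == VINF_SUCCESS)
 *          return bValue;
 *      IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict)); // does not return
 *  }
 * @endcode */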

#define IEM_IMPLEMENTS_TASKSWITCH

/** @def IEM_WITH_3DNOW
 * Includes the 3DNow decoding. */
#if !defined(IEM_WITH_3DNOW) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_3DNOW
#  define IEM_WITH_3DNOW
# endif
#endif

/** @def IEM_WITH_THREE_0F_38
 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
#if !defined(IEM_WITH_THREE_0F_38) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_THREE_0F_38
#  define IEM_WITH_THREE_0F_38
# endif
#endif

/** @def IEM_WITH_THREE_0F_3A
 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
#if !defined(IEM_WITH_THREE_0F_3A) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_THREE_0F_3A
#  define IEM_WITH_THREE_0F_3A
# endif
#endif

/** @def IEM_WITH_VEX
 * Includes the VEX decoding. */
#if !defined(IEM_WITH_VEX) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_VEX
#  define IEM_WITH_VEX
# endif
#endif

/** @def IEM_CFG_TARGET_CPU
 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
 *
 * By default we allow this to be configured by the user via the
 * CPUM/GuestCpuName config string, but this comes at a slight cost during
 * decoding. So, for applications of this code where there is no need to
 * be dynamic wrt target CPU, just modify this define.
 */
#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
#endif

//#define IEM_WITH_CODE_TLB // - work in progress
//#define IEM_WITH_DATA_TLB // - work in progress


/** @def IEM_USE_UNALIGNED_DATA_ACCESS
 * Use unaligned accesses instead of elaborate byte assembly. */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING) /*ASM-NOINC*/
# define IEM_USE_UNALIGNED_DATA_ACCESS
#endif /*ASM-NOINC*/
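/* Illustration (sketch) of what this buys us when loading a 16-bit value:
 * @code{.c}
 * # ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *  uint16_t const u16 = *(uint16_t const *)pbSrc;        // one potentially unaligned load
 * # else
 *  uint16_t const u16 = RT_MAKE_U16(pbSrc[0], pbSrc[1]); // byte-wise assembly
 * # endif
 * @endcode */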

//#define IEM_LOG_MEMORY_WRITES



#ifndef RT_IN_ASSEMBLER /* ASM-NOINC-START - the rest of the file */

# if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
/** Instruction statistics. */
typedef struct IEMINSTRSTATS
{
# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
# include "IEMInstructionStatisticsTmpl.h"
# undef IEM_DO_INSTR_STAT
} IEMINSTRSTATS;
#else
struct IEMINSTRSTATS;
typedef struct IEMINSTRSTATS IEMINSTRSTATS;
#endif
/** Pointer to IEM instruction statistics. */
typedef IEMINSTRSTATS *PIEMINSTRSTATS;


/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
 * @{ */
#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result. */
#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
/** Selects the right variant from a_aArray.
 * pVCpu is implicit in the caller context. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
 * be used because the host CPU does not support the operation. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two-dimensional
 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
 * into the two.
 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#else
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#endif
/** @} */

/**
 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
 *
 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
 * indicator.
 *
 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
 */
#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
    (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
#else
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
#endif
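/* Example (sketch): selecting an assembly worker when the host has the ADX
 * feature, with the generic C fallback otherwise. The worker names follow the
 * usual iemAImpl_xxx pattern but are illustrative here.
 * @code{.c}
 *  pfnWorker = IEM_SELECT_HOST_OR_FALLBACK(fAdx, iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback);
 * @endcode */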


/**
 * Branch types.
 */
typedef enum IEMBRANCH
{
    IEMBRANCH_JUMP = 1,
    IEMBRANCH_CALL,
    IEMBRANCH_TRAP,
    IEMBRANCH_SOFTWARE_INT,
    IEMBRANCH_HARDWARE_INT
} IEMBRANCH;
AssertCompileSize(IEMBRANCH, 4);


/**
 * INT instruction types.
 */
typedef enum IEMINT
{
    /** INT n instruction (opcode 0xcd imm). */
    IEMINT_INTN = 0,
    /** Single byte INT3 instruction (opcode 0xcc). */
    IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
    /** Single byte INTO instruction (opcode 0xce). */
    IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
    /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
    IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
} IEMINT;
AssertCompileSize(IEMINT, 4);


/**
 * An FPU result.
 */
typedef struct IEMFPURESULT
{
    /** The output value. */
    RTFLOAT80U r80Result;
    /** The output status. */
    uint16_t FSW;
} IEMFPURESULT;
AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
/** Pointer to an FPU result. */
typedef IEMFPURESULT *PIEMFPURESULT;
/** Pointer to a const FPU result. */
typedef IEMFPURESULT const *PCIEMFPURESULT;


/**
 * An FPU result consisting of two output values and FSW.
 */
typedef struct IEMFPURESULTTWO
{
    /** The first output value. */
    RTFLOAT80U r80Result1;
    /** The output status. */
    uint16_t FSW;
    /** The second output value. */
    RTFLOAT80U r80Result2;
} IEMFPURESULTTWO;
AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
/** Pointer to an FPU result consisting of two output values and FSW. */
typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
/** Pointer to a const FPU result consisting of two output values and FSW. */
typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;


/**
 * IEM TLB entry.
 *
 * Lookup assembly:
 * @code{.asm}
        ; Calculate tag.
        mov     rax, [VA]
        shl     rax, 16
        shr     rax, 16 + X86_PAGE_SHIFT
        or      rax, [uTlbRevision]

        ; Do indexing.
        movzx   ecx, al
        lea     rcx, [pTlbEntries + rcx]

        ; Check tag.
        cmp     [rcx + IEMTLBENTRY.uTag], rax
        jne     .TlbMiss

        ; Check access.
        mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
        and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
        cmp     rax, [uTlbPhysRev]
        jne     .TlbMiss

        ; Calc address and we're done.
        mov     eax, X86_PAGE_OFFSET_MASK
        and     eax, [VA]
        or      rax, [rcx + IEMTLBENTRY.pbMappingR3]
%ifdef VBOX_WITH_STATISTICS
        inc     qword [cTlbHits]
%endif
        jmp     .Done

    .TlbMiss:
        mov     r8d, ACCESS_FLAGS
        mov     rdx, [VA]
        mov     rcx, [pVCpu]
        call    iemTlbTypeMiss
    .Done:

 * @endcode
 *
 */
typedef struct IEMTLBENTRY
{
    /** The TLB entry tag.
     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits;
     * this is ASSUMING a virtual address width of 48 bits.
     *
     * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
     *
     * The TLB lookup code uses the current TLB revision, which won't ever be zero,
     * enabling an extremely cheap TLB invalidation most of the time. When the TLB
     * revision wraps around though, the tags need to be zeroed.
     *
     * @note Try using the SHRD instruction? After seeing
     *       https://gmplib.org/~tege/x86-timing.pdf, maybe not.
     *
     * @todo This will need to be reorganized for 57-bit wide virtual address and
     *       PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
     *       have to move the TLB entry versioning entirely to the
     *       fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
     *       19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
     *       consumed by PCID and ASID (12 + 6 = 18).
     */
    uint64_t uTag;
    /** Access flags and physical TLB revision.
     *
     * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
     * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
     * - Bit 2 - page tables - not user (complemented X86_PTE_US).
     * - Bit 3 - pgm phys/virt - not directly writable.
     * - Bit 4 - pgm phys page - not directly readable.
     * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
     * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
     * - Bit 7 - tlb entry - pbMappingR3 member not valid.
     * - Bits 63 thru 10 are used for the physical TLB revision number.
     *
     * We're using complemented bit meanings here because it makes it easy to check
     * whether special action is required. For instance a user mode write access
     * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
     * non-zero result would mean special handling needed because either it wasn't
     * writable, or it wasn't user, or the page wasn't dirty. A user mode read
     * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
     * need to check any PTE flag.
     */
    uint64_t fFlagsAndPhysRev;
    /** The guest physical page address. */
    uint64_t GCPhys;
    /** Pointer to the ring-3 mapping. */
    R3PTRTYPE(uint8_t *) pbMappingR3;
#if HC_ARCH_BITS == 32
    uint32_t u32Padding1;
#endif
} IEMTLBENTRY;
AssertCompileSize(IEMTLBENTRY, 32);
/** Pointer to an IEM TLB entry. */
typedef IEMTLBENTRY *PIEMTLBENTRY;

/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
 * @{ */
#define IEMTLBE_F_PT_NO_EXEC     RT_BIT_64(0) /**< Page tables: Not executable. */
#define IEMTLBE_F_PT_NO_WRITE    RT_BIT_64(1) /**< Page tables: Not writable. */
#define IEMTLBE_F_PT_NO_USER     RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
#define IEMTLBE_F_PG_NO_WRITE    RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
#define IEMTLBE_F_PG_NO_READ     RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM). */
#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Page tables: Not accessed (need to be marked accessed). */
#define IEMTLBE_F_PT_NO_DIRTY    RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
#define IEMTLBE_F_NO_MAPPINGR3   RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
#define IEMTLBE_F_PG_UNASSIGNED  RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
#define IEMTLBE_F_PG_CODE_PAGE   RT_BIT_64(9) /**< Phys page: Code page. */
#define IEMTLBE_F_PHYS_REV       UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
/** @} */
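/* Sketch of how the complemented encoding pays off: a user-mode write check is
 * a single AND+CMP against the physical revision, just like in the lookup
 * assembly above (illustrative, not the actual code):
 * @code{.c}
 *  uint64_t const fNoWriteUser = IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
 *                              | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_USER
 *                              | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_NO_MAPPINGR3
 *                              | IEMTLBE_F_PHYS_REV;
 *  if ((pTlbe->fFlagsAndPhysRev & fNoWriteUser) == pTlb->uTlbPhysRev)
 *  {
 *      // Fast path: present, writable, dirty, user accessible and directly
 *      // mapped; write through pTlbe->pbMappingR3.
 *  }
 * @endcode */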


/**
 * An IEM TLB.
 *
 * We've got two of these, one for data and one for instructions.
 */
typedef struct IEMTLB
{
    /** The TLB revision.
     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
     * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
     * (A revision of zero indicates an invalid TLB entry.)
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t uTlbRevision;
    /** The TLB physical address revision - shadow of PGM variable.
     *
     * This is actually only 54 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
     * incremented by adding RT_BIT_64(10). When it wraps around and becomes zero,
     * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pbMappingR3 as well
     * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 10, 4, and 3.
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t volatile uTlbPhysRev;

    /* Statistics: */

    /** TLB hits (VBOX_WITH_STATISTICS only). */
    uint64_t cTlbHits;
    /** TLB misses. */
    uint32_t cTlbMisses;
    /** Slow read path. */
    uint32_t cTlbSlowReadPath;
    /** Safe read path. */
    uint32_t cTlbSafeReadPath;
    /** Safe write path. */
    uint32_t cTlbSafeWritePath;
#if 0
    /** TLB misses because of tag mismatch. */
    uint32_t cTlbMissesTag;
    /** TLB misses because of virtual access violation. */
    uint32_t cTlbMissesVirtAccess;
    /** TLB misses because of dirty bit. */
    uint32_t cTlbMissesDirty;
    /** TLB misses because of MMIO. */
    uint32_t cTlbMissesMmio;
    /** TLB misses because of write access handlers. */
    uint32_t cTlbMissesWriteHandler;
    /** TLB misses because no r3(/r0) mapping. */
    uint32_t cTlbMissesMapping;
#endif
    /** Alignment padding. */
    uint32_t au32Padding[6];

    /** The TLB entries.
     * We've chosen 256 because that way we can obtain the result directly from an
     * 8-bit register without an additional AND instruction. */
    IEMTLBENTRY aEntries[256];
} IEMTLB;
AssertCompileSizeAlignment(IEMTLB, 64);
/** IEMTLB::uTlbRevision increment. */
#define IEMTLB_REVISION_INCR RT_BIT_64(36)
/** IEMTLB::uTlbRevision mask. */
#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
/** IEMTLB::uTlbPhysRev increment.
 * @sa IEMTLBE_F_PHYS_REV */
#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
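/* Sketch of the cheap invalidate-all operation enabled by the revision scheme
 * (mirrors what the invalidation code does, simplified):
 * @code{.c}
 *  pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
 *  if (RT_UNLIKELY(!(pTlb->uTlbRevision & IEMTLB_REVISION_MASK)))
 *  {
 *      // Rare wraparound: zero every tag and restart the revision counter.
 *      for (uint32_t i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
 *          pTlb->aEntries[i].uTag = 0;
 *      pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
 *  }
 * @endcode */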
/**
 * Calculates the TLB tag for a virtual address.
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param a_pTlb The TLB.
 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
 *                the clearing of the top 16 bits won't work (if 32-bit
 *                we'll end up with mostly zeros).
 */
#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
/**
 * Calculates the TLB tag for a virtual address but without TLB revision.
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
 *                the clearing of the top 16 bits won't work (if 32-bit
 *                we'll end up with mostly zeros).
 */
#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
/**
 * Converts a TLB tag value into a TLB index.
 * @returns Index into IEMTLB::aEntries.
 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
/**
 * Converts a TLB tag value into a TLB entry pointer.
 * @returns Pointer into IEMTLB::aEntries.
 * @param a_pTlb The TLB.
 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
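/* C equivalent of the lookup assembly shown with IEMTLBENTRY (sketch; the data
 * TLB member name follows IEMCPU, the rest is illustrative):
 * @code{.c}
 *  uint64_t const uTag = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtr);
 *  PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
 *  if (pTlbe->uTag == uTag)
 *  {
 *      // Hit: check fFlagsAndPhysRev against uTlbPhysRev, then access the
 *      // page via pTlbe->pbMappingR3 + (GCPtr & GUEST_PAGE_OFFSET_MASK).
 *  }
 *  else
 *  {
 *      // Miss: take the slow path and (re)load the entry.
 *  }
 * @endcode */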


/** @name IEM_MC_F_XXX - MC block flags/clues.
 * @todo Merge with IEM_CIMPL_F_XXX
 * @{ */
#define IEM_MC_F_ONLY_8086 RT_BIT_32(0)
#define IEM_MC_F_MIN_186 RT_BIT_32(1)
#define IEM_MC_F_MIN_286 RT_BIT_32(2)
#define IEM_MC_F_NOT_286_OR_OLDER IEM_MC_F_MIN_386
#define IEM_MC_F_MIN_386 RT_BIT_32(3)
#define IEM_MC_F_MIN_486 RT_BIT_32(4)
#define IEM_MC_F_MIN_PENTIUM RT_BIT_32(5)
#define IEM_MC_F_MIN_PENTIUM_II IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_MIN_CORE IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_64BIT RT_BIT_32(6)
#define IEM_MC_F_NOT_64BIT RT_BIT_32(7)
/** This is set by IEMAllN8vePython.py to indicate a variation without the
 * flags-clearing-and-checking, when there is also a variation with that.
 * @note Do not use this manually, it's only for python and for testing in
 *       the native recompiler! */
#define IEM_MC_F_WITHOUT_FLAGS RT_BIT_32(8)
/** @} */

/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
 *
 * These clues are mainly for the recompiler, so that it can emit correct code.
 *
 * They are processed by the python script, which also automatically
 * calculates flags for MC blocks based on the statements, extending the use of
 * these flags to describe MC block behavior to the recompiler core. The python
 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
 * error checking purposes. The script emits the necessary fEndTb = true and
 * similar statements as this reduces compile time a tiny bit.
 *
 * @{ */
/** Flag set if direct branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
/** Flag set if indirect branch, clear if direct or relative.
 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
 * as well as for return instructions (RET, IRET, RETF). */
#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
/** Flag set if relative branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
/** Flag set if conditional branch, clear if unconditional. */
#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
/** Flag set if it's a far branch (changes CS). */
#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
/** Convenience: Testing any kind of branch. */
#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)

/** Execution flags may change (IEMCPU::fExec). */
#define IEM_CIMPL_F_MODE RT_BIT_32(5)
/** May change significant portions of RFLAGS. */
#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
/** May trigger interrupt shadowing. */
#define IEM_CIMPL_F_INHIBIT_SHADOW RT_BIT_32(8)
/** May enable interrupts, so recheck IRQ immediately after executing
 * the instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(9)
/** May disable interrupts, so recheck IRQ immediately before executing the
 * instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(10)
/** Convenience: Check for IRQ both before and after an instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
#define IEM_CIMPL_F_VMEXIT RT_BIT_32(11)
/** May modify FPU state.
 * @todo Not sure if this is useful yet. */
#define IEM_CIMPL_F_FPU RT_BIT_32(12)
/** REP prefixed instruction which may yield before updating PC.
 * @todo Not sure if this is useful, REP functions now return non-zero
 *       status if they don't update the PC. */
#define IEM_CIMPL_F_REP RT_BIT_32(13)
/** I/O instruction.
 * @todo Not sure if this is useful yet. */
#define IEM_CIMPL_F_IO RT_BIT_32(14)
/** Force end of TB after the instruction. */
#define IEM_CIMPL_F_END_TB RT_BIT_32(15)
/** Flag set if a branch may also modify the stack (push/pop return address). */
#define IEM_CIMPL_F_BRANCH_STACK RT_BIT_32(16)
/** Flag set if a branch may also modify the stack (push/pop return address)
 * and switch it (load/restore SS:RSP). */
#define IEM_CIMPL_F_BRANCH_STACK_FAR RT_BIT_32(17)
/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
#define IEM_CIMPL_F_XCPT \
    (  IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
     | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)

/** The block calls a C-implementation instruction function with two implicit arguments.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_CIMPL RT_BIT_32(18)
/** The block calls an ASM-implementation instruction function.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL RT_BIT_32(19)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86FXSTATE pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(20)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86XSAVEAREA pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note No different from IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE, so same value.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE
/** @} */


/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
 *
 * These flags are set when entering IEM and adjusted as code is executed, such
 * that they will always contain the current values as instructions are
 * finished.
 *
 * In recompiled execution mode, (most of) these flags are included in the
 * translation block selection key and stored in IEMTB::fFlags alongside the
 * IEMTB_F_XXX flags. The latter flags use bits 31 thru 24, which are all zero
 * in IEMCPU::fExec.
 *
 * @{ */
/** Mode: The block target mode mask. */
#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
/** Mode: The IEMMODE part of the IEM_F_MODE_MASK value. */
#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
/** X86 Mode: Bit used to indicate a pre-386 CPU in 16-bit mode (for eliminating
 * a conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
 * 32-bit mode (for simplifying most memory accesses). */
#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)

/** X86 Mode: 16-bit on 386 or later. */
#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
/** X86 Mode: 16-bit protected mode on 386 or later. */
#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
/** X86 Mode: 16-bit protected mode on a pre-386 CPU (80286). */
#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
#define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)

/** X86 Mode: 32-bit on 386 or later. */
#define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
/** X86 Mode: 32-bit protected mode. */
#define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)

/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
#define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)

/** X86 Mode: Checks if @a a_fExec represents a FLAT mode. */
#define IEM_F_MODE_X86_IS_FLAT(a_fExec) (   ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
                                         || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
                                         || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)

/** Bypass access handlers when set. */
#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
/** Have pending hardware instruction breakpoints. */
#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
/** Have pending hardware data breakpoints. */
#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)

/** X86: Have pending hardware I/O breakpoints. */
#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
/** X86: Disregard the lock prefix (implied or not) when set. */
#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)

/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
#define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)

/** Caller configurable options. */
#define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)

/** X86: The current protection level (CPL) shift factor. */
#define IEM_F_X86_CPL_SHIFT 8
/** X86: The current protection level (CPL) mask. */
#define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
/** X86: The current protection level (CPL) shifted mask. */
#define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
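/* Example: the CPL is extracted from the execution flags like this (sketch,
 * matching the shift/mask definitions above):
 * @code{.c}
 *  uint8_t const uCpl = (uint8_t)((pVCpu->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK);
 * @endcode */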

/** X86 execution context.
 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
 * mode. */
#define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
/** X86 context: Plain regular execution context. */
#define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
/** X86 context: VT-x enabled. */
#define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
/** X86 context: AMD-V enabled. */
#define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
/** X86 context: In AMD-V or VT-x guest mode. */
#define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
/** X86 context: System management mode (SMM). */
#define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)

/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
 *       iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
 *       and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
 *       already). */

/** @} */


/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
 *
 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
 * translation block flags. The combined flag mask (subject to
 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
 *
 * @{ */
/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)

/** Type: The block type mask. */
#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
/** Type: Purely threaded recompiler (via tables). */
#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
/** Type: Native recompilation. */
#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)

/** Set when we're starting the block in an "interrupt shadow".
 * We don't need to distinguish between the two types of this mask, thus the one.
 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
#define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000)
/** Set when we're currently inhibiting NMIs
 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
#define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000)

/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
 * we're close to the limit before starting a TB, as determined by
 * iemGetTbFlagsForCurrentPc(). */
#define IEMTB_F_CS_LIM_CHECKS UINT32_C(0x10000000)

/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
 *
 * @note We skip all of IEM_F_X86_CTX_MASK, with the exception of SMM (which we
 *       don't implement), because we don't currently generate any context
 *       specific code - that's all handled in CIMPL functions.
 *
 *       For the threaded recompiler we don't generate any CPL specific code
 *       either, but the native recompiler does for memory access (saves getting
 *       the CPL from fExec and turning it into IEMTLBE_F_PT_NO_USER).
 *       Since most OSes will not share code between rings, this shouldn't
 *       have any real effect on TB/memory/recompiling load.
 */
#define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM)
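/* Sketch: when hunting for a TB, both sides of the comparison are reduced to
 * the lookup key (illustrative; fExtraFlags stands for the wanted IEMTB_F_XXX
 * bits on top of IEMCPU::fExec):
 * @code{.c}
 *  if ((pTb->fFlags & IEMTB_F_KEY_MASK) == (fExtraFlags & IEMTB_F_KEY_MASK))
 *  {
 *      // Flags match; now compare GCPhysPc and the CS attributes.
 *  }
 * @endcode */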
/** @} */

AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);

AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);

AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));

/** Native instruction type for use with the native code generator.
 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
typedef uint8_t IEMNATIVEINSTR;
#else
typedef uint32_t IEMNATIVEINSTR;
#endif
/** Pointer to a native instruction unit. */
typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
/** Pointer to a const native instruction unit. */
typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;

/**
 * A call for the threaded call table.
 */
typedef struct IEMTHRDEDCALLENTRY
{
    /** The function to call (IEMTHREADEDFUNCS). */
    uint16_t enmFunction;

    /** Instruction number in the TB (for statistics). */
    uint8_t idxInstr;
    /** The opcode length. */
    uint8_t cbOpcode;
    /** Offset into IEMTB::pabOpcodes. */
    uint16_t offOpcode;

    /** TB lookup table index (7 bits) and large size (1 bit).
     *
     * The default size is 1 entry, but for indirect calls and returns we set the
     * top bit and allocate 4 (IEM_TB_LOOKUP_TAB_LARGE_SIZE) entries. The large
     * tables use RIP for selecting the entry to use, as it is assumed a hash table
     * lookup isn't that slow compared to sequentially trying out 4 TBs.
     *
     * By default lookup table entry 0 for a TB is reserved as a fallback for
     * call table entries w/o explicit entries, so this member will be non-zero if
     * there is a lookup entry associated with this call.
     *
     * @sa IEM_TB_LOOKUP_TAB_GET_SIZE, IEM_TB_LOOKUP_TAB_GET_IDX
     */
    uint8_t uTbLookup;

    /** Unused atm. */
    uint8_t uUnused0;

    /** Generic parameters. */
    uint64_t auParams[3];
} IEMTHRDEDCALLENTRY;
AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
/** Pointer to a threaded call entry. */
typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
/** Pointer to a const threaded call entry. */
typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;

/** The number of TB lookup table entries for a large allocation
 * (IEMTHRDEDCALLENTRY::uTbLookup bit 7 set). */
#define IEM_TB_LOOKUP_TAB_LARGE_SIZE 4
/** Get the lookup table size from IEMTHRDEDCALLENTRY::uTbLookup. */
#define IEM_TB_LOOKUP_TAB_GET_SIZE(a_uTbLookup) (!((a_uTbLookup) & 0x80) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE)
/** Get the first lookup table index from IEMTHRDEDCALLENTRY::uTbLookup. */
#define IEM_TB_LOOKUP_TAB_GET_IDX(a_uTbLookup) ((a_uTbLookup) & 0x7f)
/** Get the lookup table index from IEMTHRDEDCALLENTRY::uTbLookup and RIP. */
#define IEM_TB_LOOKUP_TAB_GET_IDX_WITH_RIP(a_uTbLookup, a_Rip) \
    (!((a_uTbLookup) & 0x80) ? (a_uTbLookup) & 0x7f : ((a_uTbLookup) & 0x7f) + ((a_Rip) & (IEM_TB_LOOKUP_TAB_LARGE_SIZE - 1)) )

/** Make an IEMTHRDEDCALLENTRY::uTbLookup value. */
#define IEM_TB_LOOKUP_TAB_MAKE(a_idxTable, a_fLarge) ((a_idxTable) | ((a_fLarge) ? 0x80 : 0))
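/* Worked example: a large table starting at lookup index 5 is encoded as
 * IEM_TB_LOOKUP_TAB_MAKE(5, true) == 0x85. Then IEM_TB_LOOKUP_TAB_GET_SIZE(0x85)
 * is 4, IEM_TB_LOOKUP_TAB_GET_IDX(0x85) is 5, and with RIP = 0x1003,
 * IEM_TB_LOOKUP_TAB_GET_IDX_WITH_RIP(0x85, 0x1003) yields 5 + (0x1003 & 3) = 8. */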

/**
 * Native IEM TB 'function' typedef.
 *
 * This will throw/longjmp on occasion.
 *
 * @note AMD64 doesn't have that many non-volatile registers and does sport
 *       32-bit address displacements, so we don't need pCtx.
 *
 *       On ARM64 pCtx allows us to directly address the whole register
 *       context without requiring a separate indexing register holding the
 *       offset. This saves an instruction loading the offset for each guest
 *       CPU context access, at the cost of a non-volatile register.
 *       Fortunately, ARM64 has quite a lot more registers.
 */
typedef
#ifdef RT_ARCH_AMD64
int FNIEMTBNATIVE(PVMCPUCC pVCpu)
#else
int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
#endif
#if RT_CPLUSPLUS_PREREQ(201700)
    IEM_NOEXCEPT_MAY_LONGJMP
#endif
    ;
/** Pointer to a native IEM TB entry point function.
 * This will throw/longjmp on occasion. */
typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
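/* Sketch of how a native TB is entered (the real dispatcher does more; member
 * names follow the IEMTB definition further down):
 * @code{.c}
 *  PFNIEMTBNATIVE const pfnTb = (PFNIEMTBNATIVE)pTb->Native.paInstructions;
 * # ifdef RT_ARCH_AMD64
 *  int const rcTb = pfnTb(pVCpu);
 * # else
 *  int const rcTb = pfnTb(pVCpu, &pVCpu->cpum.GstCtx);
 * # endif
 * @endcode */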


/**
 * Translation block debug info entry type.
 */
typedef enum IEMTBDBGENTRYTYPE
{
    kIemTbDbgEntryType_Invalid = 0,
    /** The entry is for marking a native code position.
     * Entries following this all apply to this position. */
    kIemTbDbgEntryType_NativeOffset,
    /** The entry is for a new guest instruction. */
    kIemTbDbgEntryType_GuestInstruction,
    /** Marks the start of a threaded call. */
    kIemTbDbgEntryType_ThreadedCall,
    /** Marks the location of a label. */
    kIemTbDbgEntryType_Label,
    /** Info about a host register shadowing a guest register. */
    kIemTbDbgEntryType_GuestRegShadowing,
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    /** Info about a host SIMD register shadowing a guest SIMD register. */
    kIemTbDbgEntryType_GuestSimdRegShadowing,
#endif
#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    /** Info about a delayed RIP update. */
    kIemTbDbgEntryType_DelayedPcUpdate,
#endif
#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
    /** Info about a shadowed guest register becoming dirty. */
    kIemTbDbgEntryType_GuestRegDirty,
    /** Info about a register writeback/flush operation. */
    kIemTbDbgEntryType_GuestRegWriteback,
#endif
    kIemTbDbgEntryType_End
} IEMTBDBGENTRYTYPE;

/**
 * Translation block debug info entry.
 */
typedef union IEMTBDBGENTRY
{
    /** Plain 32-bit view. */
    uint32_t u;

    /** Generic view for getting at the type field. */
    struct
    {
        /** IEMTBDBGENTRYTYPE */
        uint32_t uType : 4;
        uint32_t uTypeSpecific : 28;
    } Gen;

    struct
    {
        /** kIemTbDbgEntryType_NativeOffset. */
        uint32_t uType : 4;
        /** Native code offset. */
        uint32_t offNative : 28;
    } NativeOffset;

    struct
    {
        /** kIemTbDbgEntryType_GuestInstruction. */
        uint32_t uType : 4;
        uint32_t uUnused : 4;
        /** The IEM_F_XXX flags. */
        uint32_t fExec : 24;
    } GuestInstruction;

    struct
    {
        /* kIemTbDbgEntryType_ThreadedCall. */
        uint32_t uType : 4;
        /** Set if the call was recompiled to native code, clear if just calling
         * threaded function. */
        uint32_t fRecompiled : 1;
        uint32_t uUnused : 11;
        /** The threaded call number (IEMTHREADEDFUNCS). */
        uint32_t enmCall : 16;
    } ThreadedCall;

    struct
    {
        /* kIemTbDbgEntryType_Label. */
        uint32_t uType : 4;
        uint32_t uUnused : 4;
        /** The label type (IEMNATIVELABELTYPE). */
        uint32_t enmLabel : 8;
        /** The label data. */
        uint32_t uData : 16;
    } Label;

    struct
    {
        /* kIemTbDbgEntryType_GuestRegShadowing. */
        uint32_t uType : 4;
        uint32_t uUnused : 4;
        /** The guest register being shadowed (IEMNATIVEGSTREG). */
        uint32_t idxGstReg : 8;
        /** The new host register number, UINT8_MAX if dropped. */
        uint32_t idxHstReg : 8;
        /** The previous host register number, UINT8_MAX if new. */
        uint32_t idxHstRegPrev : 8;
    } GuestRegShadowing;

#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
    struct
    {
        /* kIemTbDbgEntryType_GuestSimdRegShadowing. */
        uint32_t uType : 4;
        uint32_t uUnused : 4;
        /** The guest register being shadowed (IEMNATIVEGSTSIMDREG). */
        uint32_t idxGstSimdReg : 8;
        /** The new host register number, UINT8_MAX if dropped. */
        uint32_t idxHstSimdReg : 8;
        /** The previous host register number, UINT8_MAX if new. */
        uint32_t idxHstSimdRegPrev : 8;
    } GuestSimdRegShadowing;
#endif

#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING
    struct
    {
        /* kIemTbDbgEntryType_DelayedPcUpdate. */
        uint32_t uType : 4;
        /** The instruction offset added to the program counter. */
        uint32_t offPc : 14;
        /** Number of instructions skipped. */
        uint32_t cInstrSkipped : 14;
    } DelayedPcUpdate;
#endif

#if defined(IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK) || defined(IEMNATIVE_WITH_SIMD_REG_ALLOCATOR)
    struct
    {
        /* kIemTbDbgEntryType_GuestRegDirty. */
        uint32_t uType : 4;
        uint32_t uUnused : 11;
        /** Flag whether this is about a SIMD (true) or general (false) register. */
        uint32_t fSimdReg : 1;
        /** The guest register index being marked as dirty. */
        uint32_t idxGstReg : 8;
        /** The host register number this register is shadowed in. */
        uint32_t idxHstReg : 8;
    } GuestRegDirty;

    struct
    {
        /* kIemTbDbgEntryType_GuestRegWriteback. */
        uint32_t uType : 4;
        /** Flag whether this is about a SIMD (true) or general (false) register flush. */
        uint32_t fSimdReg : 1;
        /** The mask shift. */
        uint32_t cShift : 2;
        /** The guest register mask being written back. */
        uint32_t fGstReg : 25;
    } GuestRegWriteback;
#endif

} IEMTBDBGENTRY;
AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
/** Pointer to a debug info entry. */
typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
/** Pointer to a const debug info entry. */
typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;

/**
 * Translation block debug info.
 */
typedef struct IEMTBDBG
{
    /** Number of entries in aEntries. */
    uint32_t cEntries;
    /** The offset of the last kIemTbDbgEntryType_NativeOffset record. */
    uint32_t offNativeLast;
    /** Debug info entries. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    IEMTBDBGENTRY aEntries[RT_FLEXIBLE_ARRAY];
} IEMTBDBG;
/** Pointer to TB debug info. */
typedef IEMTBDBG *PIEMTBDBG;
/** Pointer to const TB debug info. */
typedef IEMTBDBG const *PCIEMTBDBG;


/**
 * Translation block.
 *
 * The current plan is to just keep TBs and associated lookup hash table private
 * to each VCpu as that simplifies TB removal greatly (no races) and generally
 * avoids using expensive atomic primitives for updating lists and stuff.
 */
#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
typedef struct IEMTB
{
    /** Next block with the same hash table entry. */
    struct IEMTB *pNext;
    /** Usage counter. */
    uint32_t cUsed;
    /** The IEMCPU::msRecompilerPollNow last time it was used. */
    uint32_t msLastUsed;

    /** @name What uniquely identifies the block.
     * @{ */
    RTGCPHYS GCPhysPc;
    /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
    uint32_t fFlags;
    union
    {
        struct
        {
            /** Relevant CS X86DESCATTR_XXX bits. */
            uint16_t fAttr;
        } x86;
    };
    /** @} */

    /** Number of opcode ranges. */
    uint8_t cRanges;
    /** Statistics: Number of instructions in the block. */
    uint8_t cInstructions;

    /** Type specific info. */
    union
    {
        struct
        {
            /** The call sequence table. */
            PIEMTHRDEDCALLENTRY paCalls;
            /** Number of calls in paCalls. */
            uint16_t cCalls;
            /** Number of calls allocated. */
            uint16_t cAllocated;
        } Thrd;
        struct
        {
            /** The native instructions (PFNIEMTBNATIVE). */
            PIEMNATIVEINSTR paInstructions;
            /** Number of instructions pointed to by paInstructions. */
            uint32_t cInstructions;
        } Native;
        /** Generic view for zeroing when freeing. */
        struct
        {
            uintptr_t uPtr;
            uint32_t uData;
        } Gen;
    };

    /** The allocation chunk this TB belongs to. */
    uint8_t idxAllocChunk;
    /** The number of entries in the lookup table.
     * Because we're out of space, the TB lookup table is located before the
     * opcodes pointed to by pabOpcodes. */
    uint8_t cTbLookupEntries;

    /** Number of bytes of opcodes stored in pabOpcodes.
     * @todo this field isn't really needed, aRanges keeps the actual info. */
    uint16_t cbOpcodes;
    /** Pointer to the opcode bytes this block was recompiled from.
     * This also points to the TB lookup table, which starts cTbLookupEntries
     * entries before the opcodes (we don't have room atm for another pointer). */
    uint8_t *pabOpcodes;

    /** Debug info if enabled.
     * This is only generated by the native recompiler. */
    PIEMTBDBG pDbgInfo;

    /* --- 64 byte cache line end --- */

    /** Opcode ranges.
     *
     * The opcode checkers and maybe TLB loading functions will use this to figure
     * out what to do. The parameter will specify an entry and the opcode offset to
     * start at and the minimum number of bytes to verify (instruction length).
     *
     * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
     * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
     * code TLB (must have a valid entry for that address) and scan the ranges to
     * locate the corresponding opcodes. Probably.
     */
    struct IEMTBOPCODERANGE
    {
        /** Offset within pabOpcodes. */
        uint16_t offOpcodes;
        /** Number of bytes. */
        uint16_t cbOpcodes;
        /** The page offset. */
        RT_GCC_EXTENSION
        uint16_t offPhysPage : 12;
        /** Unused bits. */
        RT_GCC_EXTENSION
        uint16_t u2Unused : 2;
        /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
        RT_GCC_EXTENSION
        uint16_t idxPhysPage : 2;
    } aRanges[8];

    /** Physical pages that this TB covers.
     * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
    RTGCPHYS aGCPhysPages[2];
} IEMTB;
#pragma pack()
AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
AssertCompileMemberOffset(IEMTB, aRanges, 64);
AssertCompileMemberSize(IEMTB, aRanges[0], 6);
#if 1
AssertCompileSize(IEMTB, 128);
# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
#else
AssertCompileSize(IEMTB, 168);
# undef IEMTB_SIZE_IS_POWER_OF_TWO
#endif

/** Pointer to a translation block. */
typedef IEMTB *PIEMTB;
/** Pointer to a const translation block. */
typedef IEMTB const *PCIEMTB;

/** Gets address of the given TB lookup table entry. */
#define IEMTB_GET_TB_LOOKUP_TAB_ENTRY(a_pTb, a_idx) \
    ((PIEMTB *)&(a_pTb)->pabOpcodes[-(int)((a_pTb)->cTbLookupEntries - (a_idx)) * sizeof(PIEMTB)])
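/* Example (sketch): resolving a call entry's lookup slot to the cached
 * next-TB pointer:
 * @code{.c}
 *  PIEMTB pTbNext = *IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pTb, IEM_TB_LOOKUP_TAB_GET_IDX(pCall->uTbLookup));
 * @endcode */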
1331
1332/**
1333 * Gets the physical address for a TB opcode range.
1334 */
1335DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
1336{
1337 Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
1338 uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
1339 Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
1340 if (idxPage == 0)
1341 return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1342 Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
1343 return pTb->aGCPhysPages[idxPage - 1];
1344}
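/* Illustrative sketch: combining the page address returned above with the
   12-bit offPhysPage field yields the guest physical address of the first
   opcode byte covered by an opcode range:

        RTGCPHYS const GCPhysRange = iemTbGetRangePhysPageAddr(pTb, idxRange)
                                   | pTb->aRanges[idxRange].offPhysPage;
*/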
1345
1346
1347/**
1348 * A chunk of memory in the TB allocator.
1349 */
1350typedef struct IEMTBCHUNK
1351{
1352 /** Pointer to the translation blocks in this chunk. */
1353 PIEMTB paTbs;
1354#ifdef IN_RING0
1355 /** Allocation handle. */
1356 RTR0MEMOBJ hMemObj;
1357#endif
1358} IEMTBCHUNK;
1359
1360/**
1361 * A per-CPU translation block allocator.
1362 *
1363 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1364 * the length of the collision list, and of course also for cache line alignment
1365 * reasons, the TBs must be allocated with at least 64-byte alignment.
1366 * Memory is therefore allocated using one of the page-aligned allocators.
1367 *
1368 *
1369 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1370 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1371 * that enables us to quickly calculate the allocation bitmap position when
1372 * freeing the translation block.
1373 */
1374typedef struct IEMTBALLOCATOR
1375{
1376 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1377 uint32_t uMagic;
1378
1379#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1380 /** Mask corresponding to cTbsPerChunk - 1. */
1381 uint32_t fChunkMask;
1382 /** Shift count corresponding to cTbsPerChunk. */
1383 uint8_t cChunkShift;
1384#else
1385 uint32_t uUnused;
1386 uint8_t bUnused;
1387#endif
1388 /** Number of chunks we're allowed to allocate. */
1389 uint8_t cMaxChunks;
1390 /** Number of chunks currently populated. */
1391 uint16_t cAllocatedChunks;
1392 /** Number of translation blocks per chunk. */
1393 uint32_t cTbsPerChunk;
1394 /** Chunk size. */
1395 uint32_t cbPerChunk;
1396
1397 /** The maximum number of TBs. */
1398 uint32_t cMaxTbs;
1399 /** Total number of TBs in the populated chunks.
1400 * (cAllocatedChunks * cTbsPerChunk) */
1401 uint32_t cTotalTbs;
1402 /** The current number of TBs in use.
1403 * The number of free TBs: cTotalTbs - cInUseTbs. */
1404 uint32_t cInUseTbs;
1405 /** Statistics: Number of the cInUseTbs that are native ones. */
1406 uint32_t cNativeTbs;
1407 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1408 uint32_t cThreadedTbs;
1409
1410 /** Where to start pruning TBs from when we're out.
1411 * See iemTbAllocatorAllocSlow for details. */
1412 uint32_t iPruneFrom;
1413 /** Hint about which bit to start scanning the bitmap from. */
1414 uint32_t iStartHint;
1415 /** Where to start pruning native TBs from when we're out of executable memory.
1416 * See iemTbAllocatorFreeupNativeSpace for details. */
1417 uint32_t iPruneNativeFrom;
1418 uint32_t uPadding;
1419
1420 /** Statistics: Number of TB allocation calls. */
1421 STAMCOUNTER StatAllocs;
1422 /** Statistics: Number of TB free calls. */
1423 STAMCOUNTER StatFrees;
1424 /** Statistics: Time spent pruning. */
1425 STAMPROFILE StatPrune;
1426 /** Statistics: Time spent pruning native TBs. */
1427 STAMPROFILE StatPruneNative;
1428
1429 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1430 PIEMTB pDelayedFreeHead;
1431
1432 /** Allocation chunks. */
1433 IEMTBCHUNK aChunks[256];
1434
1435 /** Allocation bitmap covering the TBs of all possible chunks. */
1436 RT_FLEXIBLE_ARRAY_EXTENSION
1437 uint64_t bmAllocated[RT_FLEXIBLE_ARRAY];
1438} IEMTBALLOCATOR;
1439/** Pointer to a TB allocator. */
1440typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1441
1442/** Magic value for the TB allocator (Emmet Harley Cohen). */
1443#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
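/* Illustrative sketch (a minimal example assuming IEMTB_SIZE_IS_POWER_OF_TWO
   and the iprt/asm.h bitmap helpers): how IEMTB::idxAllocChunk lets the
   allocator go straight from a TB pointer to its allocation bitmap bit when
   freeing, as mentioned in the IEMTBALLOCATOR description above:

        uint32_t const idxChunk   = pTb->idxAllocChunk;
        uint32_t const idxInChunk = (uint32_t)(pTb - pAllocator->aChunks[idxChunk].paTbs);
        // global TB index = chunk index scaled by TBs-per-chunk, plus index within chunk
        ASMBitClear(pAllocator->bmAllocated, (idxChunk << pAllocator->cChunkShift) + idxInChunk);
*/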
1444
1445
1446/**
1447 * A per-CPU translation block cache (hash table).
1448 *
1449 * The hash table is allocated once during IEM initialization and sized to
1450 * double the max TB count, rounded up to the nearest power of two (so we can
1451 * use an AND mask rather than a modulo when hashing).
1452 */
1453typedef struct IEMTBCACHE
1454{
1455 /** Magic value (IEMTBCACHE_MAGIC). */
1456 uint32_t uMagic;
1457 /** Size of the hash table. This is a power of two. */
1458 uint32_t cHash;
1459 /** The mask corresponding to cHash. */
1460 uint32_t uHashMask;
1461 uint32_t uPadding;
1462
1463 /** @name Statistics
1464 * @{ */
1465 /** Number of collisions ever. */
1466 STAMCOUNTER cCollisions;
1467
1468 /** Statistics: Number of TB lookup misses. */
1469 STAMCOUNTER cLookupMisses;
1470 /** Statistics: Number of TB lookup hits via hash table (debug only). */
1471 STAMCOUNTER cLookupHits;
1472 /** Statistics: Number of TB lookup hits via TB associated lookup table (debug only). */
1473 STAMCOUNTER cLookupHitsViaTbLookupTable;
1474 STAMCOUNTER auPadding2[2];
1475 /** Statistics: Collision list length pruning. */
1476 STAMPROFILE StatPrune;
1477 /** @} */
1478
1479 /** The hash table itself.
1480 * @note The lower 6 bits of the pointer is used for keeping the collision
1481 * list length, so we can take action when it grows too long.
1482 * This works because TBs are allocated using a 64 byte (or
1483 * higher) alignment from page aligned chunks of memory, so the lower
1484 * 6 bits of the address will always be zero.
1485 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1486 */
1487 RT_FLEXIBLE_ARRAY_EXTENSION
1488 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1489} IEMTBCACHE;
1490/** Pointer to a per-CPU translation block cache. */
1491typedef IEMTBCACHE *PIEMTBCACHE;
1492
1493/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1494#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1495
1496/** The collision count mask for IEMTBCACHE::apHash entries. */
1497#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1498/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1499#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1500/** Combine a TB pointer and a collision list length into a value for an
1501 * IEMTBCACHE::apHash entry. */
1502#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1503/** Get the TB pointer part of an IEMTBCACHE::apHash entry, masking off
1504 * the collision list length kept in the lower bits. */
1505#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1506/** Get the collision list length part of an IEMTBCACHE::apHash entry
1507 * (the lower 6 bits). */
1508#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
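/* Illustrative sketch (the chain link member name pNext is an assumption,
   as it is not shown in this part of the header): pushing a TB onto a hash
   chain while keeping the collision count in the low 6 bits up to date:

        uintptr_t const cCollisions = IEMTBCACHE_PTR_GET_COUNT(pCache->apHash[idxHash]);
        pTb->pNext = IEMTBCACHE_PTR_GET_TB(pCache->apHash[idxHash]);
        pCache->apHash[idxHash] = IEMTBCACHE_PTR_MAKE(pTb, cCollisions + 1);
*/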
1509
1510/**
1511 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1512 */
1513#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1514 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1515
1516/**
1517 * Calculates the hash table slot for a TB from physical PC address and TB
1518 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1519 */
1520#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1521 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
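/* Illustrative sketch (a simplified lookup; the fFlags and pNext member
   names are assumptions, and pruning plus statistics are left out): how the
   hash macro and the tagged head pointer combine into a cache lookup:

        static PIEMTB iemTbCacheLookupExample(PIEMTBCACHE pCache, RTGCPHYS GCPhysPc, uint32_t fFlags)
        {
            uint32_t const idxHash = IEMTBCACHE_HASH(pCache, fFlags, GCPhysPc);
            for (PIEMTB pTb = IEMTBCACHE_PTR_GET_TB(pCache->apHash[idxHash]); pTb; pTb = pTb->pNext)
                if (   pTb->GCPhysPc == GCPhysPc
                    && (pTb->fFlags & IEMTB_F_KEY_MASK) == (fFlags & IEMTB_F_KEY_MASK))
                    return pTb;
            return NULL;
        }
*/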
1522
1523
1524/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1525 *
1526 * These flags parallel the main IEM_CIMPL_F_BRANCH_XXX flags.
1527 *
1528 * @{ */
1529/** Value if no branching happened recently. */
1530#define IEMBRANCHED_F_NO UINT8_C(0x00)
1531/** Flag set if direct branch, clear if absolute or indirect. */
1532#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1533/** Flag set if indirect branch, clear if direct or relative. */
1534#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1535/** Flag set if relative branch, clear if absolute or indirect. */
1536#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1537/** Flag set if conditional branch, clear if unconditional. */
1538#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1539/** Flag set if it's a far branch. */
1540#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1541/** Flag set if the stack pointer is modified. */
1542#define IEMBRANCHED_F_STACK UINT8_C(0x20)
1543/** Flag set if the stack pointer and (maybe) the stack segment are modified. */
1544#define IEMBRANCHED_F_STACK_FAR UINT8_C(0x40)
1545/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero-byte relative jump. */
1546#define IEMBRANCHED_F_ZERO UINT8_C(0x80)
1547/** @} */
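/* Illustrative sketch: the recompiler can test the branched indicator in
   IEMCPU::fTbBranched like this when deciding whether a TB needs extra
   checking after a branch:

        if (pVCpu->iem.s.fTbBranched & (IEMBRANCHED_F_FAR | IEMBRANCHED_F_STACK_FAR))
            // far transfer: mode and CS.LIM may have changed ...
*/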
1548
1549
1550/**
1551 * The per-CPU IEM state.
1552 */
1553typedef struct IEMCPU
1554{
1555 /** Info status code that needs to be propagated to the IEM caller.
1556 * This cannot be passed internally, as it would complicate all success
1557 * checks within the interpreter making the code larger and almost impossible
1558 * to get right. Instead, we'll store status codes to pass on here. Each
1559 * source of these codes will perform appropriate sanity checks. */
1560 int32_t rcPassUp; /* 0x00 */
1561 /** Execution flag, IEM_F_XXX. */
1562 uint32_t fExec; /* 0x04 */
1563
1564 /** @name Decoder state.
1565 * @{ */
1566#ifdef IEM_WITH_CODE_TLB
1567 /** The offset of the next instruction byte. */
1568 uint32_t offInstrNextByte; /* 0x08 */
1569 /** The number of bytes available at pbInstrBuf for the current instruction.
1570 * This takes the max opcode length into account so that doesn't need to be
1571 * This takes the max opcode length into account so that it doesn't need to be
1572 uint32_t cbInstrBuf; /* 0x0c */
1573 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1574 * This can be NULL if the page isn't mappable for some reason, in which
1575 * case we'll do fallback stuff.
1576 *
1577 * If we're executing an instruction from a user specified buffer,
1578 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1579 * aligned pointer but a pointer to the user data.
1580 *
1581 * For instructions crossing pages, this will start on the first page and be
1582 * advanced to the next page by the time we've decoded the instruction. This
1583 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1584 */
1585 uint8_t const *pbInstrBuf; /* 0x10 */
1586# if ARCH_BITS == 32
1587 uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
1588# endif
1589 /** The program counter corresponding to pbInstrBuf.
1590 * This is set to a non-canonical address when we need to invalidate it. */
1591 uint64_t uInstrBufPc; /* 0x18 */
1592 /** The guest physical address corresponding to pbInstrBuf. */
1593 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1594 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1595 * This takes the CS segment limit into account.
1596 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1597 uint16_t cbInstrBufTotal; /* 0x28 */
1598 /** Offset into pbInstrBuf of the first byte of the current instruction.
1599 * Can be negative to efficiently handle cross page instructions. */
1600 int16_t offCurInstrStart; /* 0x2a */
1601
1602# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1603 /** The prefix mask (IEM_OP_PRF_XXX). */
1604 uint32_t fPrefixes; /* 0x2c */
1605 /** The extra REX ModR/M register field bit (REX.R << 3). */
1606 uint8_t uRexReg; /* 0x30 */
1607 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1608 * (REX.B << 3). */
1609 uint8_t uRexB; /* 0x31 */
1610 /** The extra REX SIB index field bit (REX.X << 3). */
1611 uint8_t uRexIndex; /* 0x32 */
1612
1613 /** The effective segment register (X86_SREG_XXX). */
1614 uint8_t iEffSeg; /* 0x33 */
1615
1616 /** The offset of the ModR/M byte relative to the start of the instruction. */
1617 uint8_t offModRm; /* 0x34 */
1618
1619# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1620 /** The current offset into abOpcode. */
1621 uint8_t offOpcode; /* 0x35 */
1622# else
1623 uint8_t bUnused; /* 0x35 */
1624# endif
1625# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1626 uint8_t abOpaqueDecoderPart1[0x36 - 0x2c];
1627# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1628
1629#else /* !IEM_WITH_CODE_TLB */
1630# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1631 /** The size of what has currently been fetched into abOpcode. */
1632 uint8_t cbOpcode; /* 0x08 */
1633 /** The current offset into abOpcode. */
1634 uint8_t offOpcode; /* 0x09 */
1635 /** The offset of the ModR/M byte relative to the start of the instruction. */
1636 uint8_t offModRm; /* 0x0a */
1637
1638 /** The effective segment register (X86_SREG_XXX). */
1639 uint8_t iEffSeg; /* 0x0b */
1640
1641 /** The prefix mask (IEM_OP_PRF_XXX). */
1642 uint32_t fPrefixes; /* 0x0c */
1643 /** The extra REX ModR/M register field bit (REX.R << 3). */
1644 uint8_t uRexReg; /* 0x10 */
1645 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1646 * (REX.B << 3). */
1647 uint8_t uRexB; /* 0x11 */
1648 /** The extra REX SIB index field bit (REX.X << 3). */
1649 uint8_t uRexIndex; /* 0x12 */
1650
1651# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1652 uint8_t abOpaqueDecoderPart1[0x13 - 0x08];
1653# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1654#endif /* !IEM_WITH_CODE_TLB */
1655
1656#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1657 /** The effective operand mode. */
1658 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1659 /** The default addressing mode. */
1660 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1661 /** The effective addressing mode. */
1662 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1663 /** The default operand mode. */
1664 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1665
1666 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1667 uint8_t idxPrefix; /* 0x3a, 0x17 */
1668 /** 3rd VEX/EVEX/XOP register.
1669 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1670 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1671 /** The VEX/EVEX/XOP length field. */
1672 uint8_t uVexLength; /* 0x3c, 0x19 */
1673 /** Additional EVEX stuff. */
1674 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1675
1676# ifndef IEM_WITH_CODE_TLB
1677 /** Explicit alignment padding. */
1678 uint8_t abAlignment2a[1]; /* 0x1b */
1679# endif
1680 /** The FPU opcode (FOP). */
1681 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1682# ifndef IEM_WITH_CODE_TLB
1683 /** Explicit alignment padding. */
1684 uint8_t abAlignment2b[2]; /* 0x1e */
1685# endif
1686
1687 /** The opcode bytes. */
1688 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1689 /** Explicit alignment padding. */
1690# ifdef IEM_WITH_CODE_TLB
1691 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1692# else
1693 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1694# endif
1695
1696#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1697# ifdef IEM_WITH_CODE_TLB
1698 uint8_t abOpaqueDecoderPart2[0x4f - 0x36];
1699# else
1700 uint8_t abOpaqueDecoderPart2[0x4f - 0x13];
1701# endif
1702#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1703 /** @} */
1704
1705
1706 /** The number of active guest memory mappings. */
1707 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1708
1709 /** Records for tracking guest memory mappings. */
1710 struct
1711 {
1712 /** The address of the mapped bytes. */
1713 R3R0PTRTYPE(void *) pv;
1714 /** The access flags (IEM_ACCESS_XXX).
1715 * IEM_ACCESS_INVALID if the entry is unused. */
1716 uint32_t fAccess;
1717#if HC_ARCH_BITS == 64
1718 uint32_t u32Alignment4; /**< Alignment padding. */
1719#endif
1720 } aMemMappings[3]; /* 0x50 LB 0x30 */
1721
1722 /** Locking records for the mapped memory. */
1723 union
1724 {
1725 PGMPAGEMAPLOCK Lock;
1726 uint64_t au64Padding[2];
1727 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1728
1729 /** Bounce buffer info.
1730 * This runs in parallel to aMemMappings. */
1731 struct
1732 {
1733 /** The physical address of the first byte. */
1734 RTGCPHYS GCPhysFirst;
1735 /** The physical address of the second page. */
1736 RTGCPHYS GCPhysSecond;
1737 /** The number of bytes in the first page. */
1738 uint16_t cbFirst;
1739 /** The number of bytes in the second page. */
1740 uint16_t cbSecond;
1741 /** Whether it's unassigned memory. */
1742 bool fUnassigned;
1743 /** Explicit alignment padding. */
1744 bool afAlignment5[3];
1745 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1746
1747 /** The flags of the current exception / interrupt. */
1748 uint32_t fCurXcpt; /* 0xf8 */
1749 /** The current exception / interrupt. */
1750 uint8_t uCurXcpt; /* 0xfc */
1751 /** Exception / interrupt recursion depth. */
1752 int8_t cXcptRecursions; /* 0xfd */
1753
1754 /** The next unused mapping index.
1755 * @todo try find room for this up with cActiveMappings. */
1756 uint8_t iNextMapping; /* 0xfe */
1757 uint8_t abAlignment7[1];
1758
1759 /** Bounce buffer storage.
1760 * This runs in parallel to aMemMappings and aMemBbMappings. */
1761 struct
1762 {
1763 uint8_t ab[512];
1764 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1765
1766
1767 /** Pointer set jump buffer - ring-3 context. */
1768 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1769 /** Pointer set jump buffer - ring-0 context. */
1770 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1771
1772 /** @todo Should move this near @a fCurXcpt later. */
1773 /** The CR2 for the current exception / interrupt. */
1774 uint64_t uCurXcptCr2;
1775 /** The error code for the current exception / interrupt. */
1776 uint32_t uCurXcptErr;
1777
1778 /** @name Statistics
1779 * @{ */
1780 /** The number of instructions we've executed. */
1781 uint32_t cInstructions;
1782 /** The number of potential exits. */
1783 uint32_t cPotentialExits;
1784 /** The number of bytes of data or stack written (mostly for IEMExecOneEx).
1785 * This may contain uncommitted writes. */
1786 uint32_t cbWritten;
1787 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1788 uint32_t cRetInstrNotImplemented;
1789 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1790 uint32_t cRetAspectNotImplemented;
1791 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1792 uint32_t cRetInfStatuses;
1793 /** Counts other error statuses returned. */
1794 uint32_t cRetErrStatuses;
1795 /** Number of times rcPassUp has been used. */
1796 uint32_t cRetPassUpStatus;
1797 /** Number of times RZ left with instruction commit pending for ring-3. */
1798 uint32_t cPendingCommit;
1799 /** Number of misaligned (host sense) atomic instruction accesses. */
1800 uint32_t cMisalignedAtomics;
1801 /** Number of long jumps. */
1802 uint32_t cLongJumps;
1803 /** @} */
1804
1805 /** @name Target CPU information.
1806 * @{ */
1807#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1808 /** The target CPU. */
1809 uint8_t uTargetCpu;
1810#else
1811 uint8_t bTargetCpuPadding;
1812#endif
1813 /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
1814 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry used when there
1815 * is no native host support and the 2nd when there is.
1816 *
1817 * The two values are typically indexed by a g_CpumHostFeatures bit.
1818 *
1819 * This is for instance used for the BSF & BSR instructions where AMD and
1820 * Intel CPUs produce different EFLAGS. */
1821 uint8_t aidxTargetCpuEflFlavour[2];
1822
1823 /** The CPU vendor. */
1824 CPUMCPUVENDOR enmCpuVendor;
1825 /** @} */
1826
1827 /** @name Host CPU information.
1828 * @{ */
1829 /** The CPU vendor. */
1830 CPUMCPUVENDOR enmHostCpuVendor;
1831 /** @} */
1832
1833 /** Counts RDMSR \#GP(0) LogRel(). */
1834 uint8_t cLogRelRdMsr;
1835 /** Counts WRMSR \#GP(0) LogRel(). */
1836 uint8_t cLogRelWrMsr;
1837 /** Alignment padding. */
1838 uint8_t abAlignment9[42];
1839
1840 /** @name Recompilation
1841 * @{ */
1842 /** Pointer to the current translation block.
1843 * This can either be one being executed or one being compiled. */
1844 R3PTRTYPE(PIEMTB) pCurTbR3;
1845#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
1846 /** Frame pointer for the last native TB to execute. */
1847 R3PTRTYPE(void *) pvTbFramePointerR3;
1848#else
1849 R3PTRTYPE(void *) pvUnusedR3;
1850#endif
1851 /** Fixed TB used for threaded recompilation.
1852 * This is allocated once with maxed-out sizes and re-used afterwards. */
1853 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1854 /** Pointer to the ring-3 TB cache for this EMT. */
1855 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
1856 /** Pointer to the ring-3 TB lookup entry.
1857 * This either points to pTbLookupEntryDummyR3 or an actual lookup table
1858 * entry, thus it can always safely be used w/o NULL checking. */
1859 R3PTRTYPE(PIEMTB *) ppTbLookupEntryR3;
1860 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
1861 * The TBs are based on physical addresses, so this is needed to correleated
1862 * The TBs are based on physical addresses, so this is needed to correlate
1863 uint64_t uCurTbStartPc;
1864 /** Number of threaded TBs executed. */
1865 uint64_t cTbExecThreaded;
1866 /** Number of native TBs executed. */
1867 uint64_t cTbExecNative;
1868 /** Whether we need to check the opcode bytes for the current instruction.
1869 * This is set by a previous instruction if it modified memory or similar. */
1870 bool fTbCheckOpcodes;
1871 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1872 uint8_t fTbBranched;
1873 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1874 bool fTbCrossedPage;
1875 /** Whether to end the current TB. */
1876 bool fEndTb;
1877 /** Number of instructions before we need to emit an IRQ check call again.
1878 * This helps make sure we don't execute too long w/o checking for
1879 * interrupts and immediately following instructions that may enable
1880 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1881 * required to make sure we check following the next instruction as well, see
1882 * fTbCurInstrIsSti. */
1883 uint8_t cInstrTillIrqCheck;
1884 /** Indicates that the current instruction is an STI. This is set by the
1885 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1886 bool fTbCurInstrIsSti;
1887 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1888 uint16_t cbOpcodesAllocated;
1889 /** The current instruction number in a native TB.
1890 * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
1891 * and will be picked up by the TB execution loop. Only used when
1892 * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
1893 uint8_t idxTbCurInstr;
1894 /** Space reserved for recompiler data / alignment. */
1895 bool afRecompilerStuff1[3];
1896 /** The virtual sync time at the last timer poll call. */
1897 uint32_t msRecompilerPollNow;
1898 /** The IEMTB::cUsed value when to attempt native recompilation of a TB. */
1899 uint32_t uTbNativeRecompileAtUsedCount;
1900 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1901 uint32_t fTbCurInstr;
1902 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1903 uint32_t fTbPrevInstr;
1904 /** Strict: Tracking skipped EFLAGS calculations. Any bits set here are
1905 * currently not up to date in EFLAGS. */
1906 uint32_t fSkippingEFlags;
1907 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1908 RTGCPHYS GCPhysInstrBufPrev;
1909 /** Pointer to the ring-3 TB allocator for this EMT. */
1910 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1911 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1912 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1913 /** Pointer to the native recompiler state for ring-3. */
1914 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1915 /** Dummy entry for ppTbLookupEntryR3. */
1916 R3PTRTYPE(PIEMTB) pTbLookupEntryDummyR3;
1917
1918 /** Threaded TB statistics: Times TB execution was broken off before reaching the end. */
1919 STAMCOUNTER StatTbThreadedExecBreaks;
1920 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1921 STAMCOUNTER StatCheckIrqBreaks;
1922 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1923 STAMCOUNTER StatCheckModeBreaks;
1924 /** Threaded TB statistics: Times execution was broken off on a call with lookup entries. */
1925 STAMCOUNTER StatTbThreadedExecBreaksWithLookup;
1926 /** Threaded TB statistics: Times execution was broken off on a call without lookup entries. */
1927 STAMCOUNTER StatTbThreadedExecBreaksWithoutLookup;
1928 /** Statistics: Times a post jump target check missed and we had to find a new TB. */
1929 STAMCOUNTER StatCheckBranchMisses;
1930 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1931 STAMCOUNTER StatCheckNeedCsLimChecking;
1932 /** Statistics: Times a loop was detected within a TB. */
1933 STAMCOUNTER StatTbLoopInTbDetected;
1934 /** Exec memory allocator statistics: Number of times allocating executable memory failed. */
1935 STAMCOUNTER StatNativeExecMemInstrBufAllocFailed;
1936 /** Native TB statistics: Number of fully recompiled TBs. */
1937 STAMCOUNTER StatNativeFullyRecompiledTbs;
1938 /** TB statistics: Number of instructions per TB. */
1939 STAMPROFILE StatTbInstr;
1940 /** TB statistics: Number of TB lookup table entries per TB. */
1941 STAMPROFILE StatTbLookupEntries;
1942 /** Threaded TB statistics: Number of calls per TB. */
1943 STAMPROFILE StatTbThreadedCalls;
1944 /** Native TB statistics: Native code size per TB. */
1945 STAMPROFILE StatTbNativeCode;
1946 /** Native TB statistics: Profiling native recompilation. */
1947 STAMPROFILE StatNativeRecompilation;
1948 /** Native TB statistics: Number of calls per TB that were recompiled properly. */
1949 STAMPROFILE StatNativeCallsRecompiled;
1950 /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
1951 STAMPROFILE StatNativeCallsThreaded;
1952 /** Native recompiled execution: TLB hits for data fetches. */
1953 STAMCOUNTER StatNativeTlbHitsForFetch;
1954 /** Native recompiled execution: TLB hits for data stores. */
1955 STAMCOUNTER StatNativeTlbHitsForStore;
1956 /** Native recompiled execution: TLB hits for stack accesses. */
1957 STAMCOUNTER StatNativeTlbHitsForStack;
1958 /** Native recompiled execution: TLB hits for mapped accesses. */
1959 STAMCOUNTER StatNativeTlbHitsForMapped;
1960 /** Native recompiled execution: Code TLB misses for new page. */
1961 STAMCOUNTER StatNativeCodeTlbMissesNewPage;
1962 /** Native recompiled execution: Code TLB hits for new page. */
1963 STAMCOUNTER StatNativeCodeTlbHitsForNewPage;
1964 /** Native recompiled execution: Code TLB misses for new page with offset. */
1965 STAMCOUNTER StatNativeCodeTlbMissesNewPageWithOffset;
1966 /** Native recompiled execution: Code TLB hits for new page with offset. */
1967 STAMCOUNTER StatNativeCodeTlbHitsForNewPageWithOffset;
1968
1969 /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
1970 STAMCOUNTER StatNativeRegFindFree;
1971 /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
1972 * to free a variable. */
1973 STAMCOUNTER StatNativeRegFindFreeVar;
1974 /** Native recompiler: Number of times iemNativeRegAllocFindFree did
1975 * not need to free any variables. */
1976 STAMCOUNTER StatNativeRegFindFreeNoVar;
1977 /** Native recompiler: Liveness info freed shadowed guest registers in
1978 * iemNativeRegAllocFindFree. */
1979 STAMCOUNTER StatNativeRegFindFreeLivenessUnshadowed;
1980 /** Native recompiler: Liveness info helped with the allocation in
1981 * iemNativeRegAllocFindFree. */
1982 STAMCOUNTER StatNativeRegFindFreeLivenessHelped;
1983
1984 /** Native recompiler: Number of times status flags calc has been skipped for arithmetic instructions. */
1985 STAMCOUNTER StatNativeEflSkippedArithmetic;
1986 /** Native recompiler: Number of times status flags calc has been skipped for logical instructions. */
1987 STAMCOUNTER StatNativeEflSkippedLogical;
1988
1989 /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
1990 STAMCOUNTER StatNativeLivenessEflCfSkippable;
1991 /** Native recompiler: Number of opportunities to skip EFLAGS.PF updating. */
1992 STAMCOUNTER StatNativeLivenessEflPfSkippable;
1993 /** Native recompiler: Number of opportunities to skip EFLAGS.AF updating. */
1994 STAMCOUNTER StatNativeLivenessEflAfSkippable;
1995 /** Native recompiler: Number of opportunities to skip EFLAGS.ZF updating. */
1996 STAMCOUNTER StatNativeLivenessEflZfSkippable;
1997 /** Native recompiler: Number of opportunities to skip EFLAGS.SF updating. */
1998 STAMCOUNTER StatNativeLivenessEflSfSkippable;
1999 /** Native recompiler: Number of opportunities to skip EFLAGS.OF updating. */
2000 STAMCOUNTER StatNativeLivenessEflOfSkippable;
2001 /** Native recompiler: Number of required EFLAGS.CF updates. */
2002 STAMCOUNTER StatNativeLivenessEflCfRequired;
2003 /** Native recompiler: Number of required EFLAGS.PF updates. */
2004 STAMCOUNTER StatNativeLivenessEflPfRequired;
2005 /** Native recompiler: Number of required EFLAGS.AF updates. */
2006 STAMCOUNTER StatNativeLivenessEflAfRequired;
2007 /** Native recompiler: Number of required EFLAGS.ZF updates. */
2008 STAMCOUNTER StatNativeLivenessEflZfRequired;
2009 /** Native recompiler: Number of required EFLAGS.SF updates. */
2010 STAMCOUNTER StatNativeLivenessEflSfRequired;
2011 /** Native recompiler: Number of required EFLAGS.OF updates. */
2012 STAMCOUNTER StatNativeLivenessEflOfRequired;
2013 /** Native recompiler: Number of potentially delayable EFLAGS.CF updates. */
2014 STAMCOUNTER StatNativeLivenessEflCfDelayable;
2015 /** Native recompiler: Number of potentially delayable EFLAGS.PF updates. */
2016 STAMCOUNTER StatNativeLivenessEflPfDelayable;
2017 /** Native recompiler: Number of potentially delayable EFLAGS.AF updates. */
2018 STAMCOUNTER StatNativeLivenessEflAfDelayable;
2019 /** Native recompiler: Number of potentially delayable EFLAGS.ZF updates. */
2020 STAMCOUNTER StatNativeLivenessEflZfDelayable;
2021 /** Native recompiler: Number of potentially delayable EFLAGS.SF updates. */
2022 STAMCOUNTER StatNativeLivenessEflSfDelayable;
2023 /** Native recompiler: Number of potentially delayable EFLAGS.OF updates. */
2024 STAMCOUNTER StatNativeLivenessEflOfDelayable;
2025
2026 /** Native recompiler: Number of potential PC updates in total. */
2027 STAMCOUNTER StatNativePcUpdateTotal;
2028 /** Native recompiler: Number of PC updates which could be delayed. */
2029 STAMCOUNTER StatNativePcUpdateDelayed;
2030
2031//#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
2032 /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
2033 STAMCOUNTER StatNativeSimdRegFindFree;
2034 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree needed
2035 * to free a variable. */
2036 STAMCOUNTER StatNativeSimdRegFindFreeVar;
2037 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree did
2038 * not need to free any variables. */
2039 STAMCOUNTER StatNativeSimdRegFindFreeNoVar;
2040 /** Native recompiler: Liveness info freed shadowed guest registers in
2041 * iemNativeSimdRegAllocFindFree. */
2042 STAMCOUNTER StatNativeSimdRegFindFreeLivenessUnshadowed;
2043 /** Native recompiler: Liveness info helped with the allocation in
2044 * iemNativeSimdRegAllocFindFree. */
2045 STAMCOUNTER StatNativeSimdRegFindFreeLivenessHelped;
2046
2047 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks. */
2048 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckPotential;
2049 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks. */
2050 STAMCOUNTER StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential;
2051 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks. */
2052 STAMCOUNTER StatNativeMaybeSseXcptCheckPotential;
2053 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks. */
2054 STAMCOUNTER StatNativeMaybeAvxXcptCheckPotential;
2055
2056 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted. */
2057 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckOmitted;
2058 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted. */
2059 STAMCOUNTER StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted;
2060 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted. */
2061 STAMCOUNTER StatNativeMaybeSseXcptCheckOmitted;
2062 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted. */
2063 STAMCOUNTER StatNativeMaybeAvxXcptCheckOmitted;
2064//#endif
2065
2066 /** Native recompiler: The TB finished executing completely without jumping to an exit label.
2067 * Not available in release builds. */
2068 STAMCOUNTER StatNativeTbFinished;
2069 /** Native recompiler: The TB finished executing jumping to the ReturnBreak label. */
2070 STAMCOUNTER StatNativeTbExitReturnBreak;
2071 /** Native recompiler: The TB finished executing jumping to the ReturnBreakFF label. */
2072 STAMCOUNTER StatNativeTbExitReturnBreakFF;
2073 /** Native recompiler: The TB finished executing jumping to the ReturnWithFlags label. */
2074 STAMCOUNTER StatNativeTbExitReturnWithFlags;
2075 /** Native recompiler: The TB finished executing with other non-zero status. */
2076 STAMCOUNTER StatNativeTbExitReturnOtherStatus;
2077 /** Native recompiler: The TB finished executing via throw / long jump. */
2078 STAMCOUNTER StatNativeTbExitLongJump;
2079 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2080 * label, but directly jumped to the next TB, scenario \#1 w/o IRQ checks. */
2081 STAMCOUNTER StatNativeTbExitDirectLinking1NoIrq;
2082 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2083 * label, but directly jumped to the next TB, scenario \#1 with IRQ checks. */
2084 STAMCOUNTER StatNativeTbExitDirectLinking1Irq;
2085 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2086 * label, but directly jumped to the next TB, scenario \#2 w/o IRQ checks. */
2087 STAMCOUNTER StatNativeTbExitDirectLinking2NoIrq;
2088 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2089 * label, but directly jumped to the next TB, scenario \#2 with IRQ checks. */
2090 STAMCOUNTER StatNativeTbExitDirectLinking2Irq;
2091
2092 /** Native recompiler: The TB finished executing jumping to the RaiseDe label. */
2093 STAMCOUNTER StatNativeTbExitRaiseDe;
2094 /** Native recompiler: The TB finished executing jumping to the RaiseUd label. */
2095 STAMCOUNTER StatNativeTbExitRaiseUd;
2096 /** Native recompiler: The TB finished executing jumping to the RaiseSseRelated label. */
2097 STAMCOUNTER StatNativeTbExitRaiseSseRelated;
2098 /** Native recompiler: The TB finished executing jumping to the RaiseAvxRelated label. */
2099 STAMCOUNTER StatNativeTbExitRaiseAvxRelated;
2100 /** Native recompiler: The TB finished executing jumping to the RaiseSseAvxFpRelated label. */
2101 STAMCOUNTER StatNativeTbExitRaiseSseAvxFpRelated;
2102 /** Native recompiler: The TB finished executing jumping to the RaiseNm label. */
2103 STAMCOUNTER StatNativeTbExitRaiseNm;
2104 /** Native recompiler: The TB finished executing jumping to the RaiseGp0 label. */
2105 STAMCOUNTER StatNativeTbExitRaiseGp0;
2106 /** Native recompiler: The TB finished executing jumping to the RaiseMf label. */
2107 STAMCOUNTER StatNativeTbExitRaiseMf;
2108 /** Native recompiler: The TB finished executing jumping to the RaiseXf label. */
2109 STAMCOUNTER StatNativeTbExitRaiseXf;
2110 /** Native recompiler: The TB finished executing jumping to the ObsoleteTb label. */
2111 STAMCOUNTER StatNativeTbExitObsoleteTb;
2112
2113 /** Native recompiler: Failure situations with direct linking scenario \#1.
2114 * Counted together with StatNativeTbExitReturnBreak. Not in release builds.
2115 * @{ */
2116 STAMCOUNTER StatNativeTbExitDirectLinking1NoTb;
2117 STAMCOUNTER StatNativeTbExitDirectLinking1MismatchGCPhysPc;
2118 STAMCOUNTER StatNativeTbExitDirectLinking1MismatchFlags;
2119 STAMCOUNTER StatNativeTbExitDirectLinking1PendingIrq;
2120 /** @} */
2121
2122 /** Native recompiler: Failure situations with direct linking scenario \#2.
2123 * Counted together with StatNativeTbExitReturnBreak. Not in release builds.
2124 * @{ */
2125 STAMCOUNTER StatNativeTbExitDirectLinking2NoTb;
2126 STAMCOUNTER StatNativeTbExitDirectLinking2MismatchGCPhysPc;
2127 STAMCOUNTER StatNativeTbExitDirectLinking2MismatchFlags;
2128 STAMCOUNTER StatNativeTbExitDirectLinking2PendingIrq;
2129 /** @} */
2130
2131 uint64_t au64Padding[5];
2132 /** @} */
2133
2134 /** Data TLB.
2135 * @remarks Must be 64-byte aligned. */
2136 IEMTLB DataTlb;
2137 /** Instruction TLB.
2138 * @remarks Must be 64-byte aligned. */
2139 IEMTLB CodeTlb;
2140
2141 /** Exception statistics. */
2142 STAMCOUNTER aStatXcpts[32];
2143 /** Interrupt statistics. */
2144 uint32_t aStatInts[256];
2145
2146#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
2147 /** Instruction statistics for ring-0/raw-mode. */
2148 IEMINSTRSTATS StatsRZ;
2149 /** Instruction statistics for ring-3. */
2150 IEMINSTRSTATS StatsR3;
2151# ifdef VBOX_WITH_IEM_RECOMPILER
2152 /** Statistics per threaded function call.
2153 * Updated by both the threaded and native recompilers. */
2154 uint32_t acThreadedFuncStats[0x6000 /*24576*/];
2155# endif
2156#endif
2157} IEMCPU;
2158AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
2159AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
2160AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
2161AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
2162AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
2163AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
2164
2165/** Pointer to the per-CPU IEM state. */
2166typedef IEMCPU *PIEMCPU;
2167/** Pointer to the const per-CPU IEM state. */
2168typedef IEMCPU const *PCIEMCPU;
2169
2170
2171/** @def IEM_GET_CTX
2172 * Gets the guest CPU context for the calling EMT.
2173 * @returns PCPUMCTX
2174 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2175 */
2176#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
2177
2178/** @def IEM_CTX_ASSERT
2179 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
2180 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2181 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
2182 */
2183#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
2184 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
2185 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
2186 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
2187
2188/** @def IEM_CTX_IMPORT_RET
2189 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2190 *
2191 * Will call the keeper (CPUM) to import the bits as needed.
2192 *
2193 * Returns on import failure.
2194 *
2195 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2196 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2197 */
2198#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
2199 do { \
2200 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2201 { /* likely */ } \
2202 else \
2203 { \
2204 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2205 AssertRCReturn(rcCtxImport, rcCtxImport); \
2206 } \
2207 } while (0)
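/* Illustrative sketch: typical use in an instruction helper that must have
   CR0 imported before reading it (CPUMCTX_EXTRN_CR0 is the standard CPUM
   flag for that register):

        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR0);
        uint64_t const uCr0 = pVCpu->cpum.GstCtx.cr0;
*/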
2208
2209/** @def IEM_CTX_IMPORT_NORET
2210 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2211 *
2212 * Will call the keeper (CPUM) to import the bits as needed.
2213 *
2214 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2215 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2216 */
2217#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
2218 do { \
2219 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2220 { /* likely */ } \
2221 else \
2222 { \
2223 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2224 AssertLogRelRC(rcCtxImport); \
2225 } \
2226 } while (0)
2227
2228/** @def IEM_CTX_IMPORT_JMP
2229 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2230 *
2231 * Will call the keeper (CPUM) to import the bits as needed.
2232 *
2233 * Jumps on import failure.
2234 *
2235 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2236 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2237 */
2238#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
2239 do { \
2240 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2241 { /* likely */ } \
2242 else \
2243 { \
2244 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2245 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(a_pVCpu, rcCtxImport)); \
2246 } \
2247 } while (0)
2248
2249
2250
2251/** @def IEM_GET_TARGET_CPU
2252 * Gets the current IEMTARGETCPU value.
2253 * @returns IEMTARGETCPU value.
2254 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2255 */
2256#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
2257# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
2258#else
2259# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
2260#endif
2261
2262/** @def IEM_GET_INSTR_LEN
2263 * Gets the instruction length. */
2264#ifdef IEM_WITH_CODE_TLB
2265# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
2266#else
2267# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
2268#endif
2269
2270/** @def IEM_TRY_SETJMP
2271 * Wrapper around setjmp / try, hiding all the ugly differences.
2272 *
2273 * @note Use with extreme care as this is a fragile macro.
2274 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2275 * @param a_rcTarget The variable that should receive the status code in case
2276 * of a longjmp/throw.
2277 */
2278/** @def IEM_TRY_SETJMP_AGAIN
2279 * For when setjmp / try is used again in the same variable scope as a previous
2280 * IEM_TRY_SETJMP invocation.
2281 */
2282/** @def IEM_CATCH_LONGJMP_BEGIN
2283 * Start wrapper for catch / setjmp-else.
2284 *
2285 * This will set up a scope.
2286 *
2287 * @note Use with extreme care as this is a fragile macro.
2288 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2289 * @param a_rcTarget The variable that should receive the status code in case
2290 * of a longjmp/throw.
2291 */
2292/** @def IEM_CATCH_LONGJMP_END
2293 * End wrapper for catch / setjmp-else.
2294 *
2295 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
2296 * state.
2297 *
2298 * @note Use with extreme care as this is a fragile macro.
2299 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2300 */
2301#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
2302# ifdef IEM_WITH_THROW_CATCH
2303# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2304 a_rcTarget = VINF_SUCCESS; \
2305 try
2306# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2307 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
2308# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2309 catch (int rcThrown) \
2310 { \
2311 a_rcTarget = rcThrown
2312# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2313 } \
2314 ((void)0)
2315# else /* !IEM_WITH_THROW_CATCH */
2316# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2317 jmp_buf JmpBuf; \
2318 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2319 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2320 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
2321# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2322 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2323 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2324 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
2325# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2326 else \
2327 { \
2328 ((void)0)
2329# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2330 } \
2331 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
2332# endif /* !IEM_WITH_THROW_CATCH */
2333#endif /* IEM_WITH_SETJMP */
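/* Illustrative sketch: the intended usage pattern for the wrappers above
   (the status variable is declared by the caller; the bodies are placeholders):

        VBOXSTRICTRC rcStrict = VINF_SUCCESS;
        IEM_TRY_SETJMP(pVCpu, rcStrict)
        {
            // code that may exit via IEM_DO_LONGJMP(pVCpu, rc) / C++ throw ...
        }
        IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
        // recovery: rcStrict now holds the longjmp/throw status ...
        IEM_CATCH_LONGJMP_END(pVCpu);
*/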
2334
2335
2336/**
2337 * Shared per-VM IEM data.
2338 */
2339typedef struct IEM
2340{
2341 /** The VMX APIC-access page handler type. */
2342 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
2343#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
2344 /** Set if the CPUID host call functionality is enabled. */
2345 bool fCpuIdHostCall;
2346#endif
2347} IEM;
2348
2349
2350
2351/** @name IEM_ACCESS_XXX - Access details.
2352 * @{ */
2353#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
2354#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
2355#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
2356#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
2357#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
2358#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
2359#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
2360#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
2361#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
2362#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
2363/** The writes are partial, so initialize the bounce buffer with the
2364 * original RAM content. */
2365#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
2366/** Used in aMemMappings to indicate that the entry is bounce buffered. */
2367#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
2368/** Bounce buffer with ring-3 write pending, first page. */
2369#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
2370/** Bounce buffer with ring-3 write pending, second page. */
2371#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
2372/** Not locked, accessed via the TLB. */
2373#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
2374/** Atomic access.
2375 * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK
2376 * fallback for misaligned stuff. See @bugref{10547}. */
2377#define IEM_ACCESS_ATOMIC UINT32_C(0x00002000)
2378/** Valid bit mask. */
2379#define IEM_ACCESS_VALID_MASK UINT32_C(0x00003fff)
2380/** Shift count for the TLB flags (upper word). */
2381#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
2382
2383/** Atomic read+write data alias. */
2384#define IEM_ACCESS_DATA_ATOMIC (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC)
2385/** Read+write data alias. */
2386#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2387/** Write data alias. */
2388#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2389/** Read data alias. */
2390#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
2391/** Instruction fetch alias. */
2392#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
2393/** Stack write alias. */
2394#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2395/** Stack read alias. */
2396#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
2397/** Stack read+write alias. */
2398#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2399/** Read system table alias. */
2400#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
2401/** Read+write system table alias. */
2402#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
2403/** @} */
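/* Illustrative sketch: an fAccess value decomposes via the type and what
   masks, e.g. when deciding whether a mapping was a write to the stack:

        bool const fWrite = RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE);
        bool const fStack = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_STACK;
*/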
2404
2405/** @name Prefix constants (IEMCPU::fPrefixes)
2406 * @{ */
2407#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
2408#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
2409#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
2410#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
2411#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
2412#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
2413#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
2414
2415#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
2416#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
2417#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
2418
2419#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
2420#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
2421#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
2422
2423#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
2424#define IEM_OP_PRF_REX_B RT_BIT_32(25) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
2425#define IEM_OP_PRF_REX_X RT_BIT_32(26) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
2426#define IEM_OP_PRF_REX_R RT_BIT_32(27) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
2427/** Mask with all the REX prefix flags.
2428 * This is generally for use when needing to undo the REX prefixes when they
2429 * are followed by legacy prefixes and therefore do not immediately precede
2430 * the first opcode byte.
2431 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
2432#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
2433
2434#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indicates VEX prefix. */
2435#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indicates EVEX prefix. */
2436#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indicates XOP prefix. */
2437/** @} */
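/* Illustrative sketch: the usual operand size override test, where REX.W
   takes precedence over the 66h prefix:

        if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W)
            // 64-bit operand size ...
        else if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP)
            // operand size flipped from the default ...
*/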
2438
2439/** @name IEMOPFORM_XXX - Opcode forms
2440 * @note These are ORed together with IEMOPHINT_XXX.
2441 * @{ */
2442/** ModR/M: reg, r/m */
2443#define IEMOPFORM_RM 0
2444/** ModR/M: reg, r/m (register) */
2445#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2446/** ModR/M: reg, r/m (memory) */
2447#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2448/** ModR/M: reg, r/m, imm */
2449#define IEMOPFORM_RMI 1
2450/** ModR/M: reg, r/m (register), imm */
2451#define IEMOPFORM_RMI_REG (IEMOPFORM_RMI | IEMOPFORM_MOD3)
2452/** ModR/M: reg, r/m (memory), imm */
2453#define IEMOPFORM_RMI_MEM (IEMOPFORM_RMI | IEMOPFORM_NOT_MOD3)
2454/** ModR/M: reg, r/m, xmm0 */
2455#define IEMOPFORM_RM0 2
2456/** ModR/M: reg, r/m (register), xmm0 */
2457#define IEMOPFORM_RM0_REG (IEMOPFORM_RM0 | IEMOPFORM_MOD3)
2458/** ModR/M: reg, r/m (memory), xmm0 */
2459#define IEMOPFORM_RM0_MEM (IEMOPFORM_RM0 | IEMOPFORM_NOT_MOD3)
2460/** ModR/M: r/m, reg */
2461#define IEMOPFORM_MR 3
2462/** ModR/M: r/m (register), reg */
2463#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2464/** ModR/M: r/m (memory), reg */
2465#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2466/** ModR/M: r/m, reg, imm */
2467#define IEMOPFORM_MRI 4
2468/** ModR/M: r/m (register), reg, imm */
2469#define IEMOPFORM_MRI_REG (IEMOPFORM_MRI | IEMOPFORM_MOD3)
2470/** ModR/M: r/m (memory), reg, imm */
2471#define IEMOPFORM_MRI_MEM (IEMOPFORM_MRI | IEMOPFORM_NOT_MOD3)
2472/** ModR/M: r/m only */
2473#define IEMOPFORM_M 5
2474/** ModR/M: r/m only (register). */
2475#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
2476/** ModR/M: r/m only (memory). */
2477#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
2478/** ModR/M: r/m, imm */
2479#define IEMOPFORM_MI 6
2480/** ModR/M: r/m (register), imm */
2481#define IEMOPFORM_MI_REG (IEMOPFORM_MI | IEMOPFORM_MOD3)
2482/** ModR/M: r/m (memory), imm */
2483#define IEMOPFORM_MI_MEM (IEMOPFORM_MI | IEMOPFORM_NOT_MOD3)
2484/** ModR/M: r/m, 1 (shift and rotate instructions) */
2485#define IEMOPFORM_M1 7
2486/** ModR/M: r/m (register), 1. */
2487#define IEMOPFORM_M1_REG (IEMOPFORM_M1 | IEMOPFORM_MOD3)
2488/** ModR/M: r/m (memory), 1. */
2489#define IEMOPFORM_M1_MEM (IEMOPFORM_M1 | IEMOPFORM_NOT_MOD3)
2490/** ModR/M: r/m, CL (shift and rotate instructions)
2491 * @todo This should just've been a generic fixed register. But the python
2492 * code needs more convincing. */
2493#define IEMOPFORM_M_CL 8
2494/** ModR/M: r/m (register), CL. */
2495#define IEMOPFORM_M_CL_REG (IEMOPFORM_M_CL | IEMOPFORM_MOD3)
2496/** ModR/M: r/m (memory), CL. */
2497#define IEMOPFORM_M_CL_MEM (IEMOPFORM_M_CL | IEMOPFORM_NOT_MOD3)
2498/** ModR/M: reg only */
2499#define IEMOPFORM_R 9
2500
2501/** VEX+ModR/M: reg, r/m */
2502#define IEMOPFORM_VEX_RM 16
2503/** VEX+ModR/M: reg, r/m (register) */
2504#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
2505/** VEX+ModR/M: reg, r/m (memory) */
2506#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
2507/** VEX+ModR/M: r/m, reg */
2508#define IEMOPFORM_VEX_MR 17
2509/** VEX+ModR/M: r/m (register), reg */
2510#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
2511/** VEX+ModR/M: r/m (memory), reg */
2512#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
2513/** VEX+ModR/M: r/m, reg, imm8 */
2514#define IEMOPFORM_VEX_MRI 18
2515/** VEX+ModR/M: r/m (register), reg, imm8 */
2516#define IEMOPFORM_VEX_MRI_REG (IEMOPFORM_VEX_MRI | IEMOPFORM_MOD3)
2517/** VEX+ModR/M: r/m (memory), reg, imm8 */
2518#define IEMOPFORM_VEX_MRI_MEM (IEMOPFORM_VEX_MRI | IEMOPFORM_NOT_MOD3)
2519/** VEX+ModR/M: r/m only */
2520#define IEMOPFORM_VEX_M 19
2521/** VEX+ModR/M: r/m only (register). */
2522#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
2523/** VEX+ModR/M: r/m only (memory). */
2524#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
2525/** VEX+ModR/M: reg only */
2526#define IEMOPFORM_VEX_R 20
2527/** VEX+ModR/M: reg, vvvv, r/m */
2528#define IEMOPFORM_VEX_RVM 21
2529/** VEX+ModR/M: reg, vvvv, r/m (register). */
2530#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
2531/** VEX+ModR/M: reg, vvvv, r/m (memory). */
2532#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
2533/** VEX+ModR/M: reg, vvvv, r/m, imm */
2534#define IEMOPFORM_VEX_RVMI 22
2535/** VEX+ModR/M: reg, vvvv, r/m (register), imm. */
2536#define IEMOPFORM_VEX_RVMI_REG (IEMOPFORM_VEX_RVMI | IEMOPFORM_MOD3)
2537/** VEX+ModR/M: reg, vvvv, r/m (memory), imm. */
2538#define IEMOPFORM_VEX_RVMI_MEM (IEMOPFORM_VEX_RVMI | IEMOPFORM_NOT_MOD3)
2539/** VEX+ModR/M: reg, vvvv, r/m, imm(reg) */
2540#define IEMOPFORM_VEX_RVMR 23
2541/** VEX+ModR/M: reg, vvvv, r/m (register), imm(reg). */
2542#define IEMOPFORM_VEX_RVMR_REG (IEMOPFORM_VEX_RVMR | IEMOPFORM_MOD3)
2543/** VEX+ModR/M: reg, vvvv, r/m (memory), imm(reg). */
2544#define IEMOPFORM_VEX_RVMR_MEM (IEMOPFORM_VEX_RVMR | IEMOPFORM_NOT_MOD3)
2545/** VEX+ModR/M: reg, r/m, vvvv */
2546#define IEMOPFORM_VEX_RMV 24
2547/** VEX+ModR/M: reg, r/m, vvvv (register). */
2548#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
2549/** VEX+ModR/M: reg, r/m, vvvv (memory). */
2550#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
2551/** VEX+ModR/M: reg, r/m, imm8 */
2552#define IEMOPFORM_VEX_RMI 25
2553/** VEX+ModR/M: reg, r/m, imm8 (register). */
2554#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
2555/** VEX+ModR/M: reg, r/m, imm8 (memory). */
2556#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
2557/** VEX+ModR/M: r/m, vvvv, reg */
2558#define IEMOPFORM_VEX_MVR 26
2559/** VEX+ModR/M: r/m, vvvv, reg (register) */
2560#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
2561/** VEX+ModR/M: r/m, vvvv, reg (memory) */
2562#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
2563/** VEX+ModR/M+/n: vvvv, r/m */
2564#define IEMOPFORM_VEX_VM 27
2565/** VEX+ModR/M+/n: vvvv, r/m (register) */
2566#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
2567/** VEX+ModR/M+/n: vvvv, r/m (memory) */
2568#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
2569/** VEX+ModR/M+/n: vvvv, r/m, imm8 */
2570#define IEMOPFORM_VEX_VMI 28
2571/** VEX+ModR/M+/n: vvvv, r/m, imm8 (register) */
2572#define IEMOPFORM_VEX_VMI_REG (IEMOPFORM_VEX_VMI | IEMOPFORM_MOD3)
2573/** VEX+ModR/M+/n: vvvv, r/m, imm8 (memory) */
2574#define IEMOPFORM_VEX_VMI_MEM (IEMOPFORM_VEX_VMI | IEMOPFORM_NOT_MOD3)
2575
2576/** Fixed register instruction, no R/M. */
2577#define IEMOPFORM_FIXED 32
2578
2579/** The r/m is a register. */
2580#define IEMOPFORM_MOD3 RT_BIT_32(8)
2581/** The r/m is a memory access. */
2582#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
2583/** @} */
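/* Illustrative sketch: a form value decomposes into the base form and the
   register/memory qualifier bits:

        uint32_t const fForm  = IEMOPFORM_VEX_RVM_REG;
        bool const fRegOnly   = RT_BOOL(fForm & IEMOPFORM_MOD3);
        uint32_t const idForm = fForm & ~(IEMOPFORM_MOD3 | IEMOPFORM_NOT_MOD3); // IEMOPFORM_VEX_RVM
*/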
2584
2585/** @name IEMOPHINT_XXX - Additional Opcode Hints
2586 * @note These are ORed together with IEMOPFORM_XXX.
2587 * @{ */
2588/** Ignores the operand size prefix (66h). */
2589#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
2590/** Ignores REX.W (aka WIG). */
2591#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
2592/** Both the operand size prefixes (66h + REX.W) are ignored. */
2593#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
2594/** Allowed with the lock prefix. */
2595#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(12)
2596/** The VEX.L value is ignored (aka LIG). */
2597#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(13)
2598/** The VEX.L value must be zero (i.e. 128-bit width only). */
2599#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(14)
2600/** The VEX.L value must be one (i.e. 256-bit width only). */
2601#define IEMOPHINT_VEX_L_ONE RT_BIT_32(15)
2602/** The VEX.V value must be zero. */
2603#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(16)
2604/** The REX.W/VEX.W value must be zero. */
2605#define IEMOPHINT_REX_W_ZERO RT_BIT_32(17)
2606#define IEMOPHINT_VEX_W_ZERO IEMOPHINT_REX_W_ZERO
2607/** The REX.W/VEX.W value must be one. */
2608#define IEMOPHINT_REX_W_ONE RT_BIT_32(18)
2609#define IEMOPHINT_VEX_W_ONE IEMOPHINT_REX_W_ONE
2610
2611/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
2612#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
2613/** @} */
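
/* Illustrative sketch, using a made-up entry value: an opcode table entry
 * ORs one IEMOPFORM_XXX value together with any number of IEMOPHINT_XXX
 * bits, which the decoder can then pick apart again:
 *     uint32_t const fExample = IEMOPFORM_VEX_RVM_REG | IEMOPHINT_VEX_L_ZERO | IEMOPHINT_IGNORES_REXW;
 *     bool const fRegOnly = RT_BOOL(fExample & IEMOPFORM_MOD3);
 */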
2614
2615/**
2616 * Possible hardware task switch sources.
2617 */
2618typedef enum IEMTASKSWITCH
2619{
2620 /** Task switch caused by an interrupt/exception. */
2621 IEMTASKSWITCH_INT_XCPT = 1,
2622 /** Task switch caused by a far CALL. */
2623 IEMTASKSWITCH_CALL,
2624 /** Task switch caused by a far JMP. */
2625 IEMTASKSWITCH_JUMP,
2626 /** Task switch caused by an IRET. */
2627 IEMTASKSWITCH_IRET
2628} IEMTASKSWITCH;
2629AssertCompileSize(IEMTASKSWITCH, 4);
2630
2631/**
2632 * Possible CrX access sources (writes, plus the SMSW read).
2633 */
2634typedef enum IEMACCESSCRX
2635{
2636 /** CrX access caused by 'mov crX' instruction. */
2637 IEMACCESSCRX_MOV_CRX,
2638 /** CrX (CR0) write caused by 'lmsw' instruction. */
2639 IEMACCESSCRX_LMSW,
2640 /** CrX (CR0) write caused by 'clts' instruction. */
2641 IEMACCESSCRX_CLTS,
2642 /** CrX (CR0) read caused by 'smsw' instruction. */
2643 IEMACCESSCRX_SMSW
2644} IEMACCESSCRX;
2645
2646#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2647/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
2648 *
2649 * These flags provide further context to SLAT page-walk failures that could not be
2650 * determined by PGM (e.g., PGM is not privy to memory access permissions).
2651 *
2652 * @{
2653 */
2654/** Translating a nested-guest linear address failed accessing a nested-guest
2655 * physical address. */
2656# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
2657/** Translating a nested-guest linear address failed accessing a
2658 * paging-structure entry or updating accessed/dirty bits. */
2659# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
2660/** @} */
2661
2662DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
2663# ifndef IN_RING3
2664DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
2665# endif
2666#endif
2667
2668/**
2669 * Indicates to the verifier that the given flag set is undefined.
2670 *
2671 * Can be invoked again to add more flags.
2672 *
2673 * This is a NOOP if the verifier isn't compiled in.
2674 *
2675 * @note We're temporarily keeping this until code is converted to new
2676 * disassembler style opcode handling.
2677 */
2678#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2679
2680
2681/** @def IEM_DECL_IMPL_TYPE
2682 * For typedef'ing an instruction implementation function.
2683 *
2684 * @param a_RetType The return type.
2685 * @param a_Name The name of the type.
2686 * @param a_ArgList The argument list enclosed in parentheses.
2687 */
2688
2689/** @def IEM_DECL_IMPL_DEF
2690 * For defining an instruction implementation function.
2691 *
2692 * @param a_RetType The return type.
2693 * @param a_Name The name of the function.
2694 * @param a_ArgList The argument list enclosed in parentheses.
2695 */
2696
2697#if defined(__GNUC__) && defined(RT_ARCH_X86)
2698# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2699 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
2700# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2701 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2702# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2703 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2704
2705#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
2706# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2707 a_RetType (__fastcall a_Name) a_ArgList
2708# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2709 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2710# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2711 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2712
2713#elif __cplusplus >= 201700 /* P0012R1 support */
2714# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2715 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
2716# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2717 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2718# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2719 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2720
2721#else
2722# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2723 a_RetType (VBOXCALL a_Name) a_ArgList
2724# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2725 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2726# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2727 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2728
2729#endif
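
/* Illustrative sketch of how the three macros fit together; the type and
 * helper names below are hypothetical, not real IEM symbols:
 *     typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLEXAMPLE,(uint32_t fEFlagsIn, uint8_t *pu8Dst));
 *     IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_example,(uint32_t fEFlagsIn, uint8_t *pu8Dst));
 *     IEM_DECL_IMPL_DEF(uint32_t, iemAImpl_example,(uint32_t fEFlagsIn, uint8_t *pu8Dst))
 *     {
 *         *pu8Dst ^= UINT8_C(0xff);
 *         return fEFlagsIn;
 *     }
 */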
2730
2731/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
2732RT_C_DECLS_BEGIN
2733extern uint8_t const g_afParity[256];
2734RT_C_DECLS_END
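
/* Illustrative sketch, assuming the table entries are X86_EFL_PF or zero: a
 * C implementation can fold the parity flag in via the low result byte:
 *     fEFlags = (fEFlags & ~X86_EFL_PF) | g_afParity[uResult & 0xff];
 */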
2735
2736
2737/** @name Arithmetic assignment operations on bytes (binary).
2738 * @{ */
2739typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU8, (uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t u8Src));
2740typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
2741FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
2742FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
2743FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
2744FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
2745FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
2746FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
2747FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
2748/** @} */
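
/* Illustrative sketch of the calling convention shared by these helpers:
 * EFLAGS go in by value, the destination is updated in place, and the
 * resulting EFLAGS come back as the return value (made-up locals):
 *     uint8_t  u8Dst   = UINT8_C(0xff);
 *     uint32_t fEFlags = iemAImpl_add_u8(fEFlagsIn, &u8Dst, 1); // u8Dst -> 0; CF and ZF set
 */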
2749
2750/** @name Arithmetic assignment operations on words (binary).
2751 * @{ */
2752typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU16, (uint32_t fEFlagsIn, uint16_t *pu16Dst, uint16_t u16Src));
2753typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
2754FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
2755FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
2756FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
2757FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
2758FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
2759FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
2760FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
2761/** @} */
2762
2763
2764/** @name Arithmetic assignment operations on double words (binary).
2765 * @{ */
2766typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU32, (uint32_t fEFlagsIn, uint32_t *pu32Dst, uint32_t u32Src));
2767typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
2768FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
2769FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
2770FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
2771FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
2772FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
2773FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
2774FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
2775FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
2776FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
2777FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
2778/** @} */
2779
2780/** @name Arithmetic assignment operations on quad words (binary).
2781 * @{ */
2782typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINU64, (uint32_t fEFlagsIn, uint64_t *pu64Dst, uint64_t u64Src));
2783typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
2784FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
2785FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
2786FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
2787FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
2788FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
2789FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
2790FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
2791FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
2792FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
2793FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
2794/** @} */
2795
2796typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU8, (uint32_t fEFlagsIn, uint8_t const *pu8Dst, uint8_t u8Src));
2797typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
2798typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU16,(uint32_t fEFlagsIn, uint16_t const *pu16Dst, uint16_t u16Src));
2799typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
2800typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU32,(uint32_t fEFlagsIn, uint32_t const *pu32Dst, uint32_t u32Src));
2801typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
2802typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLBINROU64,(uint32_t fEFlagsIn, uint64_t const *pu64Dst, uint64_t u64Src));
2803typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
2804
2805/** @name Compare operations (thrown in with the binary ops).
2806 * @{ */
2807FNIEMAIMPLBINROU8 iemAImpl_cmp_u8;
2808FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
2809FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
2810FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
2811/** @} */
2812
2813/** @name Test operations (thrown in with the binary ops).
2814 * @{ */
2815FNIEMAIMPLBINROU8 iemAImpl_test_u8;
2816FNIEMAIMPLBINROU16 iemAImpl_test_u16;
2817FNIEMAIMPLBINROU32 iemAImpl_test_u32;
2818FNIEMAIMPLBINROU64 iemAImpl_test_u64;
2819/** @} */
2820
2821/** @name Bit operations (thrown in with the binary ops).
2822 * @{ */
2823FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
2824FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
2825FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
2826FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
2827FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
2828FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
2829FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
2830FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
2831FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
2832FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
2833FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
2834FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
2835/** @} */
2836
2837/** @name Arithmetic three operand operations on double words (binary).
2838 * @{ */
2839typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
2840typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
2841FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
2842FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
2843FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
2844/** @} */
2845
2846/** @name Arithmetic three operand operations on quad words (binary).
2847 * @{ */
2848typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
2849typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
2850FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
2851FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
2852FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
2853/** @} */
2854
2855/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
2856 * @{ */
2857typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
2858typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
2859FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
2860FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
2861FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
2862FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
2863FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
2864FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
2865/** @} */
2866
2867/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
2868 * @{ */
2869typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
2870typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
2871FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
2872FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
2873FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
2874FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
2875FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
2876FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
2877/** @} */
2878
2879/** @name MULX 32-bit and 64-bit.
2880 * @{ */
2881typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
2882typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
2883FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
2884
2885typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
2886typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
2887FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
2888/** @} */
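
/* Illustrative sketch of what a plain C MULX fallback computes; which
 * pointer receives which half of the product is an assumption here:
 *     uint64_t const uProduct = (uint64_t)uSrc1 * uSrc2;
 *     *puDst1 = (uint32_t)uProduct;         // low half
 *     *puDst2 = (uint32_t)(uProduct >> 32); // high half
 */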
2889
2890
2891/** @name Exchange memory with register operations.
2892 * @{ */
2893IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2894IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2895IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2896IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2897IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2898IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2899IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2900IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2901/** @} */
2902
2903/** @name Exchange and add operations.
2904 * @{ */
2905IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2906IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2907IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2908IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2909IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2910IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2911IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2912IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2913/** @} */
2914
2915/** @name Compare and exchange.
2916 * @{ */
2917IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2918IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2919IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2920IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2921IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2922IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2923#if ARCH_BITS == 32
2924IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2925IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2926#else
2927IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2928IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2929#endif
2930IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2931 uint32_t *pEFlags));
2932IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2933 uint32_t *pEFlags));
2934IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2935 uint32_t *pEFlags));
2936IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2937 uint32_t *pEFlags));
2938#ifndef RT_ARCH_ARM64
2939IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
2940 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
2941#endif
2942/** @} */
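
/* Illustrative sketch of the byte variant's compare-and-swap semantics,
 * leaving out how ZF is folded into *pEFlags:
 *     if (*pu8Dst == *puAl)
 *         *pu8Dst = uSrcReg;  // equal: store the source, ZF=1
 *     else
 *         *puAl = *pu8Dst;    // not equal: load the accumulator, ZF=0
 */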
2943
2944/** @name Memory ordering
2945 * @{ */
2946typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
2947typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
2948IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
2949IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
2950IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
2951#ifndef RT_ARCH_ARM64
2952IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
2953#endif
2954/** @} */
2955
2956/** @name Double precision shifts
2957 * @{ */
2958typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
2959typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
2960typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
2961typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
2962typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
2963typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
2964FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
2965FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
2966FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
2967FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
2968FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
2969FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
2970/** @} */
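
/* Illustrative sketch of the core SHLD operation for the 16-bit variant,
 * assuming 1 <= cShift <= 15 and ignoring the EFLAGS updates:
 *     *pu16Dst = (uint16_t)((*pu16Dst << cShift) | (u16Src >> (16 - cShift)));
 */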
2971
2972
2973/** @name Bit search operations (thrown in with the binary ops).
2974 * @{ */
2975FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
2976FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
2977FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
2978FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
2979FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
2980FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
2981FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
2982FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
2983FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
2984FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
2985FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
2986FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
2987FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
2988FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
2989FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
2990/** @} */
2991
2992/** @name Signed multiplication operations (thrown in with the binary ops).
2993 * @{ */
2994FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
2995FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
2996FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
2997/** @} */
2998
2999/** @name Arithmetic assignment operations on bytes (unary).
3000 * @{ */
3001typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
3002typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
3003FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
3004FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
3005FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
3006FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
3007/** @} */
3008
3009/** @name Arithmetic assignment operations on words (unary).
3010 * @{ */
3011typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
3012typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
3013FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
3014FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
3015FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
3016FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
3017/** @} */
3018
3019/** @name Arithmetic assignment operations on double words (unary).
3020 * @{ */
3021typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
3022typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
3023FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
3024FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
3025FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
3026FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
3027/** @} */
3028
3029/** @name Arithmetic assignment operations on quad words (unary).
3030 * @{ */
3031typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
3032typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
3033FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
3034FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
3035FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
3036FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
3037/** @} */
3038
3039
3040/** @name Shift operations on bytes (Group 2).
3041 * @{ */
3042typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU8,(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift));
3043typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
3044FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
3045FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
3046FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
3047FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
3048FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
3049FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
3050FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
3051/** @} */
3052
3053/** @name Shift operations on words (Group 2).
3054 * @{ */
3055typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU16,(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift));
3056typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
3057FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
3058FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
3059FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
3060FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
3061FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
3062FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
3063FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
3064/** @} */
3065
3066/** @name Shift operations on double words (Group 2).
3067 * @{ */
3068typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU32,(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift));
3069typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
3070FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
3071FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
3072FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
3073FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
3074FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
3075FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
3076FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
3077/** @} */
3078
3079/** @name Shift operations on quad words (Group 2).
3080 * @{ */
3081typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSHIFTU64,(uint32_t fEFlagsIn, uint64_t *pu64Dst, uint8_t cShift));
3082typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
3083FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
3084FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
3085FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
3086FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
3087FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
3088FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
3089FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
3090/** @} */
3091
3092/** @name Multiplication and division operations.
3093 * @{ */
3094typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
3095typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
3096FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
3097FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
3098FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
3099FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
3100
3101typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
3102typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
3103FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
3104FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
3105FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
3106FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
3107
3108typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
3109typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
3110FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
3111FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
3112FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
3113FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
3114
3115typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
3116typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
3117FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
3118FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
3119FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
3120FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
3121/** @} */
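
/* Illustrative sketch, assuming the int return value is 0 on success and -1
 * when a #DE must be raised (divide by zero or quotient overflow); a call
 * site would then look roughly like:
 *     if (iemAImpl_div_u8(&u16AX, u8Divisor, &fEFlags) != 0)
 *         return iemRaiseDivideError(pVCpu);
 */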
3122
3123/** @name Byte Swap.
3124 * @{ */
3125IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
3126IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
3127IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
3128/** @} */
3129
3130/** @name Misc.
3131 * @{ */
3132FNIEMAIMPLBINU16 iemAImpl_arpl;
3133/** @} */
3134
3135/** @name RDRAND and RDSEED
3136 * @{ */
3137typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
3138typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
3139typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
3140typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
3141typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
3142typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;
3143
3144FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
3145FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
3146FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
3147FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
3148FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
3149FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
3150/** @} */
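
/* Illustrative sketch, assuming the helpers mirror the hardware convention
 * of reporting a valid result via CF:
 *     iemAImpl_rdrand_u32_fallback(&u32Val, &fEFlags);
 *     bool const fValid = RT_BOOL(fEFlags & X86_EFL_CF);
 */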
3151
3152/** @name ADOX and ADCX
3153 * @{ */
3154FNIEMAIMPLBINU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
3155FNIEMAIMPLBINU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
3156FNIEMAIMPLBINU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
3157FNIEMAIMPLBINU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
3158/** @} */
3159
3160/** @name FPU operations taking a 32-bit float argument
3161 * @{ */
3162typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3163 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
3164typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
3165
3166typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3167 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
3168typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
3169
3170FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
3171FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
3172FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
3173FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
3174FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
3175FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
3176FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
3177
3178IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
3179IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3180 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
3181/** @} */
3182
3183/** @name FPU operations taking a 64-bit float argument
3184 * @{ */
3185typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3186 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
3187typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
3188
3189typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3190 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
3191typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
3192
3193FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
3194FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
3195FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
3196FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
3197FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
3198FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
3199FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
3200
3201IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
3202IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3203 PRTFLOAT64U pr64Val, PCRTFLOAT80U pr80Val));
3204/** @} */
3205
3206/** @name FPU operations taking an 80-bit float argument
3207 * @{ */
3208typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3209 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3210typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
3211FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
3212FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
3213FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
3214FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
3215FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
3216FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
3217FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
3218FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
3219FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
3220
3221FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80, iemAImpl_fpatan_r80_by_r80_amd, iemAImpl_fpatan_r80_by_r80_intel;
3222FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80, iemAImpl_fyl2x_r80_by_r80_amd, iemAImpl_fyl2x_r80_by_r80_intel;
3223FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
3224
3225typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3226 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3227typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
3228FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
3229FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
3230
3231typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
3232 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
3233typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
3234FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
3235FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
3236
3237typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
3238typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
3239FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
3240FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
3241FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
3242FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
3243FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
3244FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
3245FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
3246
3247typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
3248typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
3249FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
3250FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
3251
3252typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
3253typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
3254FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
3255FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
3256FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
3257FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
3258FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
3259FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
3260FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
3261
3262typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
3263 PCRTFLOAT80U pr80Val));
3264typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
3265FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
3266FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
3267FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
3268
3269IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
3270IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3271 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
3272
3273IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
3274IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
3275 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
3276
3277/** @} */
3278
3279/** @name FPU operations taking a 16-bit signed integer argument
3280 * @{ */
3281typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3282 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3283typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
3284typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3285 int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
3286typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
3287
3288FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
3289FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
3290FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
3291FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
3292FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
3293FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
3294
3295typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3296 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
3297typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
3298FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;
3299
3300IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
3301FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
3302FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
3303/** @} */
3304
3305/** @name FPU operations taking a 32-bit signed integer argument
3306 * @{ */
3307typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
3308 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3309typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
3310typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3311 int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
3312typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
3313
3314FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
3315FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
3316FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
3317FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
3318FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
3319FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
3320
3321typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
3322 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
3323typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
3324FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;
3325
3326IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
3327FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
3328FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
3329/** @} */
3330
3331/** @name FPU operations taking a 64-bit signed integer argument
3332 * @{ */
3333typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
3334 int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
3335typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
3336
3337IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
3338FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
3339FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
3340/** @} */
3341
3342
3343/** Temporary type representing a 256-bit vector register. */
3344typedef struct { uint64_t au64[4]; } IEMVMM256;
3345/** Temporary type pointing to a 256-bit vector register. */
3346typedef IEMVMM256 *PIEMVMM256;
3347/** Temporary type pointing to a const 256-bit vector register. */
3348typedef IEMVMM256 const *PCIEMVMM256;
3349
3350
3351/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
3352 * @{ */
3353typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
3354typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
3355typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
3356typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
3357typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3358typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
3359typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3360typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
3361typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
3362typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
3363typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
3364typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
3365typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
3366typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
3367typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
3368typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
3369typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
3370typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
3371FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
3372FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
3373FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
3374FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
3375FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
3376FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
3377FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddd_u64;
3378FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_paddq_u64;
3379FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
3380FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
3381FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubd_u64;
3382FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psubq_u64;
3383FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmaddwd_u64, iemAImpl_pmaddwd_u64_fallback;
3384FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
3385FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
3386FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
3387FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
3388FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
3389FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
3390FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
3391FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
3392FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
3393FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
3394FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
3395FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
3396FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
3397FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
3398FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
3399FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
3400FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
3401FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmuludq_u64;
3402FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
3403FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
3404FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
3405FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
3406FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
3407FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
3408FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
3409FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
3410
3411FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
3412FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
3413FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
3414FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
3415FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
3416FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
3417FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
3418FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
3419FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddd_u128;
3420FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_paddq_u128;
3421FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
3422FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
3423FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubd_u128;
3424FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psubq_u128;
3425FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
3426FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhw_u128;
3427FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
3428FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddwd_u128, iemAImpl_pmaddwd_u128_fallback;
3429FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminub_u128;
3430FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
3431FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
3432FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
3433FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
3434FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
3435FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxub_u128;
3436FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
3437FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
3438FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
3439FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsw_u128;
3440FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
3441FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
3442FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
3443FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
3444FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
3445FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
3446FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
3447FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
3448FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
3449FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
3450FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
3451FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
3452FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
3453FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
3454FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
3455FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuludq_u128;
3457FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
3458FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
3459FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
3460FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
3461FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
3462FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
3463FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
3464FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
3465FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
3466FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
3467FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
3468FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
3469
3470FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
3471FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
3472FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
3473FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
3474FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
3475FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
3476FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
3477FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
3478FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
3479FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
3480FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
3481FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
3482FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
3483FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
3484FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
3485FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
3486FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
3487FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
3488FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
3489FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
3490FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
3491FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
3492FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
3493FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
3494FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
3495FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
3496FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
3497FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
3498FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
3499FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
3500FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
3501FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
3502FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
3503FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
3504FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
3505FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
3506FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
3507FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
3508FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
3509FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
3510FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
3511FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
3512FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
3513FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
3514FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
3515FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
3516FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
3517FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
3518FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
3519FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
3520FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
3521FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
3522FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
3523FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
3524FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
3525FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
3526FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
3527FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
3528FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
3529FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
3530FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
3531FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
3532FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
3533FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
3534FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;
3535FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllw_u128, iemAImpl_vpsllw_u128_fallback;
3536FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpslld_u128, iemAImpl_vpslld_u128_fallback;
3537FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllq_u128, iemAImpl_vpsllq_u128_fallback;
3538FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsraw_u128, iemAImpl_vpsraw_u128_fallback;
3539FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrad_u128, iemAImpl_vpsrad_u128_fallback;
3540FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlw_u128, iemAImpl_vpsrlw_u128_fallback;
3541FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrld_u128, iemAImpl_vpsrld_u128_fallback;
3542FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlq_u128, iemAImpl_vpsrlq_u128_fallback;
3543FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddwd_u128, iemAImpl_vpmaddwd_u128_fallback;
3544
3545FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
3546FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsw_u128_fallback;
3547FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsd_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllw_u256, iemAImpl_vpsllw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpslld_u256, iemAImpl_vpslld_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllq_u256, iemAImpl_vpsllq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsraw_u256, iemAImpl_vpsraw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrad_u256, iemAImpl_vpsrad_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlw_u256, iemAImpl_vpsrlw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrld_u256, iemAImpl_vpsrld_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlq_u256, iemAImpl_vpsrlq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddwd_u256, iemAImpl_vpmaddwd_u256_fallback;

FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf2 -> full1.
 * @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
                         iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
                         iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
                         iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
                         iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
                         iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
                         iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
                         iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
                         iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
                         iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
                         iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
                         iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
                         iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
                         iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
                         iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
 * @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
                         iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
                         iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
                         iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
                         iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
                         iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
                         iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
#ifndef IEM_WITHOUT_ASSEMBLY
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
#endif
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
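
/* Usage sketch (illustrative only): bEvil is the instruction's immediate
 * shuffle-control byte.  E.g. pshufd with an immediate of 0x1B (element
 * order 3,2,1,0) reverses the four dwords of the source:
 *
 *     RTUINT128U uDst, uSrc;
 *     ...
 *     iemAImpl_pshufd_u128(&uDst, &uSrc, 0x1B);
 */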
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_maskmovq_u64,(uint64_t *puMem, uint64_t const *puSrc, uint64_t const *puMsk));
IEM_DECL_IMPL_DEF(void, iemAImpl_maskmovdqu_u128,(PRTUINT128U puMem, PCRTUINT128U puSrc, PCRTUINT128U puMsk));
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
#ifndef IEM_WITHOUT_ASSEMBLY
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
#endif
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
/** @} */

/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;

FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;

FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;

FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
/** @} */


/** @name Media (SSE/MMX/AVX) operation: Sort this later
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));


typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF2U256IMM8 *PFNIEMAIMPLMEDIAOPTF2U256IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendd_u128, iemAImpl_vpblendd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendd_u256, iemAImpl_vpblendd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));

typedef struct IEMPCMPISTRXSRC
{
    RTUINT128U uSrc1;
    RTUINT128U uSrc2;
} IEMPCMPISTRXSRC;
typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;

typedef struct IEMPCMPESTRXSRC
{
    RTUINT128U uSrc1;
    RTUINT128U uSrc2;
    uint64_t u64Rax;
    uint64_t u64Rdx;
} IEMPCMPESTRXSRC;
typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pEFlags, PCRTUINT128U pSrc1, PCRTUINT128U pSrc2, uint8_t bEvil));
typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;

FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;
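
/* Calling convention sketch (illustrative only): the 'istri' variants return
 * the computed index (the new ECX value) and report the result flags via the
 * EFLAGS pointer:
 *
 *     uint32_t fEFlags = 0;
 *     uint32_t uEcxNew = iemAImpl_pcmpistri_u128(&fEFlags, &uSrc1, &uSrc2, bImm8);
 */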

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;

FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllw_imm_u128, iemAImpl_vpsllw_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllw_imm_u256, iemAImpl_vpsllw_imm_u256_fallback;
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpslld_imm_u128, iemAImpl_vpslld_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpslld_imm_u256, iemAImpl_vpslld_imm_u256_fallback;
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsllq_imm_u128, iemAImpl_vpsllq_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsllq_imm_u256, iemAImpl_vpsllq_imm_u256_fallback;
IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpslldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));

FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsraw_imm_u128, iemAImpl_vpsraw_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsraw_imm_u256, iemAImpl_vpsraw_imm_u256_fallback;
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrad_imm_u128, iemAImpl_vpsrad_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrad_imm_u256, iemAImpl_vpsrad_imm_u256_fallback;

FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlw_imm_u128, iemAImpl_vpsrlw_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlw_imm_u256, iemAImpl_vpsrlw_imm_u256_fallback;
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrld_imm_u128, iemAImpl_vpsrld_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrld_imm_u256, iemAImpl_vpsrld_imm_u256_fallback;
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_vpsrlq_imm_u128, iemAImpl_vpsrlq_imm_u128_fallback;
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpsrlq_imm_u256, iemAImpl_vpsrlq_imm_u256_fallback;
IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpsrldq_imm_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t uShift));

FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilps_u128, iemAImpl_vpermilps_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilps_imm_u128, iemAImpl_vpermilps_imm_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilps_u256, iemAImpl_vpermilps_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilps_imm_u256, iemAImpl_vpermilps_imm_u256_fallback;

FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpermilpd_u128, iemAImpl_vpermilpd_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_vpermilpd_imm_u128, iemAImpl_vpermilpd_imm_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpermilpd_u256, iemAImpl_vpermilpd_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256IMM8 iemAImpl_vpermilpd_imm_u256, iemAImpl_vpermilpd_imm_u256_fallback;

FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvd_u128, iemAImpl_vpsllvd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvd_u256, iemAImpl_vpsllvd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsllvq_u128, iemAImpl_vpsllvq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsllvq_u256, iemAImpl_vpsllvq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsravd_u128, iemAImpl_vpsravd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsravd_u256, iemAImpl_vpsravd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvd_u128, iemAImpl_vpsrlvd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvd_u256, iemAImpl_vpsrlvd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsrlvq_u128, iemAImpl_vpsrlvq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsrlvq_u256, iemAImpl_vpsrlvq_u256_fallback;
/** @} */

/** @name Media Odds and Ends
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
typedef FNIEMAIMPLF2EFL128 *PFNIEMAIMPLF2EFL128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
typedef FNIEMAIMPLF2EFL256 *PFNIEMAIMPLF2EFL256;
FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
FNIEMAIMPLF2EFL128 iemAImpl_vtestps_u128, iemAImpl_vtestps_u128_fallback;
FNIEMAIMPLF2EFL256 iemAImpl_vtestps_u256, iemAImpl_vtestps_u256_fallback;
FNIEMAIMPLF2EFL128 iemAImpl_vtestpd_u128, iemAImpl_vtestpd_u128_fallback;
FNIEMAIMPLF2EFL256 iemAImpl_vtestpd_u256, iemAImpl_vtestpd_u256_fallback;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U64,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src points to a double precision floating point value. */
typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U64,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src points to a double precision floating point value. */
typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I32U32,(uint32_t uMxCsrIn, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src points to a single precision floating point value. */
typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2I64U32,(uint32_t uMxCsrIn, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src points to a single precision floating point value. */
typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;

FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;

FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;

FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;

FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
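
/* Calling convention sketch (illustrative only): these helpers take the
 * current MXCSR value as input and return the updated MXCSR (with any newly
 * raised exception flags) rather than modifying guest state directly:
 *
 *     int32_t  i32Dst;
 *     uint32_t uMxCsrOut = iemAImpl_cvttsd2si_i32_r64(uMxCsrIn, &i32Dst, &u64Src);
 */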

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I32,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R32I64,(uint32_t uMxCsrIn, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;

FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I32,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLSSEF2R64I64,(uint32_t uMxCsrIn, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;

FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;


typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR32R32,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT32U uSrc1, RTFLOAT32U uSrc2));
typedef FNIEMAIMPLF2EFLMXCSRR32R32 *PFNIEMAIMPLF2EFLMXCSRR32R32;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLF2EFLMXCSRR64R64,(uint32_t uMxCsrIn, uint32_t *pfEFlags, RTFLOAT64U uSrc1, RTFLOAT64U uSrc2));
typedef FNIEMAIMPLF2EFLMXCSRR64R64 *PFNIEMAIMPLF2EFLMXCSRR64R64;

FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_ucomiss_u128;
FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;

FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_ucomisd_u128;
FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;

FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_comiss_u128;
FNIEMAIMPLF2EFLMXCSRR32R32 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;

FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_comisd_u128;
FNIEMAIMPLF2EFLMXCSRR64R64 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;


typedef struct IEMMEDIAF2XMMSRC
{
    X86XMMREG uSrc1;
    X86XMMREG uSrc2;
} IEMMEDIAF2XMMSRC;
typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t uMxCsrIn, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;

FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;

FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;

FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U128,(uint32_t fMxCsrIn, uint64_t *pu64Dst, PCX86XMMREG pSrc));
typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;

FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU128U64,(uint32_t fMxCsrIn, PX86XMMREG pDst, uint64_t u64Src));
typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;

FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLMXCSRU64U64,(uint32_t fMxCsrIn, uint64_t *pu64Dst, uint64_t u64Src));
typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;

FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;

/** @} */


/** @name Function tables.
 * @{
 */

/**
 * Function table for a binary operator providing implementation based on
 * operand size.
 */
typedef struct IEMOPBINSIZES
{
    PFNIEMAIMPLBINU8  pfnNormalU8,  pfnLockedU8;
    PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
    PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
    PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
} IEMOPBINSIZES;
/** Pointer to a binary operator function table. */
typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
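
/* Initialization sketch (the table name is hypothetical; the workers are the
 * add workers declared earlier in this file): a concrete instance pairs the
 * plain and locked worker for each operand size:
 *
 *     static IEMOPBINSIZES const s_iemAImpl_example =
 *     {
 *         iemAImpl_add_u8,  iemAImpl_add_u8_locked,
 *         iemAImpl_add_u16, iemAImpl_add_u16_locked,
 *         iemAImpl_add_u32, iemAImpl_add_u32_locked,
 *         iemAImpl_add_u64, iemAImpl_add_u64_locked,
 *     };
 */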


/**
 * Function table for a unary operator providing implementation based on
 * operand size.
 */
typedef struct IEMOPUNARYSIZES
{
    PFNIEMAIMPLUNARYU8  pfnNormalU8,  pfnLockedU8;
    PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
    PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
    PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
} IEMOPUNARYSIZES;
/** Pointer to a unary operator function table. */
typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;


/**
 * Function table for a shift operator providing implementation based on
 * operand size.
 */
typedef struct IEMOPSHIFTSIZES
{
    PFNIEMAIMPLSHIFTU8  pfnNormalU8;
    PFNIEMAIMPLSHIFTU16 pfnNormalU16;
    PFNIEMAIMPLSHIFTU32 pfnNormalU32;
    PFNIEMAIMPLSHIFTU64 pfnNormalU64;
} IEMOPSHIFTSIZES;
/** Pointer to a shift operator function table. */
typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;


/**
 * Function table for a multiplication or division operation.
 */
typedef struct IEMOPMULDIVSIZES
{
    PFNIEMAIMPLMULDIVU8  pfnU8;
    PFNIEMAIMPLMULDIVU16 pfnU16;
    PFNIEMAIMPLMULDIVU32 pfnU32;
    PFNIEMAIMPLMULDIVU64 pfnU64;
} IEMOPMULDIVSIZES;
/** Pointer to a multiplication or division operation function table. */
typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;


/**
 * Function table for a double precision shift operator providing implementation
 * based on operand size.
 */
typedef struct IEMOPSHIFTDBLSIZES
{
    PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
    PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
    PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
} IEMOPSHIFTDBLSIZES;
/** Pointer to a double precision shift function table. */
typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;


/**
 * Function table for a media instruction taking two full sized media source
 * registers and one full sized destination register (AVX).
 */
typedef struct IEMOPMEDIAF3
{
    PFNIEMAIMPLMEDIAF3U128 pfnU128;
    PFNIEMAIMPLMEDIAF3U256 pfnU256;
} IEMOPMEDIAF3;
/** Pointer to a media operation function table for 3 full sized ops (AVX). */
typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;

/** @def IEMOPMEDIAF3_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAF3 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPMEDIAF3_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
    IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_u256), \
                              RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
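
/* Usage sketch (illustrative only; 'vexample' is a stand-in mnemonic): an AVX
 * instruction body declares the two local tables and then picks one based on
 * host capability, typically via the decoder-side selection helper:
 *
 *     IEMOPMEDIAF3_INIT_VARS(vexample);
 *     PCIEMOPMEDIAF3 pImpl = IEM_SELECT_HOST_OR_FALLBACK(fAvx2, &s_Host, &s_Fallback);
 */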

/**
 * Function table for a media instruction taking two full sized media source
 * registers and one full sized destination register, but no additional state
 * (AVX).
 */
typedef struct IEMOPMEDIAOPTF3
{
    PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
    PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
} IEMOPMEDIAOPTF3;
/** Pointer to a media operation function table for 3 full sized ops (AVX). */
typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;

/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF3 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPMEDIAOPTF3_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
    IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_u256), \
                                 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))


/**
 * Function table for a media instruction taking one full sized media source
 * register and one full sized destination register, but no additional state
 * (AVX).
 */
typedef struct IEMOPMEDIAOPTF2
{
    PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
    PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
} IEMOPMEDIAOPTF2;
/** Pointer to a media operation function table for 2 full sized ops (AVX). */
typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;

/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF2 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPMEDIAOPTF2_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
    IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_u256), \
                                 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))

/**
 * Function table for a media instruction taking one full sized media source
 * register, one full sized destination register, and an 8-bit immediate, but
 * no additional state (AVX).
 */
typedef struct IEMOPMEDIAOPTF2IMM8
{
    PFNIEMAIMPLMEDIAOPTF2U128IMM8 pfnU128;
    PFNIEMAIMPLMEDIAOPTF2U256IMM8 pfnU256;
} IEMOPMEDIAOPTF2IMM8;
/** Pointer to a media operation function table for 2 full sized ops (AVX). */
typedef IEMOPMEDIAOPTF2IMM8 const *PCIEMOPMEDIAOPTF2IMM8;

/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF2IMM8 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF2IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPMEDIAOPTF2IMM8_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX */
#define IEMOPMEDIAOPTF2IMM8_INIT_VARS(a_InstrNm) \
    IEMOPMEDIAOPTF2IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256), \
                                     RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_imm_u256_fallback))

/**
 * Function table for a media instruction taking two full sized media source
 * registers, one full sized destination register, and an 8-bit immediate, but
 * no additional state (AVX).
 */
typedef struct IEMOPMEDIAOPTF3IMM8
{
    PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
    PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
} IEMOPMEDIAOPTF3IMM8;
/** Pointer to a media operation function table for 3 full sized ops (AVX). */
typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;

/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF3IMM8 const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
    IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_u256), \
                                     RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
/** @} */


/**
 * Function table for a blend-type instruction taking three full sized media
 * source registers and one full sized destination register, but no additional
 * state (AVX).
 */
typedef struct IEMOPBLENDOP
{
    PFNIEMAIMPLAVXBLENDU128 pfnU128;
    PFNIEMAIMPLAVXBLENDU256 pfnU256;
} IEMOPBLENDOP;
/** Pointer to a media operation function table for 4 full sized ops (AVX). */
typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;

/** @def IEMOPBLENDOP_INIT_VARS_EX
 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
 * given functions as initializers.  For use in AVX functions where a pair of
 * functions are only used once and the function table need not be public. */
#ifndef TST_IEM_CHECK_MC
# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
#  define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPBLENDOP const s_Host     = { a_pfnHostU128,     a_pfnHostU256 }; \
    static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# else
#  define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
    static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
# endif
#else
# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
#endif
/** @def IEMOPBLENDOP_INIT_VARS
 * Generate AVX function tables for the @a a_InstrNm instruction.
 * @sa IEMOPBLENDOP_INIT_VARS_EX */
#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
    IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128),          RT_CONCAT3(iemAImpl_,a_InstrNm,_u256), \
                              RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))


/** @name SSE/AVX single/double precision floating point operations.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R32,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPSSEF2U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R32,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U128R64,(uint32_t uMxCsrIn, PX86XMMREG pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPAVXF3U256,(uint32_t uMxCsrIn, PX86YMMREG pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;

FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_rcpps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;

FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
IEM_DECL_IMPL_PROTO(uint32_t, iemAImpl_cvtps2pd_u128,(uint32_t uMxCsrIn, PX86XMMREG pResult, uint64_t const *pu64Src));

FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;

FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rcpss_u128_r32;

FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;

FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;

FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
4527FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
4528FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
4529FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
4530FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
4531FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
4532FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
4533FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
4534/** @} */
4535
4536/** @name C instruction implementations for anything slightly complicated.
4537 * @{ */
4538
4539/**
4540 * For typedef'ing or declaring a C instruction implementation function taking
4541 * no extra arguments.
4542 *
4543 * @param a_Name The name of the type.
4544 */
4545# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
4546 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4547/**
4548 * For defining a C instruction implementation function taking no extra
4549 * arguments.
4550 *
4551 * @param a_Name The name of the function
4552 */
4553# define IEM_CIMPL_DEF_0(a_Name) \
4554 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4555/**
4556 * Prototype version of IEM_CIMPL_DEF_0.
4557 */
4558# define IEM_CIMPL_PROTO_0(a_Name) \
4559 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4560/**
4561 * For calling a C instruction implementation function taking no extra
4562 * arguments.
4563 *
4564 * This special call macro adds default arguments to the call and allows us to
4565 * change these later.
4566 *
4567 * @param a_fn The name of the function.
4568 */
4569# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
4570
4571/** Type for a C instruction implementation function taking no extra
4572 * arguments. */
4573typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
4574/** Function pointer type for a C instruction implementation function taking
4575 * no extra arguments. */
4576typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
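
/*
 * Illustrative sketch (hypothetical, not an actual IEM instruction
 * implementation): how the zero-argument variants fit together.  The function
 * name below is made up for illustration only.
 *
 *      IEM_CIMPL_PROTO_0(iemCImplExampleHlt);      // header: prototype
 *
 *      IEM_CIMPL_DEF_0(iemCImplExampleHlt)         // IEMAllCImpl.cpp: definition
 *      {
 *          // ... do the work using the implicit pVCpu and cbInstr ...
 *          return VINF_SUCCESS;
 *      }
 *
 *      // caller, where pVCpu and cbInstr are in scope:
 *      return IEM_CIMPL_CALL_0(iemCImplExampleHlt);
 */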
4577
4578/**
4579 * For typedef'ing or declaring a C instruction implementation function taking
4580 * one extra argument.
4581 *
4582 * @param a_Name The name of the type.
4583 * @param a_Type0 The argument type.
4584 * @param a_Arg0 The argument name.
4585 */
4586# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
4587 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4588/**
4589 * For defining a C instruction implementation function taking one extra
4590 * argument.
4591 *
4592 * @param a_Name The name of the function
4593 * @param a_Type0 The argument type.
4594 * @param a_Arg0 The argument name.
4595 */
4596# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
4597 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4598/**
4599 * Prototype version of IEM_CIMPL_DEF_1.
4600 */
4601# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
4602 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4603/**
4604 * For calling a C instruction implementation function taking one extra
4605 * argument.
4606 *
4607 * This special call macro adds default arguments to the call and allows us to
4608 * change these later.
4609 *
4610 * @param a_fn The name of the function.
4611 * @param a0 The name of the 1st argument.
4612 */
4613# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4614
4615/**
4616 * For typedef'ing or declaring a C instruction implementation function taking
4617 * two extra arguments.
4618 *
4619 * @param a_Name The name of the type.
4620 * @param a_Type0 The type of the 1st argument
4621 * @param a_Arg0 The name of the 1st argument.
4622 * @param a_Type1 The type of the 2nd argument.
4623 * @param a_Arg1 The name of the 2nd argument.
4624 */
4625# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4626 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4627/**
4628 * For defining a C instruction implementation function taking two extra
4629 * arguments.
4630 *
4631 * @param a_Name The name of the function.
4632 * @param a_Type0 The type of the 1st argument
4633 * @param a_Arg0 The name of the 1st argument.
4634 * @param a_Type1 The type of the 2nd argument.
4635 * @param a_Arg1 The name of the 2nd argument.
4636 */
4637# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4638 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4639/**
4640 * Prototype version of IEM_CIMPL_DEF_2.
4641 */
4642# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4643 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4644/**
4645 * For calling a C instruction implementation function taking two extra
4646 * arguments.
4647 *
4648 * This special call macro adds default arguments to the call and allows us to
4649 * change these later.
4650 *
4651 * @param a_fn The name of the function.
4652 * @param a0 The name of the 1st argument.
4653 * @param a1 The name of the 2nd argument.
4654 */
4655# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4656
4657/**
4658 * For typedef'ing or declaring a C instruction implementation function taking
4659 * three extra arguments.
4660 *
4661 * @param a_Name The name of the type.
4662 * @param a_Type0 The type of the 1st argument
4663 * @param a_Arg0 The name of the 1st argument.
4664 * @param a_Type1 The type of the 2nd argument.
4665 * @param a_Arg1 The name of the 2nd argument.
4666 * @param a_Type2 The type of the 3rd argument.
4667 * @param a_Arg2 The name of the 3rd argument.
4668 */
4669# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4670 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4671/**
4672 * For defining a C instruction implementation function taking three extra
4673 * arguments.
4674 *
4675 * @param a_Name The name of the function.
4676 * @param a_Type0 The type of the 1st argument
4677 * @param a_Arg0 The name of the 1st argument.
4678 * @param a_Type1 The type of the 2nd argument.
4679 * @param a_Arg1 The name of the 2nd argument.
4680 * @param a_Type2 The type of the 3rd argument.
4681 * @param a_Arg2 The name of the 3rd argument.
4682 */
4683# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4684 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4685/**
4686 * Prototype version of IEM_CIMPL_DEF_3.
4687 */
4688# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4689 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4690/**
4691 * For calling a C instruction implementation function taking three extra
4692 * arguments.
4693 *
4694 * This special call macro adds default arguments to the call and allows us to
4695 * change these later.
4696 *
4697 * @param a_fn The name of the function.
4698 * @param a0 The name of the 1st argument.
4699 * @param a1 The name of the 2nd argument.
4700 * @param a2 The name of the 3rd argument.
4701 */
4702# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4703
4704
4705/**
4706 * For typedef'ing or declaring a C instruction implementation function taking
4707 * four extra arguments.
4708 *
4709 * @param a_Name The name of the type.
4710 * @param a_Type0 The type of the 1st argument
4711 * @param a_Arg0 The name of the 1st argument.
4712 * @param a_Type1 The type of the 2nd argument.
4713 * @param a_Arg1 The name of the 2nd argument.
4714 * @param a_Type2 The type of the 3rd argument.
4715 * @param a_Arg2 The name of the 3rd argument.
4716 * @param a_Type3 The type of the 4th argument.
4717 * @param a_Arg3 The name of the 4th argument.
4718 */
4719# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4720 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
4721/**
4722 * For defining a C instruction implementation function taking four extra
4723 * arguments.
4724 *
4725 * @param a_Name The name of the function.
4726 * @param a_Type0 The type of the 1st argument
4727 * @param a_Arg0 The name of the 1st argument.
4728 * @param a_Type1 The type of the 2nd argument.
4729 * @param a_Arg1 The name of the 2nd argument.
4730 * @param a_Type2 The type of the 3rd argument.
4731 * @param a_Arg2 The name of the 3rd argument.
4732 * @param a_Type3 The type of the 4th argument.
4733 * @param a_Arg3 The name of the 4th argument.
4734 */
4735# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4736 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4737 a_Type2 a_Arg2, a_Type3 a_Arg3))
4738/**
4739 * Prototype version of IEM_CIMPL_DEF_4.
4740 */
4741# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4742 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4743 a_Type2 a_Arg2, a_Type3 a_Arg3))
4744/**
4745 * For calling a C instruction implementation function taking four extra
4746 * arguments.
4747 *
4748 * This special call macro adds default arguments to the call and allows us to
4749 * change these later.
4750 *
4751 * @param a_fn The name of the function.
4752 * @param a0 The name of the 1st argument.
4753 * @param a1 The name of the 2nd argument.
4754 * @param a2 The name of the 3rd argument.
4755 * @param a3 The name of the 4th argument.
4756 */
4757# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4758
4759
4760/**
4761 * For typedef'ing or declaring a C instruction implementation function taking
4762 * five extra arguments.
4763 *
4764 * @param a_Name The name of the type.
4765 * @param a_Type0 The type of the 1st argument
4766 * @param a_Arg0 The name of the 1st argument.
4767 * @param a_Type1 The type of the 2nd argument.
4768 * @param a_Arg1 The name of the 2nd argument.
4769 * @param a_Type2 The type of the 3rd argument.
4770 * @param a_Arg2 The name of the 3rd argument.
4771 * @param a_Type3 The type of the 4th argument.
4772 * @param a_Arg3 The name of the 4th argument.
4773 * @param a_Type4 The type of the 5th argument.
4774 * @param a_Arg4 The name of the 5th argument.
4775 */
4776# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4777 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
4778 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
4779 a_Type3 a_Arg3, a_Type4 a_Arg4))
4780/**
4781 * For defining a C instruction implementation function taking five extra
4782 * arguments.
4783 *
4784 * @param a_Name The name of the function.
4785 * @param a_Type0 The type of the 1st argument
4786 * @param a_Arg0 The name of the 1st argument.
4787 * @param a_Type1 The type of the 2nd argument.
4788 * @param a_Arg1 The name of the 2nd argument.
4789 * @param a_Type2 The type of the 3rd argument.
4790 * @param a_Arg2 The name of the 3rd argument.
4791 * @param a_Type3 The type of the 4th argument.
4792 * @param a_Arg3 The name of the 4th argument.
4793 * @param a_Type4 The type of the 5th argument.
4794 * @param a_Arg4 The name of the 5th argument.
4795 */
4796# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4797 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4798 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4799/**
4800 * Prototype version of IEM_CIMPL_DEF_5.
4801 */
4802# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4803 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4804 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4805/**
4806 * For calling a C instruction implementation function taking five extra
4807 * arguments.
4808 *
4809 * This special call macro adds default arguments to the call and allows us to
4810 * change these later.
4811 *
4812 * @param a_fn The name of the function.
4813 * @param a0 The name of the 1st argument.
4814 * @param a1 The name of the 2nd argument.
4815 * @param a2 The name of the 3rd argument.
4816 * @param a3 The name of the 4th argument.
4817 * @param a4 The name of the 5th argument.
4818 */
4819# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
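
/*
 * The higher-arity variants above all follow the same pattern; a minimal
 * hypothetical two-argument sketch (names invented for illustration):
 *
 *      IEM_CIMPL_PROTO_2(iemCImplExampleStore, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
 *
 *      IEM_CIMPL_DEF_2(iemCImplExampleStore, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
 *      {
 *          // ... use iEffSeg and GCPtrEff in addition to pVCpu and cbInstr ...
 *          return VINF_SUCCESS;
 *      }
 *
 *      return IEM_CIMPL_CALL_2(iemCImplExampleStore, iEffSeg, GCPtrEff);
 */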
4820
4821/** @} */
4822
4823
4824/** @name Opcode Decoder Function Types.
4825 * @{ */
4826
4827/** @typedef PFNIEMOP
4828 * Pointer to an opcode decoder function.
4829 */
4830
4831/** @def FNIEMOP_DEF
4832 * Define an opcode decoder function.
4833 *
4834 * We're using macros for this so that adding and removing parameters as well as
4835 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
4836 *
4837 * @param a_Name The function name.
4838 */
4839
4840/** @typedef PFNIEMOPRM
4841 * Pointer to an opcode decoder function with RM byte.
4842 */
4843
4844/** @def FNIEMOPRM_DEF
4845 * Define an opcode decoder function with RM byte.
4846 *
4847 * We're using macros for this so that adding and removing parameters as well as
4848 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
4849 *
4850 * @param a_Name The function name.
4851 */
4852
4853#if defined(__GNUC__) && defined(RT_ARCH_X86)
4854typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
4855typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4856# define FNIEMOP_DEF(a_Name) \
4857 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
4858# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4859 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4860# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4861 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4862
4863#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
4864typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
4865typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4866# define FNIEMOP_DEF(a_Name) \
4867 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4868# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4869 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4870# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4871 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4872
4873#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
4874typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4875typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4876# define FNIEMOP_DEF(a_Name) \
4877 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
4878# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4879 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4880# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4881 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4882
4883#else
4884typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4885typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4886# define FNIEMOP_DEF(a_Name) \
4887 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4888# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4889 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4890# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4891 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4892
4893#endif
4894#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4895
4896/**
4897 * Call an opcode decoder function.
4898 *
4899 * We're using macros for this so that adding and removing parameters can be
4900 * done as we please. See FNIEMOP_DEF.
4901 */
4902#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
4903
4904/**
4905 * Call a common opcode decoder function taking one extra argument.
4906 *
4907 * We're using macros for this so that adding and removing parameters can be
4908 * done as we please. See FNIEMOP_DEF_1.
4909 */
4910#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
4911
4912/**
4913 * Call a common opcode decoder function taking two extra arguments.
4914 *
4915 * We're using macros for this so that adding and removing parameters can be
4916 * done as we please. See FNIEMOP_DEF_2.
4917 */
4918#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
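
/*
 * Hypothetical decoder sketch (opcode and function names invented for
 * illustration; IEM_OPCODE_GET_NEXT_U8 is the opcode fetch helper assumed to
 * be provided by the decoder headers):
 *
 *      FNIEMOP_DEF(iemOp_example)
 *      {
 *          uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          if (IEM_IS_MODRM_REG_MODE(bRm))
 *              return FNIEMOP_CALL_1(iemOp_example_rm_reg, bRm);
 *          return FNIEMOP_CALL_1(iemOp_example_rm_mem, bRm);
 *      }
 */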
4919/** @} */
4920
4921
4922/** @name Misc Helpers
4923 * @{ */
4924
4925/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
4926 * due to GCC lacking knowledge about the value range of a switch. */
4927#if RT_CPLUSPLUS_PREREQ(202000)
4928# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4929#else
4930# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4931#endif
4932
4933/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
4934#if RT_CPLUSPLUS_PREREQ(202000)
4935# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
4936#else
4937# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
4938#endif
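
/*
 * Typical use (sketch; cbToPush is a made-up local, and the enclosing
 * function must return a VBox status code for the assertion fallback):
 *
 *      switch (enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbToPush = 2; break;
 *          case IEMMODE_32BIT: cbToPush = 4; break;
 *          case IEMMODE_64BIT: cbToPush = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */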
4939
4940/**
4941 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4942 * occasion.
4943 */
4944#ifdef LOG_ENABLED
4945# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4946 do { \
4947 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
4948 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4949 } while (0)
4950#else
4951# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4952 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4953#endif
4954
4955/**
4956 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4957 * occasion using the supplied logger statement.
4958 *
4959 * @param a_LoggerArgs What to log on failure.
4960 */
4961#ifdef LOG_ENABLED
4962# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4963 do { \
4964 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
4965 /*LogFunc(a_LoggerArgs);*/ \
4966 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4967 } while (0)
4968#else
4969# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4970 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4971#endif
4972
4973/**
4974 * Gets the CPU mode (from fExec) as an IEMMODE value.
4975 *
4976 * @returns IEMMODE
4977 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4978 */
4979#define IEM_GET_CPU_MODE(a_pVCpu) ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
4980
4981/**
4982 * Check if we're currently executing in real or virtual 8086 mode.
4983 *
4984 * @returns @c true if it is, @c false if not.
4985 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4986 */
4987#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (( ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
4988 & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
4989
4990/**
4991 * Check if we're currently executing in virtual 8086 mode.
4992 *
4993 * @returns @c true if it is, @c false if not.
4994 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4995 */
4996#define IEM_IS_V86_MODE(a_pVCpu) (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
4997
4998/**
4999 * Check if we're currently executing in long mode.
5000 *
5001 * @returns @c true if it is, @c false if not.
5002 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5003 */
5004#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
5005
5006/**
5007 * Check if we're currently executing in a 16-bit code segment.
5008 *
5009 * @returns @c true if it is, @c false if not.
5010 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5011 */
5012#define IEM_IS_16BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
5013
5014/**
5015 * Check if we're currently executing in a 32-bit code segment.
5016 *
5017 * @returns @c true if it is, @c false if not.
5018 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5019 */
5020#define IEM_IS_32BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
5021
5022/**
5023 * Check if we're currently executing in a 64-bit code segment.
5024 *
5025 * @returns @c true if it is, @c false if not.
5026 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5027 */
5028#define IEM_IS_64BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
5029
5030/**
5031 * Check if we're currently executing in real mode.
5032 *
5033 * @returns @c true if it is, @c false if not.
5034 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5035 */
5036#define IEM_IS_REAL_MODE(a_pVCpu) (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
5037
5038/**
5039 * Gets the current protection level (CPL).
5040 *
5041 * @returns 0..3
5042 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5043 */
5044#define IEM_GET_CPL(a_pVCpu) (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
5045
5046/**
5047 * Sets the current protection level (CPL).
5048 *
5049 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5050 */
5051#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
5052 do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
5053
5054/**
5055 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
5056 * @returns PCCPUMFEATURES
5057 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5058 */
5059#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
5060
5061/**
5062 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
5063 * @returns PCCPUMFEATURES
5064 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5065 */
5066#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
5067
5068/**
5069 * Evaluates to true if we're presenting an Intel CPU to the guest.
5070 */
5071#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
5072
5073/**
5074 * Evaluates to true if we're presenting an AMD CPU to the guest.
5075 */
5076#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
5077
5078/**
5079 * Check if the address is canonical.
5080 */
5081#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
5082
5083/** Checks if the ModR/M byte is in register mode or not. */
5084#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
5085/** Checks if the ModR/M byte is in memory mode or not. */
5086#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
5087
5088/**
5089 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
5090 *
5091 * For use during decoding.
5092 */
5093#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
5094/**
5095 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
5096 *
5097 * For use during decoding.
5098 */
5099#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
5100
5101/**
5102 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
5103 *
5104 * For use during decoding.
5105 */
5106#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
5107/**
5108 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
5109 *
5110 * For use during decoding.
5111 */
5112#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
5113
5114/**
5115 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
5116 * register index, with REX.R added in.
5117 *
5118 * For use during decoding.
5119 *
5120 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
5121 */
5122#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
5123 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
5124 || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
5125 ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
5126/**
5127 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
5128 * with REX.B added in.
5129 *
5130 * For use during decoding.
5131 *
5132 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
5133 */
5134#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
5135 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
5136 || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
5137 ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
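
/*
 * Worked example: for bRm=0xDC (mod=3, reg=3, r/m=4) with a REX prefix where
 * REX.R=1 and REX.B=0 (uRexReg/uRexB holding the pre-shifted values 8 and 0),
 * IEM_IS_MODRM_REG_MODE() is true, IEM_GET_MODRM_REG() yields 3+8=11 and
 * IEM_GET_MODRM_RM() yields 4.  For byte operands without any REX prefix,
 * encodings 4..7 architecturally denote the high-byte registers AH/CH/DH/BH,
 * which the EX8 macros above map into the 16..19 index range instead.
 */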
5138
5139/**
5140 * Combines the prefix REX and ModR/M byte for passing to
5141 * iemOpHlpCalcRmEffAddrThreadedAddr64().
5142 *
5143 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
5144 * The two bits are part of the REG sub-field, which isn't needed in
5145 * iemOpHlpCalcRmEffAddrThreadedAddr64().
5146 *
5147 * For use during decoding/recompiling.
5148 */
5149#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
5150 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
5151 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (25 - 3) ) )
5152AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(25));
5153AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(26));
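
/*
 * Bit arithmetic check for the above: IEM_OP_PRF_REX_B is bit 25 and
 * IEM_OP_PRF_REX_X is bit 26 (see the AssertCompiles), so shifting right by
 * 25-3=22 lands REX.B in bit 3 and REX.X in bit 4 of the result, i.e. in the
 * otherwise unused REG sub-field.
 */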
5154
5155/**
5156 * Gets the effective VEX.VVVV value.
5157 *
5158 * The 4th bit is ignored when not executing 64-bit code.
5159 * @returns effective V-register value.
5160 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
5161 */
5162#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
5163 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
5164
5165
5166/**
5167 * Gets the register (reg) part of the special 4th register byte used by
5168 * vblendvps and vblendvpd.
5169 *
5170 * For use during decoding.
5171 */
5172#define IEM_GET_IMM8_REG(a_pVCpu, a_bRegImm8) \
5173 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_bRegImm8) >> 4 : ((a_bRegImm8) >> 4) & 7)
5174
5175
5176/**
5177 * Checks if we're executing inside an AMD-V or VT-x guest.
5178 */
5179#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
5180# define IEM_IS_IN_GUEST(a_pVCpu) RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
5181#else
5182# define IEM_IS_IN_GUEST(a_pVCpu) false
5183#endif
5184
5185
5186#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5187
5188/**
5189 * Check if the guest has entered VMX root operation.
5190 */
5191# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
5192
5193/**
5194 * Check if the guest has entered VMX non-root operation.
5195 */
5196# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
5197 == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
5198
5199/**
5200 * Check if the nested-guest has the given Pin-based VM-execution control set.
5201 */
5202# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
5203
5204/**
5205 * Check if the nested-guest has the given Processor-based VM-execution control set.
5206 */
5207# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
5208
5209/**
5210 * Check if the nested-guest has the given Secondary Processor-based VM-execution
5211 * control set.
5212 */
5213# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
5214
5215/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
5216# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
5217
5218/** Whether a shadow VMCS is present for the given VCPU. */
5219# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5220
5221/** Gets the VMXON region pointer. */
5222# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
5223
5224/** Gets the guest-physical address of the current VMCS for the given VCPU. */
5225# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
5226
5227/** Whether a current VMCS is present for the given VCPU. */
5228# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
5229
5230/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
5231# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
5232 do \
5233 { \
5234 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
5235 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
5236 } while (0)
5237
5238/** Clears any current VMCS for the given VCPU. */
5239# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
5240 do \
5241 { \
5242 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
5243 } while (0)
5244
5245/**
5246 * Invokes the VMX VM-exit handler for an instruction intercept.
5247 */
5248# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
5249 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
5250
5251/**
5252 * Invokes the VMX VM-exit handler for an instruction intercept where the
5253 * instruction provides additional VM-exit information.
5254 */
5255# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
5256 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
5257
5258/**
5259 * Invokes the VMX VM-exit handler for a task switch.
5260 */
5261# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
5262 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
5263
5264/**
5265 * Invokes the VMX VM-exit handler for MWAIT.
5266 */
5267# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
5268 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
5269
5270/**
5271 * Invokes the VMX VM-exit handler for EPT faults.
5272 */
5273# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
5274 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
5275
5276/**
5277 * Invokes the VMX VM-exit handler for a triple fault.
5278 */
5279# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
5280 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
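
/*
 * Usage sketch, mirroring how HLT-style intercepts are typically checked in
 * an instruction implementation (control and exit-reason constants assumed
 * from VBox's vmx.h):
 *
 *      if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *          && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
 */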
5281
5282#else
5283# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
5284# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
5285# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
5286# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
5287# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
5288# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5289# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5290# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5291# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5292# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
5293# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
5294
5295#endif
5296
5297#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5298/**
5299 * Checks if we're executing a guest using AMD-V.
5300 */
5301# define IEM_SVM_IS_IN_GUEST(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
5302 == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
5303/**
5304 * Check if an SVM control/instruction intercept is set.
5305 */
5306# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
5307 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
5308
5309/**
5310 * Check if an SVM read CRx intercept is set.
5311 */
5312# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5313 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5314
5315/**
5316 * Check if an SVM write CRx intercept is set.
5317 */
5318# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
5319 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
5320
5321/**
5322 * Check if an SVM read DRx intercept is set.
5323 */
5324# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5325 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5326
5327/**
5328 * Check if an SVM write DRx intercept is set.
5329 */
5330# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
5331 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
5332
5333/**
5334 * Check if an SVM exception intercept is set.
5335 */
5336# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
5337 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
5338
5339/**
5340 * Invokes the SVM \#VMEXIT handler for the nested-guest.
5341 */
5342# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
5343 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
5344
5345/**
5346 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
5347 * corresponding decode assist information.
5348 */
5349# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
5350 do \
5351 { \
5352 uint64_t uExitInfo1; \
5353 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
5354 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
5355 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
5356 else \
5357 uExitInfo1 = 0; \
5358 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
5359 } while (0)
5360
5361/** Checks and handles the SVM nested-guest instruction intercept, updating
5362 * the NRIP if needed.
5363 */
5364# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5365 do \
5366 { \
5367 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
5368 { \
5369 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5370 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
5371 } \
5372 } while (0)
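
/*
 * Usage sketch, again using an HLT-style intercept as the example (intercept
 * and exit-code constants assumed from VBox's svm.h):
 *
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_HLT,
 *                                    SVM_EXIT_HLT, 0, 0, cbInstr);
 */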
5373
5374/** Checks and handles SVM nested-guest CR0 read intercept. */
5375# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
5376 do \
5377 { \
5378 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
5379 { /* probably likely */ } \
5380 else \
5381 { \
5382 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
5383 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
5384 } \
5385 } while (0)
5386
5387/**
5388 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
5389 */
5390# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
5391 do { \
5392 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
5393 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
5394 } while (0)
5395
5396#else
5397# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
5398# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5399# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
5400# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5401# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
5402# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
5403# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
5404# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
5405# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
5406 a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5407# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
5408# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) do { } while (0)
5409
5410#endif
5411
5412/** @} */
5413
5414uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
5415VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
5416
5417
5418/**
5419 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
5420 */
5421typedef union IEMSELDESC
5422{
5423 /** The legacy view. */
5424 X86DESC Legacy;
5425 /** The long mode view. */
5426 X86DESC64 Long;
5427} IEMSELDESC;
5428/** Pointer to a selector descriptor table entry. */
5429typedef IEMSELDESC *PIEMSELDESC;
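
/*
 * Consumption sketch: after iemMemFetchSelDesc() has filled one of these in,
 * callers typically check fields via the view matching the current mode, e.g.
 * (field names per VBox's x86.h; selector fetch and error handling omitted):
 *
 *      IEMSELDESC Desc;
 *      ...
 *      if (Desc.Legacy.Gen.u2Dpl != IEM_GET_CPL(pVCpu))
 *          return iemRaiseGeneralProtectionFaultBySelector(pVCpu, Sel);
 */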
5430
5431/** @name Raising Exceptions.
5432 * @{ */
5433VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
5434 uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
5435
5436VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
5437 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5438#ifdef IEM_WITH_SETJMP
5439DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
5440 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
5441#endif
5442VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
5443#ifdef IEM_WITH_SETJMP
5444DECL_NO_RETURN(void) iemRaiseDivideErrorJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5445#endif
5446VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5447VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
5448VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
5449#ifdef IEM_WITH_SETJMP
5450DECL_NO_RETURN(void) iemRaiseUndefinedOpcodeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5451#endif
5452VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
5453#ifdef IEM_WITH_SETJMP
5454DECL_NO_RETURN(void) iemRaiseDeviceNotAvailableJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5455#endif
5456VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5457VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
5458VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5459VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5460/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
5461VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5462VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5463VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5464VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5465VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
5466VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
5467#ifdef IEM_WITH_SETJMP
5468DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5469#endif
5470VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5471VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
5472VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5473#ifdef IEM_WITH_SETJMP
5474DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5475#endif
5476VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
5477#ifdef IEM_WITH_SETJMP
5478DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
5479#endif
5480VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
5481#ifdef IEM_WITH_SETJMP
5482DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
5483#endif
5484VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
5485#ifdef IEM_WITH_SETJMP
5486DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
5487#endif
5488VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5489#ifdef IEM_WITH_SETJMP
5490DECL_NO_RETURN(void) iemRaiseMathFaultJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5491#endif
5492VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5493#ifdef IEM_WITH_SETJMP
5494DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5495#endif
5496VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
5497#ifdef IEM_WITH_SETJMP
5498DECL_NO_RETURN(void) iemRaiseSimdFpExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5499#endif
5500
5501void iemLogSyscallRealModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5502void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
5503
5504IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
5505IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
5506IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
5507
5508/**
5509 * Macro for calling iemCImplRaiseDivideError().
5510 *
5511 * This is for things that will _always_ decode to an \#DE, taking the
5512 * recompiler into consideration and everything.
5513 *
5514 * @return Strict VBox status code.
5515 */
5516#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseDivideError)
5517
5518/**
5519 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5520 *
5521 * This is for things that will _always_ decode to an \#UD, taking the
5522 * recompiler into consideration and everything.
5523 *
5524 * @return Strict VBox status code.
5525 */
5526#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidLockPrefix)
5527
5528/**
5529 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
5530 *
5531 * This is for things that will _always_ decode to an \#UD, taking the
5532 * recompiler into consideration and everything.
5533 *
5534 * @return Strict VBox status code.
5535 */
5536#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
5537
5538/**
5539 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
5540 *
5541 * Using this macro means you've got _buggy_ _code_ and are doing things
5542 * during decoding that belong exclusively in IEMAllCImpl.cpp.
5543 *
5544 * @return Strict VBox status code.
5545 * @see IEMOP_RAISE_INVALID_OPCODE_RET
5546 */
5547#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
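
/*
 * Decoder-side sketch (hypothetical invalid-encoding handler):
 *
 *      FNIEMOP_DEF(iemOp_example_invalid)
 *      {
 *          IEMOP_RAISE_INVALID_OPCODE_RET();
 *      }
 */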
5548
5549/** @} */
5550
5551/** @name Register Access.
5552 * @{ */
5553VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5554 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5555VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
5556VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5557 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5558/** @} */
5559
5560/** @name FPU access and helpers.
5561 * @{ */
5562void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5563void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5564void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5565void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5566void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5567void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5568 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5569void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5570 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5571void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5572void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5573void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5574void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5575void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5576void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5577void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5578void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5579void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5580void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5581void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5582void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5583void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5584void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5585void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5586/** @} */
5587
5588/** @name SSE+AVX SIMD access and helpers.
5589 * @{ */
5590void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
5591/** @} */
5592
5593/** @name Memory access.
5594 * @{ */
5595
5596/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
5597#define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16)
5598/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
5599 * when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
5600#define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17)
5601/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
5602 * Users include FXSAVE & FXRSTOR. */
5603#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
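
/*
 * Mapping sketch for the alignment-control flags above, mirroring how 16-byte
 * aligned SSE accesses are typically mapped via iemMemMap() (declared just
 * below; the access flag and locals are assumptions for illustration):
 *
 *      void        *pvMem;
 *      uint8_t      bUnmapInfo;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, &bUnmapInfo, 16, iSegReg, GCPtrMem,
 *                                        IEM_ACCESS_DATA_R,
 *                                        (16 - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 */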
5604
5605VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5606 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
5607VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5608#ifndef IN_RING3
5609VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5610#endif
5611void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5612void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
5613VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
5614VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5615VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
5616
5617void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
5618void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
5619#ifdef IEM_WITH_CODE_TLB
5620void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
5621#else
5622VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
5623#endif
5624#ifdef IEM_WITH_SETJMP
5625uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5626uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5627uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5628uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5629#else
5630VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
5631VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5632VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5633VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5634VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5635VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5636VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5637VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5638VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5639VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5640VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5641#endif
5642
VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128NoAc(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256NoAc(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256AlignedAvx(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
                                 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256NoAcSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
# if 0 /* these are inlined now */
uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128NoAcJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFetchDataU256AlignedAvxJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
# endif
void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
#endif

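/** @note Illustrative sketch only: typical use of the status-code based fetch
 *        helpers above.  Failures (\#PF, \#GP(0) and the like) come back as
 *        strict status codes that must be propagated unmodified; the wrapper
 *        name iemExampleLoadDwordZx is made up. */
#if 0
static VBOXSTRICTRC iemExampleLoadDwordZx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t *pu64Dst)
{
    /* Fetch 32 bits from guest memory, zero extended to 64 bits. */
    VBOXSTRICTRC rcStrict = iemMemFetchDataU32_ZX_U64(pVCpu, pu64Dst, iSegReg, GCPtrMem);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;                /* propagate #PF, #GP(0) and friends */
    /* ... use *pu64Dst ... */
    return VINF_SUCCESS;
}
#endif
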
VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;

VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256NoAc(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
#if 0
void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataNoAcU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256NoAcJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif

#ifdef IEM_WITH_SETJMP
uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t *iemMemMapDataU8AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t *iemMemMapDataU16AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t *iemMemMapDataU32AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t *iemMemMapDataU64AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTFLOAT80U iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTFLOAT80U iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PCRTFLOAT80U iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTPBCD80U iemMemMapDataD80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTPBCD80U iemMemMapDataD80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PCRTPBCD80U iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTUINT128U iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTUINT128U iemMemMapDataU128AtSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
#endif

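/** @note Illustrative sketch only: the mapping helpers above pair with the
 *        commit-and-unmap helpers via the opaque unmap-info byte.  The
 *        function name iemExampleIncU32 is made up. */
#if 0
# ifdef IEM_WITH_SETJMP
static void iemExampleIncU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
    uint8_t   bUnmapInfo;
    uint32_t *pu32 = iemMemMapDataU32RwSafeJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem);
    *pu32 += 1;                                        /* read-modify-write on the mapped guest memory */
    iemMemCommitAndUnmapRwSafeJmp(pVCpu, bUnmapInfo);  /* commit the change; longjmps on failure */
}
# endif
#endif
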
VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;

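/** @note Illustrative sketch only: the begin/commit pair above lets a push be
 *        written first and RSP be updated only if everything succeeded.  The
 *        wrapper name iemExamplePushU64 is made up. */
#if 0
static VBOXSTRICTRC iemExamplePushU64(PVMCPUCC pVCpu, uint64_t uValue)
{
    void    *pvMem      = NULL;
    uint8_t  bUnmapInfo = 0;
    uint64_t uNewRsp    = 0;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(uint64_t), sizeof(uint64_t),
                                                        &pvMem, &bUnmapInfo, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint64_t *)pvMem = uValue;
    return iemMemStackPushCommitSpecial(pVCpu, bUnmapInfo, uNewRsp);
}
#endif
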
#ifdef IEM_WITH_SETJMP
void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreStackU32SRegSafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;

uint16_t iemMemFetchStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

#endif

/** @} */

/** @name IEMAllCImpl.cpp
 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
 * @{ */
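/* For example, the sed script above turns the definition
 *      IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
 * in IEMAllCImpl.cpp into the prototype
 *      IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
 * i.e. it keeps only the IEM_CIMPL_DEF_ lines, renames the macro and appends a semicolon. */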
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
IEM_CIMPL_PROTO_0(iemCImpl_syscall);
IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_0(iemCImpl_clts);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
IEM_CIMPL_PROTO_0(iemCImpl_invd);
IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
IEM_CIMPL_PROTO_0(iemCImpl_rsm);
IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
IEM_CIMPL_PROTO_0(iemCImpl_cli);
IEM_CIMPL_PROTO_0(iemCImpl_sti);
IEM_CIMPL_PROTO_0(iemCImpl_hlt);
IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_mwait);
IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
IEM_CIMPL_PROTO_0(iemCImpl_daa);
IEM_CIMPL_PROTO_0(iemCImpl_das);
IEM_CIMPL_PROTO_0(iemCImpl_aaa);
IEM_CIMPL_PROTO_0(iemCImpl_aas);
IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
                  PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovps_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vmaskmovpd_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_load_u128, uint8_t, iXRegDst, uint8_t, iXRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_load_u256, uint8_t, iYRegDst, uint8_t, iYRegMsk, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u128, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iXRegMsk, uint8_t, iXRegSrc);
IEM_CIMPL_PROTO_4(iemCImpl_vpmaskmovq_store_u256, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, uint8_t, iYRegMsk, uint8_t, iYRegSrc);

/** @} */

/** @name IEMAllCImplStrInstr.cpp.h
 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
 *       -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
 * @{ */
IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);


IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);


IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
/** @} */

#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
                                 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
                                    bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
                                   RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
                           uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                     uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
IEM_CIMPL_PROTO_0(iemCImpl_vmload);
IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
IEM_CIMPL_PROTO_0(iemCImpl_clgi);
IEM_CIMPL_PROTO_0(iemCImpl_stgi);
IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
IEM_CIMPL_PROTO_0(iemCImpl_skinit);
IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
#endif

IEM_CIMPL_PROTO_0(iemCImpl_vmcall);  /* vmx */
IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */

extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];

/*
 * Recompiler related stuff.
 */
extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];

DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
                            uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
DECLHIDDEN(void) iemTbAllocatorFree(PVMCPUCC pVCpu, PIEMTB pTb);
void iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
void iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
DECLHIDDEN(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT;
DECLHIDDEN(void) iemThreadedDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;

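/** @note Illustrative sketch only: a plausible initialization call for the
 *        translation-block machinery declared above.  The sizes are invented
 *        for the example and not taken from the real configuration code. */
#if 0
static int iemExampleInitTbs(PVMCC pVM)
{
    return iemTbInit(pVM, /* cInitialTbs */ _64K, /* cMaxTbs */ _512K,
                     /* cbInitialExec */ _4M, /* cbMaxExec */ _64M, /* cbChunkExec */ _2M);
}
#endif
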

/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)

#else
typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
#endif

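/** @note Illustrative sketch only: how the macro pair above is meant to be
 *        used.  The function name iemThreadedFunc_BltIn_Example and its body
 *        are made up. */
#if 0
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Example);

IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_Example)
{
    /* All parameters arrive as raw uint64_t values; unused ones are ignored. */
    RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
    return VINF_SUCCESS;
}
#endif
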

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_LogCpuState);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);

/* Branching: */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);

/* Natural page crossing: */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);

bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);

/* Native recompiler public bits: */
DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk) RT_NOEXCEPT;
DECLHIDDEN(void *) iemExecMemAllocatorAlloc(PVMCPU pVCpu, uint32_t cbReq, PIEMTB pTb, void **ppvExec) RT_NOEXCEPT;
DECLHIDDEN(void) iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
DECLASM(DECL_NO_RETURN(void)) iemNativeTbLongJmp(void *pvFramePointer, int rc) RT_NOEXCEPT;

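/** @note Illustrative sketch only: a plausible lifecycle for the executable
 *        memory allocator above.  That ppvExec hands back a separate
 *        executable alias of the allocated memory is an assumption based on
 *        the parameter name; the helper name iemExampleEmitCode is made up
 *        and memcpy assumes iprt/string.h is available. */
#if 0
static void *iemExampleEmitCode(PVMCPU pVCpu, PIEMTB pTb, uint8_t const *pbCode, uint32_t cbCode)
{
    void *pvExec = NULL;
    void *pvRw   = iemExecMemAllocatorAlloc(pVCpu, cbCode, pTb, &pvExec);
    if (pvRw)
    {
        memcpy(pvRw, pbCode, cbCode);                           /* emit through the writable pointer */
        iemExecMemAllocatorReadyForUse(pVCpu, pvExec, cbCode);  /* make the code safe to execute */
    }
    return pvExec;
}
#endif
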
#endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */


/** @} */

RT_C_DECLS_END

/* ASM-INC: %include "IEMInternalStruct.mac" */

#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
