VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 101722

Last change on this file since 101722 was 101722, checked in by vboxsync, 15 months ago

VMM/IEM: Identify the different styles IEM_MC_CALL_XXX and mark these using new IEM_CIMPL_F_CALLS_XXX flags in the python scripts. This is necessary as IEM_MC_CALL_CIMPL_X, IEM_MC_CALL_FPU_AIMPL_X, IEM_MC_CALL_MMX_AIMPL_X, and IEM_MC_CALL_SSE_AIMPL_X all have hidden parameters that need to be accounted for when recompiling to native code (for more perfect register allocations for variables). Split up the different cmpxchg16b AIMPL/CIMPL variations into separate MC blocks, as we can't mix AIMPL and CIMPL calls in the same block (also, in the CIMPL case, there would be unused tail code after the call). bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 283.8 KB
Line 
1/* $Id: IEMInternal.h 101722 2023-11-03 00:36:45Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41#include <iprt/list.h>
42
43
44RT_C_DECLS_BEGIN
45
46
/** @defgroup grp_iem_int Internals
 * @ingroup grp_iem
 * @internal
 * @{
 */

/** For expanding symbols in SlickEdit and other products tagging and
 * cross-referencing IEM symbols. */
#ifndef IEM_STATIC
# define IEM_STATIC static
#endif

/** @def IEM_WITH_SETJMP
 * Enables alternative status code handling using setjmps.
 *
 * This adds a bit of expense via the setjmp() call since it saves all the
 * non-volatile registers.  However, it eliminates return code checks and allows
 * for more optimal return value passing (return regs instead of stack buffer).
 */
#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1 /* the '|| 1' currently forces setjmp mode on all hosts */
# define IEM_WITH_SETJMP
#endif

/** @def IEM_WITH_THROW_CATCH
 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
 * mode code when IEM_WITH_SETJMP is in effect.
 *
 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
 * setjmp/long resulted in bs2-test-1 running 3.00% faster and all but on test
 * result value improving by more than 1%. (Best out of three.)
 *
 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
 * setjmp/long resulted in bs2-test-1 running 3.68% faster and all but some of
 * the MMIO and CPUID tests ran noticeably faster.  Variation is greater than on
 * Linux, but it should be quite a bit faster for normal code.
 */
#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
    || defined(DOXYGEN_RUNNING)
# define IEM_WITH_THROW_CATCH
#endif

/** @def IEM_DO_LONGJMP
 *
 * Wrapper around longjmp / throw.
 *
 * @param   a_pVCpu     The CPU handle.  (Unused by the throw variant.)
 * @param   a_rc        The status code jump back with / throw.
 */
#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
# ifdef IEM_WITH_THROW_CATCH
#  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  throw int(a_rc)
# else
#  define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
# endif
#endif

/** For use with IEM functions that may do a longjmp (when enabled).
 *
 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
 * attribute.  So, we indicate that functions that may be part of a longjmp may
 * throw "exceptions" and that the compiler should definitely not generate any
 * std::terminate calling unwind code.
 *
 * Here is one example of this ending in std::terminate:
 * @code{.txt}
00 00000041`cadfda10 00007ffc`5d5a1f9f     ucrtbase!abort+0x4e
01 00000041`cadfda40 00007ffc`57af229a     ucrtbase!terminate+0x1f
02 00000041`cadfda70 00007ffb`eec91030     VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
03 00000041`cadfdaa0 00007ffb`eec92c6d     VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
04 00000041`cadfdad0 00007ffb`eec93ae5     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
05 00000041`cadfdc00 00007ffb`eec92258     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
06 00000041`cadfdc30 00007ffb`eec940e9     VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
07 00000041`cadfdcd0 00007ffc`5f9f249f     VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
08 00000041`cadfdd40 00007ffc`5f980939     ntdll!RtlpExecuteHandlerForUnwind+0xf
09 00000041`cadfdd70 00007ffc`5f9a0edd     ntdll!RtlUnwindEx+0x339
0a 00000041`cadfe490 00007ffc`57aff976     ntdll!RtlUnwind+0xcd
0b 00000041`cadfea00 00007ffb`e1b5de01     VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
0c (Inline Function) --------`--------     VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
0d 00000041`cadfea50 00007ffb`e1b60f6b     VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
0e 00000041`cadfea90 00007ffb`e1cc6201     VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
0f 00000041`cadfec70 00007ffb`e1d0df8d     VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
10 00000041`cadfed60 00007ffb`e1d0d4c0     VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
 * @endcode
 *
 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
 */
#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
# define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT_EX(false)
#else
# define IEM_NOEXCEPT_MAY_LONGJMP   RT_NOEXCEPT
#endif
138
/** Feature define indicating that IEM implements the x86 task switching
 * machinery (unconditionally defined). */
#define IEM_IMPLEMENTS_TASKSWITCH

/** @def IEM_WITH_3DNOW
 * Includes the 3DNow decoding. */
#if (!defined(IEM_WITH_3DNOW) && !defined(IEM_WITHOUT_3DNOW)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# define IEM_WITH_3DNOW
#endif

/** @def IEM_WITH_THREE_0F_38
 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
#if (!defined(IEM_WITH_THREE_0F_38) && !defined(IEM_WITHOUT_THREE_0F_38)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# define IEM_WITH_THREE_0F_38
#endif

/** @def IEM_WITH_THREE_0F_3A
 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
#if (!defined(IEM_WITH_THREE_0F_3A) && !defined(IEM_WITHOUT_THREE_0F_3A)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# define IEM_WITH_THREE_0F_3A
#endif

/** @def IEM_WITH_VEX
 * Includes the VEX decoding. */
#if (!defined(IEM_WITH_VEX) && !defined(IEM_WITHOUT_VEX)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# define IEM_WITH_VEX
#endif

/** @def IEM_CFG_TARGET_CPU
 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
 *
 * By default we allow this to be configured by the user via the
 * CPUM/GuestCpuName config string, but this comes at a slight cost during
 * decoding.  So, for applications of this code where there is no need to
 * be dynamic wrt target CPU, just modify this define.
 */
#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
#endif

//#define IEM_WITH_CODE_TLB // - work in progress
//#define IEM_WITH_DATA_TLB // - work in progress


/** @def IEM_USE_UNALIGNED_DATA_ACCESS
 * Use unaligned accesses instead of elaborate byte assembly. */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
# define IEM_USE_UNALIGNED_DATA_ACCESS
#endif

//#define IEM_LOG_MEMORY_WRITES
188
#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
/** Instruction statistics. */
typedef struct IEMINSTRSTATS
{
    /* X-macro expansion: the template header invokes IEM_DO_INSTR_STAT once
       per instruction, producing one uint32_t counter member per entry. */
# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
# include "IEMInstructionStatisticsTmpl.h"
# undef IEM_DO_INSTR_STAT
} IEMINSTRSTATS;
#else
/* Opaque forward declaration for the struct test build and doxygen runs. */
struct IEMINSTRSTATS;
typedef struct IEMINSTRSTATS IEMINSTRSTATS;
#endif
/** Pointer to IEM instruction statistics. */
typedef IEMINSTRSTATS *PIEMINSTRSTATS;
203
204
/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
 * @{ */
#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE        0   /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL         1   /**< Intel EFLAGS result. */
#define IEMTARGETCPU_EFL_BEHAVIOR_AMD           2   /**< AMD EFLAGS result */
#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED      3   /**< Reserved/dummy entry slot that's the same as 0. */
#define IEMTARGETCPU_EFL_BEHAVIOR_MASK          3   /**< For masking the index before use. */
/** Selects the right variant from a_aArray.
 * pVCpu is implicit in the caller context. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
 * be used because the host CPU does not support the operation. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
 * into the two.
 * @sa IEM_SELECT_HOST_OR_FALLBACK */
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#else
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#endif
/** @} */

/**
 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
 *
 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
 * indicator.
 *
 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
 */
#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
    (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
#else
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
#endif
248
249
/**
 * Extended operand mode that includes a representation of 8-bit.
 *
 * This is used for packing down modes when invoking some C instruction
 * implementations.
 */
typedef enum IEMMODEX
{
    IEMMODEX_16BIT  = IEMMODE_16BIT,    /**< Same value as the regular 16-bit mode. */
    IEMMODEX_32BIT  = IEMMODE_32BIT,    /**< Same value as the regular 32-bit mode. */
    IEMMODEX_64BIT  = IEMMODE_64BIT,    /**< Same value as the regular 64-bit mode. */
    IEMMODEX_8BIT                       /**< 8-bit extension; auto-assigned the value following IEMMODEX_64BIT. */
} IEMMODEX;
AssertCompileSize(IEMMODEX, 4);
264
265
/**
 * Branch types.
 */
typedef enum IEMBRANCH
{
    IEMBRANCH_JUMP = 1,         /**< Plain jump (first value is 1, so zero is never a valid branch type). */
    IEMBRANCH_CALL,             /**< Call. */
    IEMBRANCH_TRAP,             /**< Trap (hardware generated exception). */
    IEMBRANCH_SOFTWARE_INT,     /**< Software interrupt. */
    IEMBRANCH_HARDWARE_INT      /**< Hardware (external) interrupt. */
} IEMBRANCH;
AssertCompileSize(IEMBRANCH, 4);
278
279
/**
 * INT instruction types.
 *
 * The non-zero values double as IEM_XCPT_FLAGS_XXX flag values, so a value can
 * be passed straight on as exception flags.
 */
typedef enum IEMINT
{
    /** INT n instruction (opcode 0xcd imm). */
    IEMINT_INTN  = 0,
    /** Single byte INT3 instruction (opcode 0xcc). */
    IEMINT_INT3  = IEM_XCPT_FLAGS_BP_INSTR,
    /** Single byte INTO instruction (opcode 0xce). */
    IEMINT_INTO  = IEM_XCPT_FLAGS_OF_INSTR,
    /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
    IEMINT_INT1  = IEM_XCPT_FLAGS_ICEBP_INSTR
} IEMINT;
AssertCompileSize(IEMINT, 4);
295
296
/**
 * A FPU result.
 */
typedef struct IEMFPURESULT
{
    /** The output value (80-bit extended precision). */
    RTFLOAT80U      r80Result;
    /** The output status (FPU status word), stored directly after the 10-byte
     * value (see the offset assertion below). */
    uint16_t        FSW;
} IEMFPURESULT;
AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
/** Pointer to a FPU result. */
typedef IEMFPURESULT *PIEMFPURESULT;
/** Pointer to a const FPU result. */
typedef IEMFPURESULT const *PCIEMFPURESULT;
312
313
/**
 * A FPU result consisting of two output values and FSW.
 */
typedef struct IEMFPURESULTTWO
{
    /** The first output value (80-bit extended precision). */
    RTFLOAT80U      r80Result1;
    /** The output status (FPU status word), packed directly after the first
     * 10-byte value. */
    uint16_t        FSW;
    /** The second output value, at offset 12 (see assertions below). */
    RTFLOAT80U      r80Result2;
} IEMFPURESULTTWO;
AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
/** Pointer to a FPU result consisting of two output values and FSW. */
typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
/** Pointer to a const FPU result consisting of two output values and FSW. */
typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
332
333
/**
 * IEM TLB entry.
 *
 * Lookup assembly:
 * @code{.asm}
    ; Calculate tag.
    mov     rax, [VA]
    shl     rax, 16
    shr     rax, 16 + X86_PAGE_SHIFT
    or      rax, [uTlbRevision]

    ; Do indexing.
    movzx   ecx, al
    lea     rcx, [pTlbEntries + rcx]

    ; Check tag.
    cmp     [rcx + IEMTLBENTRY.uTag], rax
    jne     .TlbMiss

    ; Check access.
    mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
    and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
    cmp     rax, [uTlbPhysRev]
    jne     .TlbMiss

    ; Calc address and we're done.
    mov     eax, X86_PAGE_OFFSET_MASK
    and     eax, [VA]
    or      rax, [rcx + IEMTLBENTRY.pbMappingR3]
    %ifdef  VBOX_WITH_STATISTICS
    inc     qword [cTlbHits]
    %endif
    jmp     .Done

  .TlbMiss:
    mov     r8d, ACCESS_FLAGS
    mov     rdx, [VA]
    mov     rcx, [pVCpu]
    call    iemTlbTypeMiss
  .Done:

 * @endcode
 *
 */
typedef struct IEMTLBENTRY
{
    /** The TLB entry tag.
     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits, this
     * is ASSUMING a virtual address width of 48 bits.
     *
     * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
     *
     * The TLB lookup code uses the current TLB revision, which won't ever be zero,
     * enabling an extremely cheap TLB invalidation most of the time.  When the TLB
     * revision wraps around though, the tags needs to be zeroed.
     *
     * @note Try use SHRD instruction?  After seeing
     *       https://gmplib.org/~tege/x86-timing.pdf, maybe not.
     *
     * @todo This will need to be reorganized for 57-bit wide virtual address and
     *       PCID (currently 12 bits) and ASID (currently 6 bits) support.  We'll
     *       have to move the TLB entry versioning entirely to the
     *       fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
     *       19 bits left (64 - 57 + 12 = 19) and they'll almost entire be
     *       consumed by PCID and ASID (12 + 6 = 18).
     */
    uint64_t                uTag;
    /** Access flags and physical TLB revision.
     *
     * - Bit  0 - page tables   - not executable (X86_PTE_PAE_NX).
     * - Bit  1 - page tables   - not writable (complemented X86_PTE_RW).
     * - Bit  2 - page tables   - not user (complemented X86_PTE_US).
     * - Bit  3 - pgm phys/virt - not directly writable.
     * - Bit  4 - pgm phys page - not directly readable.
     * - Bit  5 - page tables   - not accessed (complemented X86_PTE_A).
     * - Bit  6 - page tables   - not dirty (complemented X86_PTE_D).
     * - Bit  7 - tlb entry     - pbMappingR3 member not valid.
     * - Bit  8 - phys page     - unassigned memory (IEMTLBE_F_PG_UNASSIGNED).
     * - Bit  9 - phys page     - code page (IEMTLBE_F_PG_CODE_PAGE).
     * - Bits 63 thru 10 are used for the physical TLB revision number
     *   (IEMTLBE_F_PHYS_REV).
     *
     * We're using complemented bit meanings here because it makes it easy to check
     * whether special action is required.  For instance a user mode write access
     * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
     * non-zero result would mean special handling needed because either it wasn't
     * writable, or it wasn't user, or the page wasn't dirty.  A user mode read
     * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
     * need to check any PTE flag.
     */
    uint64_t                fFlagsAndPhysRev;
    /** The guest physical page address. */
    uint64_t                GCPhys;
    /** Pointer to the ring-3 mapping. */
    R3PTRTYPE(uint8_t *)    pbMappingR3;
#if HC_ARCH_BITS == 32
    uint32_t                u32Padding1;
#endif
} IEMTLBENTRY;
AssertCompileSize(IEMTLBENTRY, 32);
/** Pointer to an IEM TLB entry. */
typedef IEMTLBENTRY *PIEMTLBENTRY;
433
/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
 * @{ */
#define IEMTLBE_F_PT_NO_EXEC        RT_BIT_64(0)  /**< Page tables: Not executable. */
#define IEMTLBE_F_PT_NO_WRITE       RT_BIT_64(1)  /**< Page tables: Not writable. */
#define IEMTLBE_F_PT_NO_USER        RT_BIT_64(2)  /**< Page tables: Not user accessible (supervisor only). */
#define IEMTLBE_F_PG_NO_WRITE       RT_BIT_64(3)  /**< Phys page: Not writable (access handler, ROM, whatever). */
#define IEMTLBE_F_PG_NO_READ        RT_BIT_64(4)  /**< Phys page: Not readable (MMIO / access handler, ROM) */
#define IEMTLBE_F_PT_NO_ACCESSED    RT_BIT_64(5)  /**< Page tables: Not accessed (need to be marked accessed). */
#define IEMTLBE_F_PT_NO_DIRTY       RT_BIT_64(6)  /**< Page tables: Not dirty (needs to be made dirty on write). */
#define IEMTLBE_F_NO_MAPPINGR3      RT_BIT_64(7)  /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
#define IEMTLBE_F_PG_UNASSIGNED     RT_BIT_64(8)  /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
#define IEMTLBE_F_PG_CODE_PAGE      RT_BIT_64(9)  /**< Phys page: Code page. */
#define IEMTLBE_F_PHYS_REV          UINT64_C(0xfffffffffffffc00) /**< Physical revision mask (bits 63 thru 10). @sa IEMTLB_PHYS_REV_INCR */
/** @} */
448
449
/**
 * An IEM TLB.
 *
 * We've got two of these, one for data and one for instructions.
 */
typedef struct IEMTLB
{
    /** The TLB entries.
     * We've chosen 256 because that way we can obtain the result directly from a
     * 8-bit register without an additional AND instruction. */
    IEMTLBENTRY         aEntries[256];
    /** The TLB revision.
     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
     * by adding RT_BIT_64(36) to it.  When it wraps around and becomes zero, all
     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
     * (The revision zero indicates an invalid TLB entry.)
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t            uTlbRevision;
    /** The TLB physical address revision - shadow of PGM variable.
     *
     * This is actually only 54 bits wide (see IEMTLBE_F_PHYS_REV) and is
     * incremented by adding RT_BIT_64(10) (IEMTLB_PHYS_REV_INCR).  When it wraps
     * around and becomes zero, a rendezvous is called and each CPU wipe the
     * IEMTLBENTRY::pbMappingR3 as well as IEMTLBENTRY::fFlagsAndPhysRev bits
     * 63 thru 10, 4, and 3.
     *
     * The initial value is chosen to cause an early wraparound. */
    uint64_t volatile   uTlbPhysRev;

    /* Statistics: */

    /** TLB hits (VBOX_WITH_STATISTICS only). */
    uint64_t            cTlbHits;
    /** TLB misses. */
    uint32_t            cTlbMisses;
    /** Slow read path. */
    uint32_t            cTlbSlowReadPath;
    /** Safe read path. */
    uint32_t            cTlbSafeReadPath;
    /** Safe write path. */
    uint32_t            cTlbSafeWritePath;
#if 0
    /** TLB misses because of tag mismatch. */
    uint32_t            cTlbMissesTag;
    /** TLB misses because of virtual access violation. */
    uint32_t            cTlbMissesVirtAccess;
    /** TLB misses because of dirty bit. */
    uint32_t            cTlbMissesDirty;
    /** TLB misses because of MMIO */
    uint32_t            cTlbMissesMmio;
    /** TLB misses because of write access handlers. */
    uint32_t            cTlbMissesWriteHandler;
    /** TLB misses because no r3(/r0) mapping. */
    uint32_t            cTlbMissesMapping;
#endif
    /** Alignment padding. */
    uint32_t            au32Padding[6];
} IEMTLB;
AssertCompileSizeAlignment(IEMTLB, 64);
/** IEMTLB::uTlbRevision increment. */
#define IEMTLB_REVISION_INCR                RT_BIT_64(36)
/** IEMTLB::uTlbRevision mask. */
#define IEMTLB_REVISION_MASK                (~(RT_BIT_64(36) - 1))
/** IEMTLB::uTlbPhysRev increment.
 * @sa IEMTLBE_F_PHYS_REV */
#define IEMTLB_PHYS_REV_INCR                RT_BIT_64(10)
/**
 * Calculates the TLB tag for a virtual address.
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param   a_pTlb      The TLB.
 * @param   a_GCPtr     The virtual address.  Must be RTGCPTR or same size or
 *                      the clearing of the top 16 bits won't work (if 32-bit
 *                      we'll end up with mostly zeros).
 */
#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr)    ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
/**
 * Calculates the TLB tag for a virtual address but without TLB revision.
 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
 * @param   a_GCPtr     The virtual address.  Must be RTGCPTR or same size or
 *                      the clearing of the top 16 bits won't work (if 32-bit
 *                      we'll end up with mostly zeros).
 */
#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr)     ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
/**
 * Converts a TLB tag value into a TLB index.
 * @returns Index into IEMTLB::aEntries.
 * @param   a_uTag      Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_INDEX(a_uTag)         ( (uint8_t)(a_uTag) )
/**
 * Converts a TLB tag value into a pointer to the corresponding TLB entry.
 * @returns Pointer into IEMTLB::aEntries.
 * @param   a_pTlb      The TLB.
 * @param   a_uTag      Value returned by IEMTLB_CALC_TAG.
 */
#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
546
547
/** @name IEM_MC_F_XXX - MC block flags/clues.
 * @todo Merge with IEM_CIMPL_F_XXX
 * @{ */
#define IEM_MC_F_ONLY_8086          RT_BIT_32(0)     /**< 8086/8088 only. */
#define IEM_MC_F_MIN_186            RT_BIT_32(1)     /**< Requires an 80186 or later. */
#define IEM_MC_F_MIN_286            RT_BIT_32(2)     /**< Requires an 80286 or later. */
#define IEM_MC_F_NOT_286_OR_OLDER   IEM_MC_F_MIN_386 /**< Alias for 386-or-later; forward reference is fine, macros expand at use. */
#define IEM_MC_F_MIN_386            RT_BIT_32(3)     /**< Requires a 386 or later. */
#define IEM_MC_F_MIN_486            RT_BIT_32(4)     /**< Requires a 486 or later. */
#define IEM_MC_F_MIN_PENTIUM        RT_BIT_32(5)     /**< Requires a Pentium or later. */
#define IEM_MC_F_MIN_PENTIUM_II     IEM_MC_F_MIN_PENTIUM /**< Currently the same as IEM_MC_F_MIN_PENTIUM. */
#define IEM_MC_F_MIN_CORE           IEM_MC_F_MIN_PENTIUM /**< Currently the same as IEM_MC_F_MIN_PENTIUM. */
#define IEM_MC_F_64BIT              RT_BIT_32(6)     /**< 64-bit mode only. */
#define IEM_MC_F_NOT_64BIT          RT_BIT_32(7)     /**< Not available in 64-bit mode. */
/** @} */
563
/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
 *
 * These clues are mainly for the recompiler, so that it can emit correct code.
 *
 * They are processed by the python script, which also automatically
 * calculates flags for MC blocks based on the statements, extending the use of
 * these flags to describe MC block behavior to the recompiler core.  The python
 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
 * error checking purposes.  The script emits the necessary fEndTb = true and
 * similar statements as this reduces compile time a tiny bit.
 *
 * @{ */
/** Flag set if direct branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_DIRECT           RT_BIT_32(0)
/** Flag set if indirect branch, clear if direct or relative.
 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
 * as well as for return instructions (RET, IRET, RETF). */
#define IEM_CIMPL_F_BRANCH_INDIRECT         RT_BIT_32(1)
/** Flag set if relative branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_RELATIVE         RT_BIT_32(2)
/** Flag set if conditional branch, clear if unconditional. */
#define IEM_CIMPL_F_BRANCH_CONDITIONAL      RT_BIT_32(3)
/** Flag set if it's a far branch (changes CS). */
#define IEM_CIMPL_F_BRANCH_FAR              RT_BIT_32(4)
/** Convenience: Testing any kind of branch. */
#define IEM_CIMPL_F_BRANCH_ANY              (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)

/** Execution flags may change (IEMCPU::fExec). */
#define IEM_CIMPL_F_MODE                    RT_BIT_32(5)
/** May change significant portions of RFLAGS. */
#define IEM_CIMPL_F_RFLAGS                  RT_BIT_32(6)
/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
#define IEM_CIMPL_F_STATUS_FLAGS            RT_BIT_32(7)
/** May trigger interrupt shadowing. */
#define IEM_CIMPL_F_INHIBIT_SHADOW          RT_BIT_32(8)
/** May enable interrupts, so recheck IRQ immediately afterwards executing
 * the instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_AFTER         RT_BIT_32(9)
/** May disable interrupts, so recheck IRQ immediately before executing the
 * instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE        RT_BIT_32(10)
/** Convenience: Check for IRQ both before and after an instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
#define IEM_CIMPL_F_VMEXIT                  RT_BIT_32(11)
/** May modify FPU state.
 * @todo Not sure if this is useful yet. */
#define IEM_CIMPL_F_FPU                     RT_BIT_32(12)
/** REP prefixed instruction which may yield before updating PC.
 * @todo Not sure if this is useful, REP functions now return non-zero
 *       status if they don't update the PC. */
#define IEM_CIMPL_F_REP                     RT_BIT_32(13)
/** I/O instruction.
 * @todo Not sure if this is useful yet. */
#define IEM_CIMPL_F_IO                      RT_BIT_32(14)
/** Force end of TB after the instruction. */
#define IEM_CIMPL_F_END_TB                  RT_BIT_32(15)
/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
#define IEM_CIMPL_F_XCPT \
    (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)

/** The block calls a C-implementation instruction function with two implicit arguments.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_CIMPL             RT_BIT_32(16)
/** The block calls an ASM-implementation instruction function.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL             RT_BIT_32(17)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86FXSTATE pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and IEM_CIMPL_F_CALLS_AIMPL.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(18)
/** @} */
641
642
643/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
644 *
645 * These flags are set when entering IEM and adjusted as code is executed, such
646 * that they will always contain the current values as instructions are
647 * finished.
648 *
649 * In recompiled execution mode, (most of) these flags are included in the
650 * translation block selection key and stored in IEMTB::fFlags alongside the
651 * IEMTB_F_XXX flags. The latter flags uses bits 31 thru 24, which are all zero
652 * in IEMCPU::fExec.
653 *
654 * @{ */
655/** Mode: The block target mode mask. */
656#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
657/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
658#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
659/** X86 Mode: Bit used to indicating pre-386 CPU in 16-bit mode (for eliminating
660 * conditional in EIP/IP updating), and flat wide open CS, SS DS, and ES in
661 * 32-bit mode (for simplifying most memory accesses). */
662#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
663/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
664#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
665/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
666#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
667
668/** X86 Mode: 16-bit on 386 or later. */
669#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
670/** X86 Mode: 80286, 80186 and 8086/88 targetting blocks (EIP update opt). */
671#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
672/** X86 Mode: 16-bit protected mode on 386 or later. */
673#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
674/** X86 Mode: 16-bit protected mode on 386 or later. */
675#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
676/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
#define IEM_F_MODE_X86_16BIT_PROT_V86           UINT32_C(0x00000018)

/** X86 Mode: 32-bit on 386 or later. */
#define IEM_F_MODE_X86_32BIT                    UINT32_C(0x00000001)
/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_FLAT               UINT32_C(0x00000005)
/** X86 Mode: 32-bit protected mode. */
#define IEM_F_MODE_X86_32BIT_PROT               UINT32_C(0x00000009)
/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
#define IEM_F_MODE_X86_32BIT_PROT_FLAT          UINT32_C(0x0000000d)

/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
#define IEM_F_MODE_X86_64BIT                    UINT32_C(0x0000000a)


/** Bypass access handlers when set. */
#define IEM_F_BYPASS_HANDLERS                   UINT32_C(0x00010000)
/** Have pending hardware instruction breakpoints. */
#define IEM_F_PENDING_BRK_INSTR                 UINT32_C(0x00020000)
/** Have pending hardware data breakpoints. */
#define IEM_F_PENDING_BRK_DATA                  UINT32_C(0x00040000)

/** X86: Have pending hardware I/O breakpoints. */
#define IEM_F_PENDING_BRK_X86_IO                UINT32_C(0x00000400)
/** X86: Disregard the lock prefix (implied or not) when set. */
#define IEM_F_X86_DISREGARD_LOCK                UINT32_C(0x00000800)

/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
#define IEM_F_PENDING_BRK_MASK                  (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)

/** Caller configurable options. */
#define IEM_F_USER_OPTS                         (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)

/** X86: The current protection level (CPL) shift factor. */
#define IEM_F_X86_CPL_SHIFT                     8
/** X86: The current protection level (CPL) mask. */
#define IEM_F_X86_CPL_MASK                      UINT32_C(0x00000300)
/** X86: The current protection level (CPL) shifted mask.
 * (IEM_F_X86_CPL_MASK >> IEM_F_X86_CPL_SHIFT, i.e. the CPL value range.) */
#define IEM_F_X86_CPL_SMASK                     UINT32_C(0x00000003)

/** X86 execution context.
 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
 * mode. */
#define IEM_F_X86_CTX_MASK                      UINT32_C(0x0000f000)
/** X86 context: Plain regular execution context. */
#define IEM_F_X86_CTX_NORMAL                    UINT32_C(0x00000000)
/** X86 context: VT-x enabled. */
#define IEM_F_X86_CTX_VMX                       UINT32_C(0x00001000)
/** X86 context: AMD-V enabled. */
#define IEM_F_X86_CTX_SVM                       UINT32_C(0x00002000)
/** X86 context: In AMD-V or VT-x guest mode. */
#define IEM_F_X86_CTX_IN_GUEST                  UINT32_C(0x00004000)
/** X86 context: System management mode (SMM). */
#define IEM_F_X86_CTX_SMM                       UINT32_C(0x00008000)

/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
 * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
 * already). */
742
743/** @} */
744
745
/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
 *
 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
 * translation block flags. The combined flag mask (subject to
 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
 *
 * @{ */
/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
#define IEMTB_F_IEM_F_MASK              UINT32_C(0x00ffffff)

/** Type: The block type mask. */
#define IEMTB_F_TYPE_MASK               UINT32_C(0x03000000)
/** Type: Purely threaded recompiler (via tables). */
#define IEMTB_F_TYPE_THREADED           UINT32_C(0x01000000)
/** Type: Native recompilation. */
#define IEMTB_F_TYPE_NATIVE             UINT32_C(0x02000000)

/** Set when we're starting the block in an "interrupt shadow".
 * We don't need to distinguish between the two types of this mask, thus the one.
 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
#define IEMTB_F_INHIBIT_SHADOW          UINT32_C(0x04000000)
/** Set when we're currently inhibiting NMIs.
 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
#define IEMTB_F_INHIBIT_NMI             UINT32_C(0x08000000)

/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
 * we're close to the limit before starting a TB, as determined by
 * iemGetTbFlagsForCurrentPc(). */
#define IEMTB_F_CS_LIM_CHECKS           UINT32_C(0x10000000)

/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
 * @note We skip the CPL as we don't currently generate ring-specific code,
 *       that's all handled in CIMPL functions.
 *
 *       For the same reasons, we skip all of IEM_F_X86_CTX_MASK, with the
 *       exception of SMM (which we don't implement). */
#define IEMTB_F_KEY_MASK                ( (UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEM_F_X86_CPL_MASK | IEMTB_F_TYPE_MASK)) \
                                         | IEM_F_X86_CTX_SMM)
/** @} */
785
/* Compile-time sanity checks: each IEM_F_MODE_X86_XXX value must decode to the
   expected CPU mode (IEM_F_MODE_CPUMODE_MASK) and carry exactly the right
   flat/pre-386, protected-mode and V86 attribute bits. */
AssertCompile( (IEM_F_MODE_X86_16BIT          & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT          & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT          & IEM_F_MODE_X86_PROT_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT          & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386  & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(  IEM_F_MODE_X86_16BIT_PRE_386  & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386  & IEM_F_MODE_X86_PROT_MASK));
AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386  & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT     & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT     & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(  IEM_F_MODE_X86_16BIT_PROT     & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT     & IEM_F_MODE_X86_V86_MASK));
AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(  IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);

AssertCompile( (IEM_F_MODE_X86_32BIT          & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT          & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(!(IEM_F_MODE_X86_32BIT          & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT     & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(  IEM_F_MODE_X86_32BIT_FLAT     & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT     & IEM_F_MODE_X86_PROT_MASK));
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT     & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT     & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
AssertCompile(  IEM_F_MODE_X86_32BIT_PROT     & IEM_F_MODE_X86_PROT_MASK);
AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
AssertCompile(  IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
AssertCompile(  IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);

AssertCompile( (IEM_F_MODE_X86_64BIT          & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
AssertCompile(  IEM_F_MODE_X86_64BIT          & IEM_F_MODE_X86_PROT_MASK);
AssertCompile(!(IEM_F_MODE_X86_64BIT          & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
822
/** Native instruction type for use with the native code generator.
 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s)
 * (fixed-width 32-bit instruction units, e.g. on ARM64 — see FNIEMTBNATIVE). */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
typedef uint8_t IEMNATIVEINSTR;
#else
typedef uint32_t IEMNATIVEINSTR;
#endif
/** Pointer to a native instruction unit. */
typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
/** Pointer to a const native instruction unit. */
typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
834
/**
 * A call for the threaded call table.
 *
 * One entry is emitted per threaded function call when recompiling a TB;
 * the layout is padded/ordered to keep the entry at exactly 4*8 bytes
 * (see the AssertCompileSize below).
 */
typedef struct IEMTHRDEDCALLENTRY
{
    /** The function to call (IEMTHREADEDFUNCS). */
    uint16_t    enmFunction;
    /** Instruction number in the TB (for statistics). */
    uint8_t     idxInstr;
    /** Explicit padding, currently unused. */
    uint8_t     uUnused0;

    /** Offset into IEMTB::pabOpcodes. */
    uint16_t    offOpcode;
    /** The opcode length. */
    uint8_t     cbOpcode;
    /** Index into IEMTB::aRanges. */
    uint8_t     idxRange;

    /** Generic parameters. */
    uint64_t    auParams[3];
} IEMTHRDEDCALLENTRY;
AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
/** Pointer to a threaded call entry. */
typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
/** Pointer to a const threaded call entry. */
typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
861
/**
 * Native IEM TB 'function' typedef.
 *
 * This will throw/longjmp on occasion.
 *
 * @note    AMD64 doesn't have that many non-volatile registers and does sport
 *          32-bit address displacements, so we don't need pCtx.
 *
 *          On ARM64 pCtx allows us to directly address the whole register
 *          context without requiring a separate indexing register holding the
 *          offset. This saves an instruction loading the offset for each guest
 *          CPU context access, at the cost of a non-volatile register.
 *          Fortunately, ARM64 has quite a lot more registers.
 */
typedef
#ifdef RT_ARCH_AMD64
int FNIEMTBNATIVE(PVMCPUCC pVCpu)
#else
int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
#endif
#if RT_CPLUSPLUS_PREREQ(201700)
    IEM_NOEXCEPT_MAY_LONGJMP
#endif
    ;
/** Pointer to a native IEM TB entry point function.
 * This will throw/longjmp on occasion. */
typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
889
890
/**
 * Translation block debug info entry type.
 */
typedef enum IEMTBDBGENTRYTYPE
{
    kIemTbDbgEntryType_Invalid = 0,
    /** The entry is for marking a native code position.
     * Entries following this all apply to this position. */
    kIemTbDbgEntryType_NativeOffset,
    /** The entry is for a new guest instruction. */
    kIemTbDbgEntryType_GuestInstruction,
    /** Marks the start of a threaded call. */
    kIemTbDbgEntryType_ThreadedCall,
    /** Marks the location of a label. */
    kIemTbDbgEntryType_Label,
    /** Info about a host register shadowing a guest register. */
    kIemTbDbgEntryType_GuestRegShadowing,
    /** End of valid values (exclusive), for range checking. */
    kIemTbDbgEntryType_End
} IEMTBDBGENTRYTYPE;
910
/**
 * Translation block debug info entry.
 *
 * A 32-bit union; the low 4 bits always hold the IEMTBDBGENTRYTYPE value
 * selecting which of the views below applies.
 */
typedef union IEMTBDBGENTRY
{
    /** Plain 32-bit view. */
    uint32_t u;

    /** Generic view for getting at the type field. */
    struct
    {
        /** IEMTBDBGENTRYTYPE */
        uint32_t    uType : 4;
        uint32_t    uTypeSpecific : 28;
    } Gen;

    struct
    {
        /* kIemTbDbgEntryType_NativeOffset. */
        uint32_t    uType : 4;
        /** Native code offset. */
        uint32_t    offNative : 28;
    } NativeOffset;

    struct
    {
        /* kIemTbDbgEntryType_GuestInstruction. */
        uint32_t    uType : 4;
        uint32_t    uUnused : 4;
        /** The IEM_F_XXX flags. */
        uint32_t    fExec : 24;
    } GuestInstruction;

    struct
    {
        /* kIemTbDbgEntryType_ThreadedCall. */
        uint32_t    uType : 4;
        /** Set if the call was recompiled to native code, clear if just calling
         *  threaded function. */
        uint32_t    fRecompiled : 1;
        uint32_t    uUnused : 11;
        /** The threaded call number (IEMTHREADEDFUNCS). */
        uint32_t    enmCall : 16;
    } ThreadedCall;

    struct
    {
        /* kIemTbDbgEntryType_Label. */
        uint32_t    uType : 4;
        uint32_t    uUnused : 4;
        /** The label type (IEMNATIVELABELTYPE). */
        uint32_t    enmLabel : 8;
        /** The label data. */
        uint32_t    uData : 16;
    } Label;

    struct
    {
        /* kIemTbDbgEntryType_GuestRegShadowing. */
        uint32_t    uType : 4;
        uint32_t    uUnused : 4;
        /** The guest register being shadowed (IEMNATIVEGSTREG). */
        uint32_t    idxGstReg : 8;
        /** The host new register number, UINT8_MAX if dropped. */
        uint32_t    idxHstReg : 8;
        /** The previous host register number, UINT8_MAX if new. */
        uint32_t    idxHstRegPrev : 8;
    } GuestRegShadowing;
} IEMTBDBGENTRY;
AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
/** Pointer to a debug info entry. */
typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
/** Pointer to a const debug info entry. */
typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;
985
/**
 * Translation block debug info.
 *
 * Variable-size structure: cEntries gives the number of valid aEntries
 * elements following the header.
 */
typedef struct IEMTBDBG
{
    /** Number of entries in aEntries. */
    uint32_t        cEntries;
    /** Debug info entries. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    IEMTBDBGENTRY   aEntries[RT_FLEXIBLE_ARRAY];
} IEMTBDBG;
/** Pointer to TB debug info. */
typedef IEMTBDBG *PIEMTBDBG;
/** Pointer to const TB debug info. */
typedef IEMTBDBG const *PCIEMTBDBG;
1001
1002
/**
 * Translation block.
 *
 * The current plan is to just keep TBs and associated lookup hash table private
 * to each VCpu as that simplifies TB removal greatly (no races) and generally
 * avoids using expensive atomic primitives for updating lists and stuff.
 */
#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
typedef struct IEMTB
{
    /** Next block with the same hash table entry. */
    struct IEMTB       *pNext;
    /** Usage counter. */
    uint32_t            cUsed;
    /** The IEMCPU::msRecompilerPollNow last time it was used. */
    uint32_t            msLastUsed;

    /** @name What uniquely identifies the block.
     * @{ */
    RTGCPHYS            GCPhysPc;
    /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
    uint32_t            fFlags;
    union
    {
        struct
        {
            /** Relevant CS X86DESCATTR_XXX bits. */
            uint16_t    fAttr;
        } x86;
    };
    /** @} */

    /** Number of opcode ranges. */
    uint8_t             cRanges;
    /** Statistics: Number of instructions in the block. */
    uint8_t             cInstructions;

    /** Type specific info. */
    union
    {
        struct
        {
            /** The call sequence table. */
            PIEMTHRDEDCALLENTRY paCalls;
            /** Number of calls in paCalls. */
            uint16_t            cCalls;
            /** Number of calls allocated. */
            uint16_t            cAllocated;
        } Thrd;
        struct
        {
            /** The native instructions (PFNIEMTBNATIVE). */
            PIEMNATIVEINSTR     paInstructions;
            /** Number of instructions pointed to by paInstructions. */
            uint32_t            cInstructions;
        } Native;
        /** Generic view for zeroing when freeing. */
        struct
        {
            uintptr_t           uPtr;
            uint32_t            uData;
        } Gen;
    };

    /** The allocation chunk this TB belongs to. */
    uint8_t             idxAllocChunk;
    /** Explicit padding, currently unused. */
    uint8_t             bUnused;

    /** Number of bytes of opcodes stored in pabOpcodes.
     * @todo this field isn't really needed, aRanges keeps the actual info. */
    uint16_t            cbOpcodes;
    /** Pointer to the opcode bytes this block was recompiled from. */
    uint8_t            *pabOpcodes;

    /** Debug info if enabled.
     * This is only generated by the native recompiler. */
    PIEMTBDBG           pDbgInfo;

    /* --- 64 byte cache line end --- */

    /** Opcode ranges.
     *
     * The opcode checkers and maybe TLB loading functions will use this to figure
     * out what to do. The parameter will specify an entry and the opcode offset to
     * start at and the minimum number of bytes to verify (instruction length).
     *
     * When VT-x and AMD-V looks up the opcode bytes for an exiting instruction,
     * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
     * code TLB (must have a valid entry for that address) and scan the ranges to
     * locate the corresponding opcodes. Probably.
     */
    struct IEMTBOPCODERANGE
    {
        /** Offset within pabOpcodes. */
        uint16_t        offOpcodes;
        /** Number of bytes. */
        uint16_t        cbOpcodes;
        /** The page offset. */
        RT_GCC_EXTENSION
        uint16_t        offPhysPage : 12;
        /** Unused bits. */
        RT_GCC_EXTENSION
        uint16_t        u2Unused    :  2;
        /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
        RT_GCC_EXTENSION
        uint16_t        idxPhysPage :  2;
    } aRanges[8];

    /** Physical pages that this TB covers.
     * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
    RTGCPHYS            aGCPhysPages[2];
} IEMTB;
#pragma pack()
/* Layout checks: the fields used by the TB cache and allocator must keep their
   alignment, and aRanges must start exactly at the second cache line. */
AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
AssertCompileMemberOffset(IEMTB, aRanges, 64);
AssertCompileMemberSize(IEMTB, aRanges[0], 6);
#if 1
AssertCompileSize(IEMTB, 128);
# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
#else
AssertCompileSize(IEMTB, 168);
# undef IEMTB_SIZE_IS_POWER_OF_TWO
#endif

/** Pointer to a translation block. */
typedef IEMTB *PIEMTB;
/** Pointer to a const translation block. */
typedef IEMTB const *PCIEMTB;
1135
/**
 * A chunk of memory in the TB allocator.
 */
typedef struct IEMTBCHUNK
{
    /** Pointer to the translation blocks in this chunk. */
    PIEMTB          paTbs;
#ifdef IN_RING0
    /** Allocation handle. */
    RTR0MEMOBJ      hMemObj;
#endif
} IEMTBCHUNK;
1148
/**
 * A per-CPU translation block allocator.
 *
 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
 * the length of the collision list, and of course also for cache line alignment
 * reasons, the TBs must be allocated with at least 64-byte alignment.
 * Memory is therefore allocated using one of the page aligned allocators.
 *
 *
 * To avoid wasting too much memory, it is allocated piecemeal as needed,
 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
 * that enables us to quickly calculate the allocation bitmap position when
 * freeing the translation block.
 */
typedef struct IEMTBALLOCATOR
{
    /** Magic value (IEMTBALLOCATOR_MAGIC). */
    uint32_t        uMagic;

#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
    /** Mask corresponding to cTbsPerChunk - 1. */
    uint32_t        fChunkMask;
    /** Shift count corresponding to cTbsPerChunk. */
    uint8_t         cChunkShift;
#else
    uint32_t        uUnused;
    uint8_t         bUnused;
#endif
    /** Number of chunks we're allowed to allocate. */
    uint8_t         cMaxChunks;
    /** Number of chunks currently populated. */
    uint16_t        cAllocatedChunks;
    /** Number of translation blocks per chunk. */
    uint32_t        cTbsPerChunk;
    /** Chunk size. */
    uint32_t        cbPerChunk;

    /** The maximum number of TBs. */
    uint32_t        cMaxTbs;
    /** Total number of TBs in the populated chunks.
     * (cAllocatedChunks * cTbsPerChunk) */
    uint32_t        cTotalTbs;
    /** The current number of TBs in use.
     * The number of free TBs: cTotalTbs - cInUseTbs; */
    uint32_t        cInUseTbs;
    /** Statistics: Number of the cInUseTbs that are native ones. */
    uint32_t        cNativeTbs;
    /** Statistics: Number of the cInUseTbs that are threaded ones. */
    uint32_t        cThreadedTbs;

    /** Where to start pruning TBs from when we're out.
     * See iemTbAllocatorAllocSlow for details. */
    uint32_t        iPruneFrom;
    /** Hint about which bit to start scanning the bitmap from. */
    uint32_t        iStartHint;

    /** Statistics: Number of TB allocation calls. */
    STAMCOUNTER     StatAllocs;
    /** Statistics: Number of TB free calls. */
    STAMCOUNTER     StatFrees;
    /** Statistics: Time spend pruning. */
    STAMPROFILE     StatPrune;

    /** The delayed free list (see iemTbAlloctorScheduleForFree). */
    PIEMTB          pDelayedFreeHead;

    /** Allocation chunks. */
    IEMTBCHUNK      aChunks[256];

    /** Allocation bitmap for all possible chunks. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    uint64_t        bmAllocated[RT_FLEXIBLE_ARRAY];
} IEMTBALLOCATOR;
/** Pointer to a TB allocator. */
typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;

/** Magic value for the TB allocator (Emmet Harley Cohen). */
#define IEMTBALLOCATOR_MAGIC        UINT32_C(0x19900525)
1227
1228
/**
 * A per-CPU translation block cache (hash table).
 *
 * The hash table is allocated once during IEM initialization and sized to
 * double the max TB count, rounded up to the nearest power of two (so we can
 * use an AND mask rather than a modulo/remainder operation when hashing).
 */
typedef struct IEMTBCACHE
{
    /** Magic value (IEMTBCACHE_MAGIC). */
    uint32_t        uMagic;
    /** Size of the hash table. This is a power of two. */
    uint32_t        cHash;
    /** The mask corresponding to cHash. */
    uint32_t        uHashMask;
    uint32_t        uPadding;

    /** @name Statistics
     * @{ */
    /** Number of collisions ever. */
    STAMCOUNTER     cCollisions;

    /** Statistics: Number of TB lookup misses. */
    STAMCOUNTER     cLookupMisses;
    /** Statistics: Number of TB lookup hits (debug only). */
    STAMCOUNTER     cLookupHits;
    STAMCOUNTER     auPadding2[3];
    /** Statistics: Collision list length pruning. */
    STAMPROFILE     StatPrune;
    /** @} */

    /** The hash table itself.
     * @note The lower 6 bits of the pointer is used for keeping the collision
     *       list length, so we can take action when it grows too long.
     *       This works because TBs are allocated using a 64 byte (or
     *       higher) alignment from page aligned chunks of memory, so the lower
     *       6 bits of the address will always be zero.
     *       See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
     */
    RT_FLEXIBLE_ARRAY_EXTENSION
    PIEMTB          apHash[RT_FLEXIBLE_ARRAY];
} IEMTBCACHE;
/** Pointer to a per-CPU translation block cache. */
typedef IEMTBCACHE *PIEMTBCACHE;

/** Magic value for IEMTBCACHE (Johnny O'Neal). */
#define IEMTBCACHE_MAGIC            UINT32_C(0x19561010)

/** The collision count mask for IEMTBCACHE::apHash entries. */
#define IEMTBCACHE_PTR_COUNT_MASK               ((uintptr_t)0x3f)
/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
#define IEMTBCACHE_PTR_MAX_COUNT                ((uintptr_t)0x30)
/** Combine a TB pointer and a collision list length into a value for an
 * IEMTBCACHE::apHash entry. */
#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount)    (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
/** Get the TB pointer from an IEMTBCACHE::apHash entry,
 * masking off the collision list length. */
#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry)     (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
/** Get the collision list length from an IEMTBCACHE::apHash entry. */
#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry)  ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
1290
/**
 * Calculates the hash table slot for a TB from physical PC address and TB flags.
 */
#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
    IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)

/**
 * Calculates the hash table slot for a TB from physical PC address and TB
 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
 */
#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
    (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
1303
1304
/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
 *
 * These flags parallel IEM_CIMPL_F_BRANCH_XXX.
 *
 * @{ */
/** Value if no branching happened recently. */
#define IEMBRANCHED_F_NO            UINT8_C(0x00)
/** Flag set if direct branch, clear if absolute or indirect. */
#define IEMBRANCHED_F_DIRECT        UINT8_C(0x01)
/** Flag set if indirect branch, clear if direct or relative. */
#define IEMBRANCHED_F_INDIRECT      UINT8_C(0x02)
/** Flag set if relative branch, clear if absolute or indirect. */
#define IEMBRANCHED_F_RELATIVE      UINT8_C(0x04)
/** Flag set if conditional branch, clear if unconditional. */
#define IEMBRANCHED_F_CONDITIONAL   UINT8_C(0x08)
/** Flag set if it's a far branch. */
#define IEMBRANCHED_F_FAR           UINT8_C(0x10)
/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */
#define IEMBRANCHED_F_ZERO          UINT8_C(0x20)
/** @} */
1325
1326
1327/**
1328 * The per-CPU IEM state.
1329 */
1330typedef struct IEMCPU
1331{
1332 /** Info status code that needs to be propagated to the IEM caller.
1333 * This cannot be passed internally, as it would complicate all success
1334 * checks within the interpreter making the code larger and almost impossible
1335 * to get right. Instead, we'll store status codes to pass on here. Each
1336 * source of these codes will perform appropriate sanity checks. */
1337 int32_t rcPassUp; /* 0x00 */
1338 /** Execution flag, IEM_F_XXX. */
1339 uint32_t fExec; /* 0x04 */
1340
1341 /** @name Decoder state.
1342 * @{ */
1343#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1344# ifdef IEM_WITH_CODE_TLB
1345 /** The offset of the next instruction byte. */
1346 uint32_t offInstrNextByte; /* 0x08 */
1347 /** The number of bytes available at pbInstrBuf for the current instruction.
1348 * This takes the max opcode length into account so that doesn't need to be
1349 * checked separately. */
1350 uint32_t cbInstrBuf; /* 0x0c */
1351 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1352 * This can be NULL if the page isn't mappable for some reason, in which
1353 * case we'll do fallback stuff.
1354 *
1355 * If we're executing an instruction from a user specified buffer,
1356 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1357 * aligned pointer but pointer to the user data.
1358 *
1359 * For instructions crossing pages, this will start on the first page and be
1360 * advanced to the next page by the time we've decoded the instruction. This
1361 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1362 */
1363 uint8_t const *pbInstrBuf; /* 0x10 */
1364# if ARCH_BITS == 32
1365 uint32_t uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */
1366# endif
1367 /** The program counter corresponding to pbInstrBuf.
1368 * This is set to a non-canonical address when we need to invalidate it. */
1369 uint64_t uInstrBufPc; /* 0x18 */
1370 /** The guest physical address corresponding to pbInstrBuf. */
1371 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1372 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1373 * This takes the CS segment limit into account. */
1374 uint16_t cbInstrBufTotal; /* 0x28 */
1375 /** Offset into pbInstrBuf of the first byte of the current instruction.
1376 * Can be negative to efficiently handle cross page instructions. */
1377 int16_t offCurInstrStart; /* 0x2a */
1378
1379 /** The prefix mask (IEM_OP_PRF_XXX). */
1380 uint32_t fPrefixes; /* 0x2c */
1381 /** The extra REX ModR/M register field bit (REX.R << 3). */
1382 uint8_t uRexReg; /* 0x30 */
1383 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1384 * (REX.B << 3). */
1385 uint8_t uRexB; /* 0x31 */
1386 /** The extra REX SIB index field bit (REX.X << 3). */
1387 uint8_t uRexIndex; /* 0x32 */
1388
1389 /** The effective segment register (X86_SREG_XXX). */
1390 uint8_t iEffSeg; /* 0x33 */
1391
1392 /** The offset of the ModR/M byte relative to the start of the instruction. */
1393 uint8_t offModRm; /* 0x34 */
1394
1395# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1396 /** The current offset into abOpcode. */
1397 uint8_t offOpcode; /* 0x35 */
1398# else
1399 uint8_t bUnused; /* 0x35 */
1400# endif
1401# else /* !IEM_WITH_CODE_TLB */
1402 /** The size of what has currently been fetched into abOpcode. */
1403 uint8_t cbOpcode; /* 0x08 */
1404 /** The current offset into abOpcode. */
1405 uint8_t offOpcode; /* 0x09 */
1406 /** The offset of the ModR/M byte relative to the start of the instruction. */
1407 uint8_t offModRm; /* 0x0a */
1408
1409 /** The effective segment register (X86_SREG_XXX). */
1410 uint8_t iEffSeg; /* 0x0b */
1411
1412 /** The prefix mask (IEM_OP_PRF_XXX). */
1413 uint32_t fPrefixes; /* 0x0c */
1414 /** The extra REX ModR/M register field bit (REX.R << 3). */
1415 uint8_t uRexReg; /* 0x10 */
1416 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1417 * (REX.B << 3). */
1418 uint8_t uRexB; /* 0x11 */
1419 /** The extra REX SIB index field bit (REX.X << 3). */
1420 uint8_t uRexIndex; /* 0x12 */
1421
1422# endif /* !IEM_WITH_CODE_TLB */
1423
1424 /** The effective operand mode. */
1425 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1426 /** The default addressing mode. */
1427 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1428 /** The effective addressing mode. */
1429 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1430 /** The default operand mode. */
1431 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1432
1433 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1434 uint8_t idxPrefix; /* 0x3a, 0x17 */
1435 /** 3rd VEX/EVEX/XOP register.
1436 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1437 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1438 /** The VEX/EVEX/XOP length field. */
1439 uint8_t uVexLength; /* 0x3c, 0x19 */
1440 /** Additional EVEX stuff. */
1441 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1442
1443# ifndef IEM_WITH_CODE_TLB
1444 /** Explicit alignment padding. */
1445 uint8_t abAlignment2a[1]; /* 0x1b */
1446# endif
1447 /** The FPU opcode (FOP). */
1448 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1449# ifndef IEM_WITH_CODE_TLB
1450 /** Explicit alignment padding. */
1451 uint8_t abAlignment2b[2]; /* 0x1e */
1452# endif
1453
1454 /** The opcode bytes. */
1455 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1456 /** Explicit alignment padding. */
1457# ifdef IEM_WITH_CODE_TLB
1458 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1459# else
1460 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1461# endif
1462#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1463 uint8_t abOpaqueDecoder[0x4f - 0x8];
1464#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1465 /** @} */
1466
1467
1468 /** The number of active guest memory mappings. */
1469 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1470
1471 /** Records for tracking guest memory mappings. */
1472 struct
1473 {
1474 /** The address of the mapped bytes. */
1475 R3R0PTRTYPE(void *) pv;
1476 /** The access flags (IEM_ACCESS_XXX).
1477 * IEM_ACCESS_INVALID if the entry is unused. */
1478 uint32_t fAccess;
1479#if HC_ARCH_BITS == 64
1480 uint32_t u32Alignment4; /**< Alignment padding. */
1481#endif
1482 } aMemMappings[3]; /* 0x50 LB 0x30 */
1483
1484 /** Locking records for the mapped memory. */
1485 union
1486 {
1487 PGMPAGEMAPLOCK Lock;
1488 uint64_t au64Padding[2];
1489 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1490
1491 /** Bounce buffer info.
1492 * This runs in parallel to aMemMappings. */
1493 struct
1494 {
1495 /** The physical address of the first byte. */
1496 RTGCPHYS GCPhysFirst;
1497 /** The physical address of the second page. */
1498 RTGCPHYS GCPhysSecond;
1499 /** The number of bytes in the first page. */
1500 uint16_t cbFirst;
1501 /** The number of bytes in the second page. */
1502 uint16_t cbSecond;
1503 /** Whether it's unassigned memory. */
1504 bool fUnassigned;
1505 /** Explicit alignment padding. */
1506 bool afAlignment5[3];
1507 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1508
1509 /** The flags of the current exception / interrupt. */
1510 uint32_t fCurXcpt; /* 0xf8 */
1511 /** The current exception / interrupt. */
1512 uint8_t uCurXcpt; /* 0xfc */
1513 /** Exception / interrupt recursion depth. */
1514 int8_t cXcptRecursions; /* 0xfb */
1515
1516 /** The next unused mapping index.
1517 * @todo try find room for this up with cActiveMappings. */
1518 uint8_t iNextMapping; /* 0xfd */
1519 uint8_t abAlignment7[1];
1520
1521 /** Bounce buffer storage.
1522 * This runs in parallel to aMemMappings and aMemBbMappings. */
1523 struct
1524 {
1525 uint8_t ab[512];
1526 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1527
1528
1529 /** Pointer set jump buffer - ring-3 context. */
1530 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1531 /** Pointer set jump buffer - ring-0 context. */
1532 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1533
1534 /** @todo Should move this near @a fCurXcpt later. */
1535 /** The CR2 for the current exception / interrupt. */
1536 uint64_t uCurXcptCr2;
1537 /** The error code for the current exception / interrupt. */
1538 uint32_t uCurXcptErr;
1539
1540 /** @name Statistics
1541 * @{ */
1542 /** The number of instructions we've executed. */
1543 uint32_t cInstructions;
1544 /** The number of potential exits. */
1545 uint32_t cPotentialExits;
1546 /** The number of bytes data or stack written (mostly for IEMExecOneEx).
1547 * This may contain uncommitted writes. */
1548 uint32_t cbWritten;
1549 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1550 uint32_t cRetInstrNotImplemented;
1551 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1552 uint32_t cRetAspectNotImplemented;
1553 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1554 uint32_t cRetInfStatuses;
1555 /** Counts other error statuses returned. */
1556 uint32_t cRetErrStatuses;
1557 /** Number of times rcPassUp has been used. */
1558 uint32_t cRetPassUpStatus;
1559 /** Number of times RZ left with instruction commit pending for ring-3. */
1560 uint32_t cPendingCommit;
1561 /** Number of long jumps. */
1562 uint32_t cLongJumps;
1563 /** @} */
1564
1565 /** @name Target CPU information.
1566 * @{ */
1567#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1568 /** The target CPU. */
1569 uint8_t uTargetCpu;
1570#else
1571 uint8_t bTargetCpuPadding;
1572#endif
1573 /** For selecting assembly works matching the target CPU EFLAGS behaviour, see
1574 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
1575 * native host support and the 2nd for when there is.
1576 *
1577 * The two values are typically indexed by a g_CpumHostFeatures bit.
1578 *
1579 * This is for instance used for the BSF & BSR instructions where AMD and
1580 * Intel CPUs produce different EFLAGS. */
1581 uint8_t aidxTargetCpuEflFlavour[2];
1582
1583 /** The CPU vendor. */
1584 CPUMCPUVENDOR enmCpuVendor;
1585 /** @} */
1586
1587 /** @name Host CPU information.
1588 * @{ */
1589 /** The CPU vendor. */
1590 CPUMCPUVENDOR enmHostCpuVendor;
1591 /** @} */
1592
1593 /** Counts RDMSR \#GP(0) LogRel(). */
1594 uint8_t cLogRelRdMsr;
1595 /** Counts WRMSR \#GP(0) LogRel(). */
1596 uint8_t cLogRelWrMsr;
1597 /** Alignment padding. */
1598 uint8_t abAlignment9[46];
1599
1600 /** @name Recompilation
1601 * @{ */
1602 /** Pointer to the current translation block.
1603 * This can either be one being executed or one being compiled. */
1604 R3PTRTYPE(PIEMTB) pCurTbR3;
1605 /** Fixed TB used for threaded recompilation.
1606 * This is allocated once with maxed-out sizes and re-used afterwards. */
1607 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1608 /** Pointer to the ring-3 TB cache for this EMT. */
1609 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
    /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
     * The TBs are based on physical addresses, so this is needed to correlate
     * RIP to opcode bytes stored in the TB (AMD-V / VT-x). */
1613 uint64_t uCurTbStartPc;
1614 /** Number of threaded TBs executed. */
1615 uint64_t cTbExecThreaded;
1616 /** Number of native TBs executed. */
1617 uint64_t cTbExecNative;
1618 /** Whether we need to check the opcode bytes for the current instruction.
1619 * This is set by a previous instruction if it modified memory or similar. */
1620 bool fTbCheckOpcodes;
1621 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1622 uint8_t fTbBranched;
1623 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1624 bool fTbCrossedPage;
1625 /** Whether to end the current TB. */
1626 bool fEndTb;
1627 /** Number of instructions before we need emit an IRQ check call again.
1628 * This helps making sure we don't execute too long w/o checking for
1629 * interrupts and immediately following instructions that may enable
1630 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1631 * required to make sure we check following the next instruction as well, see
1632 * fTbCurInstrIsSti. */
1633 uint8_t cInstrTillIrqCheck;
1634 /** Indicates that the current instruction is an STI. This is set by the
1635 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1636 bool fTbCurInstrIsSti;
1637 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1638 uint16_t cbOpcodesAllocated;
    /** Space reserved for recompiler data / alignment. */
1640 bool afRecompilerStuff1[4];
1641 /** The virtual sync time at the last timer poll call. */
1642 uint32_t msRecompilerPollNow;
1643 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1644 uint32_t fTbCurInstr;
1645 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1646 uint32_t fTbPrevInstr;
1647 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1648 RTGCPHYS GCPhysInstrBufPrev;
    /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction.
     * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determine
     * whether a branch instruction jumps to a new page or stays within the
     * current one. */
1653 RTGCPHYS GCPhysTbBranchSrcBuf;
1654 /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction. */
1655 uint64_t GCVirtTbBranchSrcBuf;
1656 /** Pointer to the ring-3 TB allocator for this EMT. */
1657 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1658 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1659 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1660 /** Pointer to the native recompiler state for ring-3. */
1661 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1662 /** Alignment padding. */
1663 uint64_t auAlignment10[4];
1664 /** Statistics: Times TB execution was broken off before reaching the end. */
1665 STAMCOUNTER StatTbExecBreaks;
1666 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1667 STAMCOUNTER StatCheckIrqBreaks;
1668 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1669 STAMCOUNTER StatCheckModeBreaks;
1670 /** Statistics: Times a post jump target check missed and had to find new TB. */
1671 STAMCOUNTER StatCheckBranchMisses;
1672 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1673 STAMCOUNTER StatCheckNeedCsLimChecking;
1674 /** Threaded TB statistics: Number of instructions per TB. */
1675 STAMPROFILE StatTbThreadedInstr;
1676 /** Threaded TB statistics: Number of calls per TB. */
1677 STAMPROFILE StatTbThreadedCalls;
1678 /** Native TB statistics: Native code size per TB. */
1679 STAMPROFILE StatTbNativeCode;
1680 /** Native TB statistics: Profiling native recompilation. */
1681 STAMPROFILE StatNativeRecompilation;
1682 /** @} */
1683
1684 /** Data TLB.
1685 * @remarks Must be 64-byte aligned. */
1686 IEMTLB DataTlb;
1687 /** Instruction TLB.
1688 * @remarks Must be 64-byte aligned. */
1689 IEMTLB CodeTlb;
1690
1691 /** Exception statistics. */
1692 STAMCOUNTER aStatXcpts[32];
1693 /** Interrupt statistics. */
1694 uint32_t aStatInts[256];
1695
1696#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
1697 /** Instruction statistics for ring-0/raw-mode. */
1698 IEMINSTRSTATS StatsRZ;
1699 /** Instruction statistics for ring-3. */
1700 IEMINSTRSTATS StatsR3;
1701#endif
1702} IEMCPU;
/* Compile-time layout checks.  The 64-byte TLB alignment is required per the
   @remarks on IEMCPU::DataTlb / IEMCPU::CodeTlb; the member offsets mirror
   the offsets noted in the structure's member comments. */
AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);

/** Pointer to the per-CPU IEM state. */
typedef IEMCPU *PIEMCPU;
/** Pointer to the const per-CPU IEM state. */
typedef IEMCPU const *PCIEMCPU;
1714
1715
/** @def IEM_GET_CTX
 * Gets the guest CPU context for the calling EMT.
 * @returns PCPUMCTX
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)

/** @def IEM_CTX_ASSERT
 * Asserts that the state given by @a a_fExtrnMbz is present in the CPU
 * context, i.e. that none of those CPUMCTX_EXTRN_XXX bits are still set in
 * CPUMCTX::fExtrn (a set bit means the state is still external / not imported).
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 * @param   a_fExtrnMbz     The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
 */
#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
    AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
              ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
               (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
1732
/** @def IEM_CTX_IMPORT_RET
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 *
 * Returns on import failure (AssertRCReturn, so the failure status is
 * propagated to the caller).
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertRCReturn(rcCtxImport, rcCtxImport); \
        } \
    } while (0)
1753
/** @def IEM_CTX_IMPORT_NORET
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 *
 * Import failures are only logged/asserted (AssertLogRelRC); execution
 * continues regardless.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertLogRelRC(rcCtxImport); \
        } \
    } while (0)
1772
/** @def IEM_CTX_IMPORT_JMP
 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
 *
 * Calls CPUMImportGuestStateOnDemand to import the bits as needed.
 *
 * Longjmps (IEM_DO_LONGJMP) on import failure.
 *
 * @note    Previously the failure path passed a hard-coded 'pVCpu' to
 *          IEM_DO_LONGJMP instead of the @a a_pVCpu macro parameter, so the
 *          macro only worked when the caller's variable happened to be named
 *          'pVCpu'.  Fixed for macro hygiene; expansion is unchanged for all
 *          such callers.
 *
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 * @param   a_fExtrnImport  The mask of CPUMCTX_EXTRN_XXX flags to import.
 */
#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
    do { \
        if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
        { /* likely */ } \
        else \
        { \
            int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
            AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(a_pVCpu, rcCtxImport)); \
        } \
    } while (0)
1793
1794
1795
/** @def IEM_GET_TARGET_CPU
 * Gets the current IEMTARGETCPU value.
 * @returns IEMTARGETCPU value.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 * @note    A compile-time constant unless IEM_CFG_TARGET_CPU is
 *          IEMTARGETCPU_DYNAMIC, in which case the per-CPU uTargetCpu
 *          member is consulted at runtime.
 */
#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
#else
# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
#endif
1806
/** @def IEM_GET_INSTR_LEN
 * Gets the instruction length.
 * With the code TLB this is the distance from the start of the current
 * instruction (offCurInstrStart, sign-extended) to the next byte to fetch;
 * otherwise it is the current opcode buffer offset (offOpcode). */
#ifdef IEM_WITH_CODE_TLB
# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
#else
# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
#endif
1814
/** @def IEM_TRY_SETJMP
 * Wrapper around setjmp / try, hiding all the ugly differences.
 *
 * @note Use with extreme care as this is a fragile macro.
 * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
 * @param   a_rcTarget  The variable that should receive the status code in case
 *                      of a longjmp/throw.
 */
/** @def IEM_TRY_SETJMP_AGAIN
 * For when setjmp / try is used again in the same variable scope as a previous
 * IEM_TRY_SETJMP invocation (reuses the JmpBuf/pSavedJmpBuf locals declared by
 * the first invocation in the setjmp flavour).
 */
/** @def IEM_CATCH_LONGJMP_BEGIN
 * Start wrapper for catch / setjmp-else.
 *
 * This will set up a scope.
 *
 * @note Use with extreme care as this is a fragile macro.
 * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
 * @param   a_rcTarget  The variable that should receive the status code in case
 *                      of a longjmp/throw.
 */
/** @def IEM_CATCH_LONGJMP_END
 * End wrapper for catch / setjmp-else.
 *
 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
 * state (restores the saved jump buffer pointer in the setjmp flavour).
 *
 * @note Use with extreme care as this is a fragile macro.
 * @param   a_pVCpu     The cross context virtual CPU structure of the calling EMT.
 */
#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
# ifdef IEM_WITH_THROW_CATCH
#  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
        a_rcTarget = VINF_SUCCESS; \
        try
#  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
        IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
#  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
        catch (int rcThrown) \
        { \
            a_rcTarget = rcThrown
#  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
        } \
        ((void)0)
# else  /* !IEM_WITH_THROW_CATCH */
/* Note! The setjmp flavour previously assigned the setjmp() result to a
         hard-coded 'rcStrict' variable, silently ignoring the a_rcTarget
         parameter and only working when the caller's variable had that exact
         name.  Fixed to use the parameter, matching the throw/catch flavour
         above; expansion is unchanged for callers passing 'rcStrict'. */
#  define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
        jmp_buf  JmpBuf; \
        jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
        if (((a_rcTarget) = setjmp(JmpBuf)) == 0)
#  define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
        pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
        if (((a_rcTarget) = setjmp(JmpBuf)) == 0)
#  define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
        else \
        { \
            ((void)0)
#  define IEM_CATCH_LONGJMP_END(a_pVCpu) \
        } \
        (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
# endif /* !IEM_WITH_THROW_CATCH */
#endif  /* IEM_WITH_SETJMP */
1879
1880
1881/**
1882 * Shared per-VM IEM data.
1883 */
1884typedef struct IEM
1885{
1886 /** The VMX APIC-access page handler type. */
1887 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
1888#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
1889 /** Set if the CPUID host call functionality is enabled. */
1890 bool fCpuIdHostCall;
1891#endif
1892} IEM;
1893
1894
1895
/** @name IEM_ACCESS_XXX - Access details.
 *
 * Bits 0-2 form the access-type flag set, bits 4-6 hold a small 'what'
 * enumeration, and bits 8-12 carry bounce buffering / commit state.
 * @{ */
#define IEM_ACCESS_INVALID              UINT32_C(0x000000ff)
#define IEM_ACCESS_TYPE_READ            UINT32_C(0x00000001)
#define IEM_ACCESS_TYPE_WRITE           UINT32_C(0x00000002)
#define IEM_ACCESS_TYPE_EXEC            UINT32_C(0x00000004)
#define IEM_ACCESS_TYPE_MASK            UINT32_C(0x00000007)
/* Note: the WHAT field is an enumeration (0x10, 0x20, 0x30, 0x40), not
   individual flag bits - mask with IEM_ACCESS_WHAT_MASK before comparing. */
#define IEM_ACCESS_WHAT_CODE            UINT32_C(0x00000010)
#define IEM_ACCESS_WHAT_DATA            UINT32_C(0x00000020)
#define IEM_ACCESS_WHAT_STACK           UINT32_C(0x00000030)
#define IEM_ACCESS_WHAT_SYS             UINT32_C(0x00000040)
#define IEM_ACCESS_WHAT_MASK            UINT32_C(0x00000070)
/** The writes are partial, so initialize the bounce buffer with the
 * original RAM content. */
#define IEM_ACCESS_PARTIAL_WRITE        UINT32_C(0x00000100)
/** Used in aMemMappings to indicate that the entry is bounce buffered. */
#define IEM_ACCESS_BOUNCE_BUFFERED      UINT32_C(0x00000200)
/** Bounce buffer with ring-3 write pending, first page. */
#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
/** Bounce buffer with ring-3 write pending, second page. */
#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
/** Not locked, accessed via the TLB. */
#define IEM_ACCESS_NOT_LOCKED           UINT32_C(0x00001000)
/** Valid bit mask. */
#define IEM_ACCESS_VALID_MASK           UINT32_C(0x00001fff)
/** Shift count for the TLB flags (upper word). */
#define IEM_ACCESS_SHIFT_TLB_FLAGS      16

/* Convenience aliases combining a type flag set with a 'what' value: */
/** Read+write data alias. */
#define IEM_ACCESS_DATA_RW              (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
/** Write data alias. */
#define IEM_ACCESS_DATA_W               (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
/** Read data alias. */
#define IEM_ACCESS_DATA_R               (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_DATA)
/** Instruction fetch alias. */
#define IEM_ACCESS_INSTRUCTION          (IEM_ACCESS_TYPE_EXEC  | IEM_ACCESS_WHAT_CODE)
/** Stack write alias. */
#define IEM_ACCESS_STACK_W              (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
/** Stack read alias. */
#define IEM_ACCESS_STACK_R              (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_STACK)
/** Stack read+write alias. */
#define IEM_ACCESS_STACK_RW             (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
/** Read system table alias. */
#define IEM_ACCESS_SYS_R                (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_WHAT_SYS)
/** Read+write system table alias. */
#define IEM_ACCESS_SYS_RW               (IEM_ACCESS_TYPE_READ  | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
/** @} */
1943
/** @name Prefix constants (IEMCPU::fPrefixes)
 * @{ */
#define IEM_OP_PRF_SEG_CS       RT_BIT_32(0)  /**< CS segment prefix (0x2e). */
#define IEM_OP_PRF_SEG_SS       RT_BIT_32(1)  /**< SS segment prefix (0x36). */
#define IEM_OP_PRF_SEG_DS       RT_BIT_32(2)  /**< DS segment prefix (0x3e). */
#define IEM_OP_PRF_SEG_ES       RT_BIT_32(3)  /**< ES segment prefix (0x26). */
#define IEM_OP_PRF_SEG_FS       RT_BIT_32(4)  /**< FS segment prefix (0x64). */
#define IEM_OP_PRF_SEG_GS       RT_BIT_32(5)  /**< GS segment prefix (0x65). */
#define IEM_OP_PRF_SEG_MASK     UINT32_C(0x3f)

#define IEM_OP_PRF_SIZE_OP      RT_BIT_32(8)  /**< Operand size prefix (0x66). */
#define IEM_OP_PRF_SIZE_REX_W   RT_BIT_32(9)  /**< REX.W prefix (0x48-0x4f). */
#define IEM_OP_PRF_SIZE_ADDR    RT_BIT_32(10) /**< Address size prefix (0x67). */

#define IEM_OP_PRF_LOCK         RT_BIT_32(16) /**< Lock prefix (0xf0). */
#define IEM_OP_PRF_REPNZ        RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
#define IEM_OP_PRF_REPZ         RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */

#define IEM_OP_PRF_REX          RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
#define IEM_OP_PRF_REX_R        RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
#define IEM_OP_PRF_REX_B        RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
#define IEM_OP_PRF_REX_X        RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
/** Mask with all the REX prefix flags.
 * This is generally for use when needing to undo the REX prefixes when they
 * are followed by legacy prefixes and therefore do not immediately precede
 * the first opcode byte.
 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
#define IEM_OP_PRF_REX_MASK  (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )

#define IEM_OP_PRF_VEX          RT_BIT_32(28) /**< Indicates VEX prefix. */
#define IEM_OP_PRF_EVEX         RT_BIT_32(29) /**< Indicates EVEX prefix. */
#define IEM_OP_PRF_XOP          RT_BIT_32(30) /**< Indicates XOP prefix. */
/** @} */
1977
/** @name IEMOPFORM_XXX - Opcode forms
 * @note These are ORed together with IEMOPHINT_XXX.
 * @{ */
/** ModR/M: reg, r/m */
#define IEMOPFORM_RM            0
/** ModR/M: reg, r/m (register) */
#define IEMOPFORM_RM_REG        (IEMOPFORM_RM | IEMOPFORM_MOD3)
/** ModR/M: reg, r/m (memory) */
#define IEMOPFORM_RM_MEM        (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
/** ModR/M: reg, r/m, imm8 */
#define IEMOPFORM_RMI           1
/** ModR/M: reg, r/m, imm8 (register)
 * @note Was previously derived from IEMOPFORM_RM, which made it numerically
 *       identical to IEMOPFORM_RM_REG; now correctly based on IEMOPFORM_RMI. */
#define IEMOPFORM_RMI_REG       (IEMOPFORM_RMI | IEMOPFORM_MOD3)
/** ModR/M: reg, r/m, imm8 (memory)
 * @note Was previously derived from IEMOPFORM_RM (see IEMOPFORM_RMI_REG). */
#define IEMOPFORM_RMI_MEM       (IEMOPFORM_RMI | IEMOPFORM_NOT_MOD3)
/** ModR/M: r/m, reg */
#define IEMOPFORM_MR            2
/** ModR/M: r/m (register), reg */
#define IEMOPFORM_MR_REG        (IEMOPFORM_MR | IEMOPFORM_MOD3)
/** ModR/M: r/m (memory), reg */
#define IEMOPFORM_MR_MEM        (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
/** ModR/M: r/m, reg, imm8 */
#define IEMOPFORM_MRI           3
/** ModR/M: r/m (register), reg, imm8
 * @note Was previously derived from IEMOPFORM_MR, which made it numerically
 *       identical to IEMOPFORM_MR_REG; now correctly based on IEMOPFORM_MRI. */
#define IEMOPFORM_MRI_REG       (IEMOPFORM_MRI | IEMOPFORM_MOD3)
/** ModR/M: r/m (memory), reg, imm8
 * @note Was previously derived from IEMOPFORM_MR (see IEMOPFORM_MRI_REG). */
#define IEMOPFORM_MRI_MEM       (IEMOPFORM_MRI | IEMOPFORM_NOT_MOD3)
/** ModR/M: r/m only */
#define IEMOPFORM_M             4
/** ModR/M: r/m only (register). */
#define IEMOPFORM_M_REG         (IEMOPFORM_M | IEMOPFORM_MOD3)
/** ModR/M: r/m only (memory). */
#define IEMOPFORM_M_MEM         (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
/** ModR/M: reg only */
#define IEMOPFORM_R             5

/** VEX+ModR/M: reg, r/m */
#define IEMOPFORM_VEX_RM        8
/** VEX+ModR/M: reg, r/m (register) */
#define IEMOPFORM_VEX_RM_REG    (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
/** VEX+ModR/M: reg, r/m (memory) */
#define IEMOPFORM_VEX_RM_MEM    (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: r/m, reg */
#define IEMOPFORM_VEX_MR        9
/** VEX+ModR/M: r/m (register), reg */
#define IEMOPFORM_VEX_MR_REG    (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
/** VEX+ModR/M: r/m (memory), reg */
#define IEMOPFORM_VEX_MR_MEM    (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: r/m only */
#define IEMOPFORM_VEX_M         10
/** VEX+ModR/M: r/m only (register). */
#define IEMOPFORM_VEX_M_REG     (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
/** VEX+ModR/M: r/m only (memory). */
#define IEMOPFORM_VEX_M_MEM     (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: reg only */
#define IEMOPFORM_VEX_R         11
/** VEX+ModR/M: reg, vvvv, r/m */
#define IEMOPFORM_VEX_RVM       12
/** VEX+ModR/M: reg, vvvv, r/m (register). */
#define IEMOPFORM_VEX_RVM_REG   (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
/** VEX+ModR/M: reg, vvvv, r/m (memory). */
#define IEMOPFORM_VEX_RVM_MEM   (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: reg, r/m, vvvv */
#define IEMOPFORM_VEX_RMV       13
/** VEX+ModR/M: reg, r/m, vvvv (register). */
#define IEMOPFORM_VEX_RMV_REG   (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
/** VEX+ModR/M: reg, r/m, vvvv (memory). */
#define IEMOPFORM_VEX_RMV_MEM   (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: reg, r/m, imm8 */
#define IEMOPFORM_VEX_RMI       14
/** VEX+ModR/M: reg, r/m, imm8 (register). */
#define IEMOPFORM_VEX_RMI_REG   (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
/** VEX+ModR/M: reg, r/m, imm8 (memory). */
#define IEMOPFORM_VEX_RMI_MEM   (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M: r/m, vvvv, reg */
#define IEMOPFORM_VEX_MVR       15
/** VEX+ModR/M: r/m, vvvv, reg (register) */
#define IEMOPFORM_VEX_MVR_REG   (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
/** VEX+ModR/M: r/m, vvvv, reg (memory) */
#define IEMOPFORM_VEX_MVR_MEM   (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
/** VEX+ModR/M+/n: vvvv, r/m */
#define IEMOPFORM_VEX_VM        16
/** VEX+ModR/M+/n: vvvv, r/m (register) */
#define IEMOPFORM_VEX_VM_REG    (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
/** VEX+ModR/M+/n: vvvv, r/m (memory) */
#define IEMOPFORM_VEX_VM_MEM    (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)

/** Fixed register instruction, no R/M. */
#define IEMOPFORM_FIXED         32

/** The r/m is a register. */
#define IEMOPFORM_MOD3          RT_BIT_32(8)
/** The r/m is a memory access. */
#define IEMOPFORM_NOT_MOD3      RT_BIT_32(9)
/** @} */
2073
/** @name IEMOPHINT_XXX - Additional Opcode Hints
 * @note These are ORed together with IEMOPFORM_XXX (forms use bits 0-5 and
 *       8-9, so the hints start at bit 10).
 * @{ */
/** Ignores the operand size prefix (66h). */
#define IEMOPHINT_IGNORES_OZ_PFX    RT_BIT_32(10)
/** Ignores REX.W (aka WIG). */
#define IEMOPHINT_IGNORES_REXW      RT_BIT_32(11)
/** Both the operand size prefixes (66h + REX.W) are ignored. */
#define IEMOPHINT_IGNORES_OP_SIZES  (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
/** Allowed with the lock prefix.
 * @note Moved to bit 15: this was previously RT_BIT_32(11), colliding with
 *       IEMOPHINT_IGNORES_REXW (and thereby being implied by
 *       IEMOPHINT_IGNORES_OP_SIZES), making the two hints indistinguishable. */
#define IEMOPHINT_LOCK_ALLOWED      RT_BIT_32(15)
/** The VEX.L value is ignored (aka LIG). */
#define IEMOPHINT_VEX_L_IGNORED     RT_BIT_32(12)
/** The VEX.L value must be zero (i.e. 128-bit width only). */
#define IEMOPHINT_VEX_L_ZERO        RT_BIT_32(13)
/** The VEX.V value must be zero. */
#define IEMOPHINT_VEX_V_ZERO        RT_BIT_32(14)

/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
#define IEMOPHINT_SKIP_PYTHON       RT_BIT_32(31)
/** @} */
2095
2096/**
2097 * Possible hardware task switch sources.
2098 */
2099typedef enum IEMTASKSWITCH
2100{
2101 /** Task switch caused by an interrupt/exception. */
2102 IEMTASKSWITCH_INT_XCPT = 1,
2103 /** Task switch caused by a far CALL. */
2104 IEMTASKSWITCH_CALL,
2105 /** Task switch caused by a far JMP. */
2106 IEMTASKSWITCH_JUMP,
2107 /** Task switch caused by an IRET. */
2108 IEMTASKSWITCH_IRET
2109} IEMTASKSWITCH;
2110AssertCompileSize(IEMTASKSWITCH, 4);
2111
2112/**
2113 * Possible CrX load (write) sources.
2114 */
2115typedef enum IEMACCESSCRX
2116{
2117 /** CrX access caused by 'mov crX' instruction. */
2118 IEMACCESSCRX_MOV_CRX,
2119 /** CrX (CR0) write caused by 'lmsw' instruction. */
2120 IEMACCESSCRX_LMSW,
2121 /** CrX (CR0) write caused by 'clts' instruction. */
2122 IEMACCESSCRX_CLTS,
2123 /** CrX (CR0) read caused by 'smsw' instruction. */
2124 IEMACCESSCRX_SMSW
2125} IEMACCESSCRX;
2126
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
 *
 * These flags provide further context to SLAT page-walk failures that could not be
 * determined by PGM (e.g., PGM is not privy to memory access permissions).
 *
 * @{
 */
/** Translating a nested-guest linear address failed accessing a nested-guest
 * physical address. */
# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR      RT_BIT_32(0)
/** Translating a nested-guest linear address failed accessing a
 * paging-structure entry or updating accessed/dirty bits. */
# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE     RT_BIT_32(1)
/** @} */

/** VMX APIC-access page handler callback. */
DECLCALLBACK(FNPGMPHYSHANDLER)      iemVmxApicAccessPageHandler;
# ifndef IN_RING3
/** VMX APIC-access page \#PF handler callback (not needed in ring-3). */
DECLCALLBACK(FNPGMRZPHYSPFHANDLER)  iemVmxApicAccessPagePfHandler;
# endif
#endif
2148
2149/**
2150 * Indicates to the verifier that the given flag set is undefined.
2151 *
2152 * Can be invoked again to add more flags.
2153 *
2154 * This is a NOOP if the verifier isn't compiled in.
2155 *
2156 * @note We're temporarily keeping this until code is converted to new
2157 * disassembler style opcode handling.
2158 */
2159#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2160
2161
/** @def IEM_DECL_IMPL_TYPE
 * For typedef'ing an instruction implementation function.
 *
 * @param   a_RetType   The return type.
 * @param   a_Name      The name of the type.
 * @param   a_ArgList   The argument list enclosed in parentheses.
 */

/** @def IEM_DECL_IMPL_DEF
 * For defining an instruction implementation function.
 *
 * @param   a_RetType   The return type.
 * @param   a_Name      The name of the function.
 * @param   a_ArgList   The argument list enclosed in parentheses.
 */

/** @def IEM_DECL_IMPL_PROTO
 * For prototyping an instruction implementation function.
 *
 * @param   a_RetType   The return type.
 * @param   a_Name      The name of the function.
 * @param   a_ArgList   The argument list enclosed in parentheses.
 */

#if defined(__GNUC__) && defined(RT_ARCH_X86)
/* 32-bit x86 GCC: fastcall convention; __nothrow__ marks the implementations
   as non-throwing (NOTE(review): presumably to match the assembly variants -
   confirm against IEMAllAImplA.asm). */
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList

#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
/* 32-bit x86 MSC: fastcall convention; RT_NOEXCEPT for the no-throw marking. */
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    a_RetType (__fastcall a_Name) a_ArgList
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT

#elif __cplusplus >= 201700 /* P0012R1 support */
/* C++17 and later: noexcept is part of the function type, so it goes on the
   typedef as well. */
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT

#else
# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
    a_RetType (VBOXCALL a_Name) a_ArgList
# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
    DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
    DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList

#endif
2211
/** Parity lookup table indexed by a result byte.
 * Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
RT_C_DECLS_BEGIN
extern uint8_t const g_afParity[256];
RT_C_DECLS_END


/** @name Arithmetic assignment operations on bytes (binary).
 * @{ */
/** Byte binary operation taking destination, source and an EFLAGS pointer
 * (non-const, so the implementation may update the flags). */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
/** Pointer to a byte binary operation implementation. */
typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
/* NOTE(review): the '_locked' variants appear to be for LOCK-prefixed forms -
   inferred from naming; confirm against IEMAllAImplC.cpp. */
FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_or_u8,  iemAImpl_or_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
/** @} */
2230
/** @name Arithmetic assignment operations on words (binary).
 * @{ */
/** Word binary operation; same shape as FNIEMAIMPLBINU8 but 16-bit operands. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
/** Pointer to a word binary operation implementation. */
typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_or_u16,  iemAImpl_or_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
/** @} */

/** @name Arithmetic assignment operations on double words (binary).
 * @{ */
/** Double word binary operation; same shape as FNIEMAIMPLBINU8 but 32-bit. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
/** Pointer to a double word binary operation implementation. */
typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_or_u32,  iemAImpl_or_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
/* BMI1 operations; the '_fallback' variants are presumably plain C versions -
   NOTE(review): inferred from naming; confirm against IEMAllAImplC.cpp. */
FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
/** @} */

/** @name Arithmetic assignment operations on quad words (binary).
 * @{ */
/** Quad word binary operation; same shape as FNIEMAIMPLBINU8 but 64-bit. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
/** Pointer to a quad word binary operation implementation. */
typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_or_u64,  iemAImpl_or_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
/** @} */
2275
/* Read-only binary operation worker types: the destination operand is only
   read, never written (note the const destination pointer); only the EFLAGS
   output is produced.  Used for CMP, TEST and BT below. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU8,(uint8_t const *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU16,(uint16_t const *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU32,(uint32_t const *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU64,(uint64_t const *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;

/** @name Compare operations (thrown in with the binary ops).
 * @{ */
FNIEMAIMPLBINROU8  iemAImpl_cmp_u8;
FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
/** @} */

/** @name Test operations (thrown in with the binary ops).
 * @{ */
FNIEMAIMPLBINROU8  iemAImpl_test_u8;
FNIEMAIMPLBINROU16 iemAImpl_test_u16;
FNIEMAIMPLBINROU32 iemAImpl_test_u32;
FNIEMAIMPLBINROU64 iemAImpl_test_u64;
/** @} */
2300
/** @name Bit operations operations (thrown in with the binary ops).
 *
 * BT only reads the destination, hence the read-only worker type; BTC, BTR
 * and BTS modify it and therefore come with '_locked' (atomic) variants.
 * @{ */
FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
/** @} */
2316
/** @name Arithmetic three operand operations on double words (binary).
 *
 * VEX encoded three-operand workers (BMI1/BMI2): dst = src1 \<op\> src2,
 * updating EFLAGS.  '_fallback' variants are for hosts without the ISA
 * extension (naming convention; see implementation).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32,  iemAImpl_andn_u32_fallback;
FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32,  iemAImpl_bzhi_u32_fallback;
/** @} */

/** @name Arithmetic three operand operations on quad words (binary).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64,  iemAImpl_andn_u64_fallback;
FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64,  iemAImpl_bzhi_u64_fallback;
/** @} */
2334
/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
 *
 * Same shape as the VEX three-operand workers above, but these instructions
 * (PDEP/PEXT/SARX/SHLX/SHRX/RORX) leave EFLAGS untouched, so no pEFlags
 * parameter.  RORX has no fallback declared here.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
/** @} */

/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
/** @} */
2358
/** @name MULX 32-bit and 64-bit.
 *
 * MULX produces a double-width product split across two destination
 * registers (puDst1/puDst2) and does not touch EFLAGS.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
/** @} */
2369
2370
/** @name Exchange memory with register operations.
 *
 * Both operands are in/out: the memory and register values are swapped.
 * XCHG with a memory operand is implicitly locked on real hardware; the
 * '_unlocked' variants exist alongside the '_locked' ones -- NOTE(review):
 * when each is selected is decided by the caller, not visible here.
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t  *pu8Mem,  uint8_t  *pu8Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t  *pu8Mem,  uint8_t  *pu8Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
/** @} */

/** @name Exchange and add operations.
 *
 * XADD: destination and register operand are both read and written, EFLAGS
 * updated; '_locked' variants for the LOCK prefixed forms.
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t  *pu8Dst,  uint8_t  *pu8Reg,  uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t  *pu8Dst,  uint8_t  *pu8Reg,  uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
/** @} */
2394
/** @name Compare and exchange.
 *
 * CMPXCHG family: compares the accumulator (AL/AX/EAX/RAX, passed in/out by
 * pointer) with the destination and conditionally stores the source,
 * updating EFLAGS.
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t  *pu8Dst,  uint8_t  *puAl,  uint8_t  uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t  *pu8Dst,  uint8_t  *puAl,  uint8_t  uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16,       (uint16_t *pu16Dst, uint16_t *puAx,  uint16_t uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx,  uint16_t uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32,       (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
/* On 32-bit hosts the 64-bit source register operand is passed by pointer
   rather than by value (presumably for calling-convention reasons). */
#if ARCH_BITS == 32
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64,       (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
#else
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64,       (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
#endif
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
                                            uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
                                                   uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
                                             uint32_t *pEFlags));
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
                                                    uint32_t *pEFlags));
/* NOTE(review): no 16-byte fallback on ARM64 -- presumably the host always
   provides the needed 128-bit atomics there; confirm in the implementation. */
#ifndef RT_ARCH_ARM64
IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
                                                      PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
#endif
/** @} */
2423
/** @name Memory ordering
 *
 * Fence workers: no parameters, no result -- side effect only (memory
 * barrier).  iemAImpl_alt_mem_fence is only declared on non-ARM64 hosts.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
#ifndef RT_ARCH_ARM64
IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
#endif
/** @} */
2435
/** @name Double precision shifts
 *
 * SHLD/SHRD workers: *puDst is shifted by cShift bits with bits shifted in
 * from uSrc, EFLAGS updated.  The '_amd'/'_intel' variants model the
 * CPU-vendor specific flag behaviour (naming convention used throughout
 * this header).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTDBLU16  *PFNIEMAIMPLSHIFTDBLU16;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTDBLU32  *PFNIEMAIMPLSHIFTDBLU32;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTDBLU64  *PFNIEMAIMPLSHIFTDBLU64;
FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
/** @} */
2451
2452
/** @name Bit search operations (thrown in with the binary ops).
 *
 * These reuse the binary-op worker types; '_amd'/'_intel' variants model
 * vendor specific flag/result behaviour, '_fallback' are C implementations
 * for hosts without the instruction (naming convention).
 * @{ */
FNIEMAIMPLBINU16 iemAImpl_bsf_u16,  iemAImpl_bsf_u16_amd,  iemAImpl_bsf_u16_intel;
FNIEMAIMPLBINU32 iemAImpl_bsf_u32,  iemAImpl_bsf_u32_amd,  iemAImpl_bsf_u32_intel;
FNIEMAIMPLBINU64 iemAImpl_bsf_u64,  iemAImpl_bsf_u64_amd,  iemAImpl_bsf_u64_intel;
FNIEMAIMPLBINU16 iemAImpl_bsr_u16,  iemAImpl_bsr_u16_amd,  iemAImpl_bsr_u16_intel;
FNIEMAIMPLBINU32 iemAImpl_bsr_u32,  iemAImpl_bsr_u32_amd,  iemAImpl_bsr_u32_intel;
FNIEMAIMPLBINU64 iemAImpl_bsr_u64,  iemAImpl_bsr_u64_amd,  iemAImpl_bsr_u64_intel;
FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16,  iemAImpl_lzcnt_u16_amd,  iemAImpl_lzcnt_u16_intel;
FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32,  iemAImpl_lzcnt_u32_amd,  iemAImpl_lzcnt_u32_intel;
FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64,  iemAImpl_lzcnt_u64_amd,  iemAImpl_lzcnt_u64_intel;
FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16,  iemAImpl_tzcnt_u16_amd,  iemAImpl_tzcnt_u16_intel;
FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32,  iemAImpl_tzcnt_u32_amd,  iemAImpl_tzcnt_u32_intel;
FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64,  iemAImpl_tzcnt_u64_amd,  iemAImpl_tzcnt_u64_intel;
FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
/** @} */

/** @name Signed multiplication operations (thrown in with the binary ops).
 *
 * Two-operand IMUL forms; reuse the binary-op worker types.
 * @{ */
FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
/** @} */
2478
/** @name Arithmetic assignment operations on bytes (unary).
 *
 * Unary workers: the single operand is modified in place, EFLAGS updated;
 * '_locked' variants for LOCK prefixed memory forms.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
/** @} */

/** @name Arithmetic assignment operations on words (unary).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
/** @} */

/** @name Arithmetic assignment operations on double words (unary).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
/** @} */

/** @name Arithmetic assignment operations on quad words (unary).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
/** @} */
2518
2519
/** @name Shift operations on bytes (Group 2).
 *
 * Rotate/shift workers: *puDst shifted/rotated by cShift, EFLAGS updated.
 * '_amd'/'_intel' variants model vendor specific flag behaviour.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
/** @} */

/** @name Shift operations on words (Group 2).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
/** @} */

/** @name Shift operations on double words (Group 2).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
/** @} */
2558
/** @name Shift operations on quad words (Group 2).
 *
 * (Heading previously said "words" -- these are the 64-bit workers.)
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
/** @} */
2571
/** @name Multiplication and division operations.
 *
 * These workers return an int status (unlike the void workers above --
 * division can fault, e.g. \#DE); the implicit accumulator registers are
 * passed in/out by pointer.  The 8-bit form uses AX only; wider forms use
 * the AX:DX / EAX:EDX / RAX:RDX pairs.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8,  iemAImpl_mul_u8_amd,  iemAImpl_mul_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_div_u8,  iemAImpl_div_u8_amd,  iemAImpl_div_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16,  iemAImpl_mul_u16_amd,  iemAImpl_mul_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_div_u16,  iemAImpl_div_u16_amd,  iemAImpl_div_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32,  iemAImpl_mul_u32_amd,  iemAImpl_mul_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_div_u32,  iemAImpl_div_u32_amd,  iemAImpl_div_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64,  iemAImpl_mul_u64_amd,  iemAImpl_mul_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_div_u64,  iemAImpl_div_u64_amd,  iemAImpl_div_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
/** @} */
2602
/** @name Byte Swap.
 *
 * NOTE(review): these use IEM_DECL_IMPL_TYPE where sibling function
 * declarations use IEM_DECL_IMPL_DEF -- verify this is intentional.
 * @{ */
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
/** @} */

/** @name Misc.
 * @{ */
/** ARPL reuses the 16-bit binary worker type. */
FNIEMAIMPLBINU16 iemAImpl_arpl;
/** @} */
2614
/** @name RDRAND and RDSEED
 *
 * Workers write a random value to *puDst and update *pEFlags; '_fallback'
 * variants for hosts without the instruction (naming convention).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;

FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
/** @} */

/** @name ADOX and ADCX
 *
 * Note the parameter order differs from the binary workers above: the
 * EFLAGS pointer comes before the source operand.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU32,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU64,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc));
typedef FNIEMAIMPLADXU32 *PFNIEMAIMPLADXU32;
typedef FNIEMAIMPLADXU64 *PFNIEMAIMPLADXU64;

FNIEMAIMPLADXU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
FNIEMAIMPLADXU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
FNIEMAIMPLADXU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
FNIEMAIMPLADXU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
/** @} */
2644
/** @name FPU operations taking a 32-bit float argument
 *
 * Two worker shapes: the 'FSW' type only produces an FPU status word
 * (compare-style), while the plain type produces a full IEMFPURESULT.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;

FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fadd_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fmul_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fsub_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fsubr_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fdiv_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fdivr_r80_by_r32;

/** Load a 32-bit float, converting it to an 80-bit FPU result. */
IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
/** Store an 80-bit value as a 32-bit float, returning the FPU status word. */
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
/** @} */
2667
2668/** @name FPU operations taking a 64-bit float argument
2669 * @{ */
2670typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2671 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2672typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
2673
2674typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2675 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2676typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
2677
2678FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
2679FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
2680FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
2681FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
2682FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
2683FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
2684FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
2685
2686IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
2687IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2688 PRTFLOAT64U pr32Val, PCRTFLOAT80U pr80Val));
2689/** @} */
2690
/** @name FPU operations taking a 80-bit float argument
 *
 * Several worker shapes for r80-by-r80 operations: full result, FSW-only
 * (compares), EFLAGS-returning (FCOMI/FUCOMI), unary, unary-with-FSW,
 * constant loaders, and unary producing two results (FPTAN/FXTRACT/FSINCOS).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;

/* Transcendentals have vendor specific behaviour, hence _amd/_intel variants. */
FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80,  iemAImpl_fpatan_r80_by_r80_amd,  iemAImpl_fpatan_r80_by_r80_intel;
FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80,   iemAImpl_fyl2x_r80_by_r80_amd,   iemAImpl_fyl2x_r80_by_r80_intel;
FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;

/** Compare-style worker: only the FPU status word is produced. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;

/** FCOMI/FUCOMI-style worker: returns EFLAGS, also yields the FSW. */
typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
                                                          PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;

/** Unary worker producing a full FPU result. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;

/** Unary worker producing only the FPU status word (FTST/FXAM). */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;

/** Constant loader worker (FLD1, FLDL2T, ...): no input operand. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;

/** Unary worker producing two results (FPTAN, FXTRACT, FSINCOS). */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
                                                           PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;

IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));

/* Packed BCD (d80) load/store conversions. */
IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));

/** @} */
2763
/** @name FPU operations taking a 16-bit signed integer argument
 *
 * Integer operands are passed by const pointer.  The 'STR80TOI*' workers
 * store an 80-bit value as a signed integer, producing the FPU status word.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;

FNIEMAIMPLFPUI16    iemAImpl_fiadd_r80_by_i16;
FNIEMAIMPLFPUI16    iemAImpl_fimul_r80_by_i16;
FNIEMAIMPLFPUI16    iemAImpl_fisub_r80_by_i16;
FNIEMAIMPLFPUI16    iemAImpl_fisubr_r80_by_i16;
FNIEMAIMPLFPUI16    iemAImpl_fidiv_r80_by_i16;
FNIEMAIMPLFPUI16    iemAImpl_fidivr_r80_by_i16;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
/** @} */

/** @name FPU operations taking a 32-bit signed integer argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;

FNIEMAIMPLFPUI32    iemAImpl_fiadd_r80_by_i32;
FNIEMAIMPLFPUI32    iemAImpl_fimul_r80_by_i32;
FNIEMAIMPLFPUI32    iemAImpl_fisub_r80_by_i32;
FNIEMAIMPLFPUI32    iemAImpl_fisubr_r80_by_i32;
FNIEMAIMPLFPUI32    iemAImpl_fidiv_r80_by_i32;
FNIEMAIMPLFPUI32    iemAImpl_fidivr_r80_by_i32;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
/** @} */

/** @name FPU operations taking a 64-bit signed integer argument
 *
 * Only load/store conversions here -- no fiadd/ficom style workers for the
 * 64-bit integer width (no such instruction forms).
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
/** @} */
2826
2827
/** Temporary type representing a 256-bit vector register. */
typedef struct { uint64_t au64[4]; } IEMVMM256;
/** Temporary type pointing to a 256-bit vector register. */
typedef IEMVMM256 *PIEMVMM256;
/** Temporary type pointing to a const 256-bit vector register.
 * (Added the missing 'const' -- the PC prefix and this comment both promise
 * a pointer-to-const, but the typedef was identical to PIEMVMM256.) */
typedef IEMVMM256 const *PCIEMVMM256;
2834
2835
/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
 * @{ */
/* Naming scheme: F2/F3 = takes FPU/extended state + 2/3 value operands;
 * OPTF2/OPTF3 = state-less variants with only the value operands.
 * U64 = MMX register width, U128 = XMM, U256 = YMM. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
/* MMX (64-bit) worker declarations.  The *_fallback variants are portable C
 * implementations used when the host lacks the relevant instruction set
 * (or when IEM_WITHOUT_ASSEMBLY is in effect). */
FNIEMAIMPLMEDIAF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddq_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubq_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddwd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmuludq_u64;
/* State-less MMX workers (shifts by register, packs, averages, SAD). */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
2895
/* SSE (128-bit) worker declarations; *_fallback = portable C variant for
 * hosts without the required instruction-set extension. */
FNIEMAIMPLMEDIAF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddq_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubq_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddwd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminub_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxub_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmuludq_u128;
/* State-less SSE workers (packs, shifts by register, averages, unpack-FP). */
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
2953
/* AVX 128-bit (VEX.128) worker declarations: three-operand forms taking the
 * extended state plus two sources; each has a C fallback. */
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
/* State-less three-operand AVX 128-bit workers. */
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;
3019
3020FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
3021FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsd_u128_fallback;
3022FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsw_u128_fallback;
3023FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
3024
/* AVX2 256-bit (VEX.256) worker declarations, mirroring the 128-bit set
 * above; each has a C fallback. */
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
/* State-less three-operand AVX2 256-bit workers. */
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;

/* Two-operand AVX2 256-bit workers; note these are correctly w/w, d/d paired
 * (the 128-bit vpabs declarations above were not). */
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
3094/** @} */
3095
/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
 * Interleave the low halves of the two sources into a full-width result
 * (PUNPCKL* family and the FP unpack low/high VEX forms). @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
                         iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
                         iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
                         iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
                         iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
                         iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
                         iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
                         iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
                         iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
                         iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
                         iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
                         iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
                         iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
                         iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
                         iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
/** @} */
3118
/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
 * Interleave the high halves of the two sources into a full-width result
 * (PUNPCKH* family). @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
                         iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
                         iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
                         iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
                         iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
                         iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
                         iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
/** @} */
3132
/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
 * @{ */
/* 'bEvil' is the 8-bit immediate shuffle-control byte of the instruction. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
#ifndef IEM_WITHOUT_ASSEMBLY
/* Assembly variants are only declared when assembly workers are compiled in;
 * the C fallbacks below are declared unconditionally. */
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
#endif
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
/** @} */
3146
/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
 * @{ */
/* 'bShift' is the 8-bit immediate shift count of the instruction. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
/** @} */
3163
/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
 * PMOVMSKB/VPMOVMSKB: gather the top bit of each source byte into an
 * integer destination. @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
#ifndef IEM_WITHOUT_ASSEMBLY
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
#endif
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
/** @} */
3173
/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
 * @{ */
/* SSE4.1 form: implicit XMM0 mask operand; AVX forms: explicit mask operand. */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;

/* PBLENDVB / VPBLENDVB: per-byte blend controlled by mask sign bits. */
FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;

/* BLENDVPS / VBLENDVPS: per-dword (single-precision lane) blend. */
FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;

/* BLENDVPD / VBLENDVPD: per-qword (double-precision lane) blend. */
FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
/** @} */
3204
3205
/** @name Media (SSE/MMX/AVX) operation: Sort this later
 * @{ */
/* VMOVSLDUP/VMOVSHDUP/VMOVDDUP 256-bit forms: _rr = register source
 * (iYRegSrc index), _rm = memory source (pre-fetched 256-bit value). */
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));

/* PMOVSX* (sign-extending packed moves).  The narrow source is passed by
 * value as the smallest integer type that holds it (u16/u32/u64). */
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

/* PMOVZX* (zero-extending packed moves), same parameter conventions. */
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3287IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3288IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3289IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3290IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3291IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3292
3293IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3294IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3295IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3296IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3297IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3298
3299IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3300IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3301
3302IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u64,(uint64_t *pu64Dst, uint16_t u16Src, uint8_t bEvil));
3303IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u128,(PRTUINT128U puDst, uint16_t u16Src, uint8_t bEvil));
3304IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3305IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3306
3307IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u64,(uint16_t *pu16Dst, uint64_t u64Src, uint8_t bEvil));
3308IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3309IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3310IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128_fallback,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3311
3312IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3313IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3314IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3315IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3316IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3317
3318IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3319IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3320IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3321IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3322IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3323
3324
3325typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3326typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
3327typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3328typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
3329typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3330typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
3331
3332FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
3333FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
3334FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
3335FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
3336
3337FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
3338FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
3339FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
3340FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
3341
3342FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
3343FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
3344FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
3345FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
3346FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
3347FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;
3348
3349FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
3350FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
3351FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
3352FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
3353FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
3354
3355FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
3356FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
3357FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
3358FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
3359FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
3360
3361FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
3362
3363FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
3364
3365FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
3366FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
3367FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
3368FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
3369FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
3370FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
3371IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3372IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3373
/** Operand package for the pcmpistri/pcmpistrm workers (see
 * FNIEMAIMPLPCMPISTRIU128IMM8 / FNIEMAIMPLPCMPISTRMU128IMM8 below). */
3374typedef struct IEMPCMPISTRXSRC
3375{
3376 RTUINT128U uSrc1; /**< The first source operand. */
3377 RTUINT128U uSrc2; /**< The second source operand. */
3378} IEMPCMPISTRXSRC;
/** Pointer to a pcmpistri/pcmpistrm operand package. */
3379typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
/** Pointer to a const pcmpistri/pcmpistrm operand package. */
3380typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;
3381
/** Operand package for the pcmpestri/pcmpestrm workers (see
 * FNIEMAIMPLPCMPESTRIU128IMM8 / FNIEMAIMPLPCMPESTRMU128IMM8 below).
 * Unlike the pcmpistr* variant this also carries RAX/RDX, which hold the
 * explicit string lengths for these instructions. */
3382typedef struct IEMPCMPESTRXSRC
3383{
3384 RTUINT128U uSrc1; /**< The first source operand. */
3385 RTUINT128U uSrc2; /**< The second source operand. */
3386 uint64_t u64Rax; /**< The guest RAX value (explicit length for uSrc1). */
3387 uint64_t u64Rdx; /**< The guest RDX value (explicit length for uSrc2). */
3388} IEMPCMPESTRXSRC;
/** Pointer to a pcmpestri/pcmpestrm operand package. */
3389typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
/** Pointer to a const pcmpestri/pcmpestrm operand package. */
3390typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;
3391
3392typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3393typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
3394typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3395typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;
3396
3397typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3398typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
3399typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3400typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;
3401
3402FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
3403FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
3404FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
3405FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;
3406
3407FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
3408FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
3409
3410FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
3411FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
3412FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
3413/** @} */
3414
3415/** @name Media Odds and Ends
3416 * @{ */
/* crc32 worker function types and implementations, one per source operand
   width; the destination accumulator is always 32-bit.  Each worker has a
   _fallback companion (presumably the portable C variant used when host
   assembly is unavailable -- NOTE(review): confirm against the
   IEM_WITHOUT_ASSEMBLY selection logic used elsewhere in this header). */
3417typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
3418typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
3419typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
3420typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
3421FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
3422FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
3423FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
3424FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
3425
3426typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
3427typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
3428FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
3429FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
3430
3431typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3432typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
3433typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3434typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
3435typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3436typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
3437typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3438typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
3439
3440FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
3441FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
3442
3443FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
3444FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
3445
3446FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
3447FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
3448
3449FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
3450FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
3451
3452typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
3453typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
3454typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
3455typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
3456
3457FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
3458FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
3459
3460typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
3461typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
3462typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
3463typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
3464
3465FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
3466FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
3467
3468
3469typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFLMXCSR128,(uint32_t *pfMxcsr, uint32_t *pfEFlags, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3470typedef FNIEMAIMPLF2EFLMXCSR128 *PFNIEMAIMPLF2EFLMXCSR128;
3471
3472FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomiss_u128;
3473FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
3474
3475FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomisd_u128;
3476FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
3477
3478FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comiss_u128;
3479FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
3480
3481FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comisd_u128;
3482FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
3483
3484
/** Two-XMM-operand source package used by the FNIEMAIMPLMXCSRF2XMMIMM8
 * workers below (cmpps/cmppd/cmpss/cmpsd, roundps/roundpd, dpps/dppd, &c). */
3485typedef struct IEMMEDIAF2XMMSRC
3486{
3487 X86XMMREG uSrc1; /**< The first source operand. */
3488 X86XMMREG uSrc2; /**< The second source operand. */
3489} IEMMEDIAF2XMMSRC;
/** Pointer to a two-XMM-operand source package. */
3490typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
/** Pointer to a const two-XMM-operand source package. */
3491typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
3492
3493typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t *pfMxcsr, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
3494typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
3495
3496FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
3497FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
3498FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
3499FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
3500FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
3501FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
3502
3503FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
3504FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
3505
3506FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
3507FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;
3508
3509typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U128,(uint32_t *pfMxcsr, uint64_t *pu64Dst, PCX86XMMREG pSrc));
3510typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
3511
3512FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
3513FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
3514
3515typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU128U64,(uint32_t *pfMxcsr, PX86XMMREG pDst, uint64_t u64Src));
3516typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
3517
3518FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
3519FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
3520
3521typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U64,(uint32_t *pfMxcsr, uint64_t *pu64Dst, uint64_t u64Src));
3522typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
3523
3524FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
3525FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
3526
3527/** @} */
3528
3529
3530/** @name Function tables.
3531 * @{
3532 */
3533
3534/**
3535 * Function table for a binary operator providing implementation based on
3536 * operand size.
3537 */
3538typedef struct IEMOPBINSIZES
3539{
3540 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8; /**< Normal and locked 8-bit workers. */
3541 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16; /**< Normal and locked 16-bit workers. */
3542 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32; /**< Normal and locked 32-bit workers. */
3543 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64; /**< Normal and locked 64-bit workers. */
3544} IEMOPBINSIZES;
3545/** Pointer to a binary operator function table. */
3546typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
3547
3548
3549/**
3550 * Function table for a unary operator providing implementation based on
3551 * operand size.
3552 */
3553typedef struct IEMOPUNARYSIZES
3554{
3555 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8; /**< Normal and locked 8-bit workers. */
3556 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16; /**< Normal and locked 16-bit workers. */
3557 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32; /**< Normal and locked 32-bit workers. */
3558 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64; /**< Normal and locked 64-bit workers. */
3559} IEMOPUNARYSIZES;
3560/** Pointer to a unary operator function table. */
3561typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
3562
3563
3564/**
3565 * Function table for a shift operator providing implementation based on
3566 * operand size.  Note that unlike the binary/unary tables there are no
3567 * locked variants here.
3568 */
3569typedef struct IEMOPSHIFTSIZES
3570{
3571 PFNIEMAIMPLSHIFTU8 pfnNormalU8; /**< The 8-bit worker. */
3572 PFNIEMAIMPLSHIFTU16 pfnNormalU16; /**< The 16-bit worker. */
3573 PFNIEMAIMPLSHIFTU32 pfnNormalU32; /**< The 32-bit worker. */
3574 PFNIEMAIMPLSHIFTU64 pfnNormalU64; /**< The 64-bit worker. */
3575} IEMOPSHIFTSIZES;
3576/** Pointer to a shift operator function table. */
3577typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
3577
3578
3579/**
3580 * Function table for a multiplication or division operation.
3581 */
3582typedef struct IEMOPMULDIVSIZES
3583{
3584 PFNIEMAIMPLMULDIVU8 pfnU8; /**< The 8-bit worker. */
3585 PFNIEMAIMPLMULDIVU16 pfnU16; /**< The 16-bit worker. */
3586 PFNIEMAIMPLMULDIVU32 pfnU32; /**< The 32-bit worker. */
3587 PFNIEMAIMPLMULDIVU64 pfnU64; /**< The 64-bit worker. */
3588} IEMOPMULDIVSIZES;
3589/** Pointer to a multiplication or division operation function table. */
3590typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
3591
3592
3593/**
3594 * Function table for a double precision shift operator providing implementation
3595 * based on operand size.  There is no 8-bit entry since the double shift
3596 * instructions (shld/shrd) start at 16-bit operands.
3597 */
3598typedef struct IEMOPSHIFTDBLSIZES
3599{
3600 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16; /**< The 16-bit worker. */
3601 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32; /**< The 32-bit worker. */
3602 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64; /**< The 64-bit worker. */
3603} IEMOPSHIFTDBLSIZES;
3604/** Pointer to a double precision shift function table. */
3605typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
3605
3606
3607/**
3608 * Function table for media instruction taking two full sized media source
3609 * registers and one full sized destination register (AVX).
3610 */
3611typedef struct IEMOPMEDIAF3
3612{
3613 PFNIEMAIMPLMEDIAF3U128 pfnU128; /**< Worker for 128-bit operands. */
3614 PFNIEMAIMPLMEDIAF3U256 pfnU256; /**< Worker for 256-bit operands. */
3615} IEMOPMEDIAF3;
3616/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3617typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
3618
3619/** @def IEMOPMEDIAF3_INIT_VARS_EX
3620 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3621 * given functions as initializers. For use in AVX functions where a pair of
3622 * functions are only used once and the function table need not be public. */
3623#ifndef TST_IEM_CHECK_MC
3624# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3625# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3626 static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3627 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3628# else
/* Non-x86 hosts (or IEM_WITHOUT_ASSEMBLY): only the fallback table exists. */
3629# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3630 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3631# endif
3632#else
/* TST_IEM_CHECK_MC: the tables aren't used, expand to a no-op statement. */
3633# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3634#endif
3635/** @def IEMOPMEDIAF3_INIT_VARS
3636 * Generate AVX function tables for the @a a_InstrNm instruction.
3637 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
3638#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
3639 IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3640 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3641
3642/**
3643 * Function table for media instruction taking two full sized media source
3644 * registers and one full sized destination register, but no additional state
3645 * (AVX).
3646 */
3647typedef struct IEMOPMEDIAOPTF3
3648{
3649 PFNIEMAIMPLMEDIAOPTF3U128 pfnU128; /**< Worker for 128-bit operands. */
3650 PFNIEMAIMPLMEDIAOPTF3U256 pfnU256; /**< Worker for 256-bit operands. */
3651} IEMOPMEDIAOPTF3;
3652/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3653typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
3654
3655/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
3656 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3657 * given functions as initializers. For use in AVX functions where a pair of
3658 * functions are only used once and the function table need not be public. */
3659#ifndef TST_IEM_CHECK_MC
3660# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3661# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3662 static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3663 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3664# else
/* Non-x86 hosts (or IEM_WITHOUT_ASSEMBLY): only the fallback table exists. */
3665# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3666 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3667# endif
3668#else
/* TST_IEM_CHECK_MC: the tables aren't used, expand to a no-op statement. */
3669# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3670#endif
3671/** @def IEMOPMEDIAOPTF3_INIT_VARS
3672 * Generate AVX function tables for the @a a_InstrNm instruction.
3673 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
3674#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
3675 IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3676 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3677
3678/**
3679 * Function table for media instruction taking one full sized media source
3680 * register and one full sized destination register, but no additional state
3681 * (AVX).
3682 */
3683typedef struct IEMOPMEDIAOPTF2
3684{
3685 PFNIEMAIMPLMEDIAOPTF2U128 pfnU128; /**< Worker for 128-bit operands. */
3686 PFNIEMAIMPLMEDIAOPTF2U256 pfnU256; /**< Worker for 256-bit operands. */
3687} IEMOPMEDIAOPTF2;
3688/** Pointer to a media operation function table for 2 full sized ops (AVX). */
3689typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
3690
3691/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
3692 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3693 * given functions as initializers. For use in AVX functions where a pair of
3694 * functions are only used once and the function table need not be public. */
3695#ifndef TST_IEM_CHECK_MC
3696# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3697# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3698 static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3699 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3700# else
/* Non-x86 hosts (or IEM_WITHOUT_ASSEMBLY): only the fallback table exists. */
3701# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3702 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3703# endif
3704#else
/* TST_IEM_CHECK_MC: the tables aren't used, expand to a no-op statement. */
3705# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3706#endif
3707/** @def IEMOPMEDIAOPTF2_INIT_VARS
3708 * Generate AVX function tables for the @a a_InstrNm instruction.
3709 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
3710#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
3711 IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3712 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3713
3714/**
3715 * Function table for media instruction taking two full sized media source
3716 * registers and one full sized destination register and an 8-bit immediate, but no additional state
3717 * (AVX).
3718 */
3719typedef struct IEMOPMEDIAOPTF3IMM8
3720{
3721 PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128; /**< Worker for 128-bit operands. */
3722 PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256; /**< Worker for 256-bit operands. */
3723} IEMOPMEDIAOPTF3IMM8;
3724/** Pointer to a media operation function table for 3 full sized ops plus an 8-bit immediate (AVX). */
3725typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
3726
3727/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
3728 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3729 * given functions as initializers. For use in AVX functions where a pair of
3730 * functions are only used once and the function table need not be public. */
3731#ifndef TST_IEM_CHECK_MC
3732# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3733# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3734 static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3735 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3736# else
/* Non-x86 hosts (or IEM_WITHOUT_ASSEMBLY): only the fallback table exists. */
3737# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3738 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3739# endif
3740#else
/* TST_IEM_CHECK_MC: the tables aren't used, expand to a no-op statement. */
3741# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3742#endif
3743/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
3744 * Generate AVX function tables for the @a a_InstrNm instruction.
3745 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
3746#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
3747 IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3748 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3749/** @} */
3750
3751
3752/**
3753 * Function table for blend type instruction taking three full sized media source
3754 * registers and one full sized destination register, but no additional state
3755 * (AVX).
3756 */
3757typedef struct IEMOPBLENDOP
3758{
3759 PFNIEMAIMPLAVXBLENDU128 pfnU128; /**< Worker for 128-bit operands. */
3760 PFNIEMAIMPLAVXBLENDU256 pfnU256; /**< Worker for 256-bit operands. */
3761} IEMOPBLENDOP;
3762/** Pointer to a media operation function table for 4 full sized ops (AVX). */
3763typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
3764
3765/** @def IEMOPBLENDOP_INIT_VARS_EX
3766 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3767 * given functions as initializers. For use in AVX functions where a pair of
3768 * functions are only used once and the function table need not be public. */
3769#ifndef TST_IEM_CHECK_MC
3770# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3771# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3772 static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3773 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3774# else
/* Non-x86 hosts (or IEM_WITHOUT_ASSEMBLY): only the fallback table exists. */
3775# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3776 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3777# endif
3778#else
/* TST_IEM_CHECK_MC: the tables aren't used, expand to a no-op statement. */
3779# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3780#endif
3781/** @def IEMOPBLENDOP_INIT_VARS
3782 * Generate AVX function tables for the @a a_InstrNm instruction.
3783 * @sa IEMOPBLENDOP_INIT_VARS_EX */
3784#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
3785 IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3786 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3787
3788
3789/** @name SSE/AVX single/double precision floating point operations.
3790 * @{ */
3791/**
3792 * An SSE result.
3793 */
3794typedef struct IEMSSERESULT
3795{
3796 /** The output value. */
3797 X86XMMREG uResult;
3798 /** The output status (MXCSR). */
3799 uint32_t MXCSR;
3800} IEMSSERESULT;
3801AssertCompileMemberOffset(IEMSSERESULT, MXCSR, 128 / 8); /* MXCSR must directly follow the 128-bit value. */
3802/** Pointer to an SSE result. */
3803typedef IEMSSERESULT *PIEMSSERESULT;
3804/** Pointer to a const SSE result. */
3805typedef IEMSSERESULT const *PCIEMSSERESULT;
3806
3807
3808/**
3809 * An AVX128 result.
3810 */
3811typedef struct IEMAVX128RESULT
3812{
3813 /** The output value. */
3814 X86XMMREG uResult;
3815 /** The output status (MXCSR). */
3816 uint32_t MXCSR;
3817} IEMAVX128RESULT;
3818AssertCompileMemberOffset(IEMAVX128RESULT, MXCSR, 128 / 8); /* MXCSR must directly follow the 128-bit value. */
3819/** Pointer to an AVX128 result. */
3820typedef IEMAVX128RESULT *PIEMAVX128RESULT;
3821/** Pointer to a const AVX128 result. */
3822typedef IEMAVX128RESULT const *PCIEMAVX128RESULT;
3823
3824
3825/**
3826 * An AVX256 result.
3827 */
3828typedef struct IEMAVX256RESULT
3829{
3830 /** The output value. */
3831 X86YMMREG uResult;
3832 /** The output status (MXCSR). */
3833 uint32_t MXCSR;
3834} IEMAVX256RESULT;
3835AssertCompileMemberOffset(IEMAVX256RESULT, MXCSR, 256 / 8); /* MXCSR must directly follow the 256-bit value. */
3836/** Pointer to an AVX256 result. */
3837typedef IEMAVX256RESULT *PIEMAVX256RESULT;
3838/** Pointer to a const AVX256 result. */
3839typedef IEMAVX256RESULT const *PCIEMAVX256RESULT;
3840
3841
3842typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3843typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
3844typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R32,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3845typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
3846typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R64,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3847typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
3848
3849typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3850typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
3851typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R32,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3852typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
3853typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R64,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3854typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
3855
3856typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U256,(PX86XSAVEAREA pExtState, PIEMAVX256RESULT pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
3857typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
3858
/* SSE/SSE2/SSE3 packed floating-point workers (full 128-bit, two XMM operands). */
FNIEMAIMPLFPSSEF2U128    iemAImpl_addps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_addpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_mulps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_mulpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_subps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_subpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_minps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_minpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_divps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_divpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_maxps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_maxpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_haddps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_haddpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_hsubps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_hsubpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_sqrtps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_rsqrtps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_sqrtpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_addsubps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_addsubpd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtpd2ps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtps2pd_u128;

/* SSE/SSE2 packed integer <-> floating-point conversion workers. */
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtdq2ps_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtps2dq_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvttps2dq_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvttpd2dq_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtdq2pd_u128;
FNIEMAIMPLFPSSEF2U128    iemAImpl_cvtpd2dq_u128;

/* SSE/SSE2 scalar floating-point workers (second operand is a single r32/r64 value). */
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;

/* AVX 128-bit packed floating-point workers; each comes with a C fallback used
   when the host lacks native support for the instruction. */
FNIEMAIMPLFPAVXF3U128    iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
FNIEMAIMPLFPAVXF3U128    iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;

/* AVX scalar floating-point workers (second operand is a single r32/r64 value). */
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;

/* AVX 256-bit packed floating-point workers, with C fallbacks. */
FNIEMAIMPLFPAVXF3U256    iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
/* NOTE(review): the 128-bit siblings above are named iemAImpl_vaddsubps_u128 /
   iemAImpl_vaddsubpd_u128 (no 'h'); the extra 'h' in the two names below looks
   like a naming inconsistency (there is no VHADDSUB instruction).  The names
   must match the implementation file, so verify there before renaming. */
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
FNIEMAIMPLFPAVXF3U256    iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
/** @} */
3967
/** @name C instruction implementations for anything slightly complicated.
 * @{ */

/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * no extra arguments.
 *
 * @param a_Name The name of the type.
 */
# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * For defining a C instruction implementation function taking no extra
 * arguments.
 *
 * @param a_Name The name of the function.
 */
# define IEM_CIMPL_DEF_0(a_Name) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * Prototype version of IEM_CIMPL_DEF_0.
 */
# define IEM_CIMPL_PROTO_0(a_Name) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
/**
 * For calling a C instruction implementation function taking no extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 */
# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)

/** Type for a C instruction implementation function taking no extra
 * arguments. */
typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
/** Function pointer type for a C instruction implementation function taking
 * no extra arguments. */
typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
4009
/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * one extra argument.
 *
 * @param a_Name  The name of the type.
 * @param a_Type0 The argument type.
 * @param a_Arg0  The argument name.
 */
# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
/**
 * For defining a C instruction implementation function taking one extra
 * argument.
 *
 * @param a_Name  The name of the function.
 * @param a_Type0 The argument type.
 * @param a_Arg0  The argument name.
 */
# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
/**
 * Prototype version of IEM_CIMPL_DEF_1.
 */
# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
/**
 * For calling a C instruction implementation function taking one extra
 * argument.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 * @param a0   The name of the 1st argument.
 */
# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4046
/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * two extra arguments.
 *
 * @param a_Name  The name of the type.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 */
# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
/**
 * For defining a C instruction implementation function taking two extra
 * arguments.
 *
 * @param a_Name  The name of the function.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 */
# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
/**
 * Prototype version of IEM_CIMPL_DEF_2.
 */
# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
/**
 * For calling a C instruction implementation function taking two extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 * @param a0   The name of the 1st argument.
 * @param a1   The name of the 2nd argument.
 */
# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4088
/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * three extra arguments.
 *
 * @param a_Name  The name of the type.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 */
# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
/**
 * For defining a C instruction implementation function taking three extra
 * arguments.
 *
 * @param a_Name  The name of the function.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 */
# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
/**
 * Prototype version of IEM_CIMPL_DEF_3.
 */
# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
/**
 * For calling a C instruction implementation function taking three extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 * @param a0   The name of the 1st argument.
 * @param a1   The name of the 2nd argument.
 * @param a2   The name of the 3rd argument.
 */
# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4135
4136
/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * four extra arguments.
 *
 * @param a_Name  The name of the type.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 * @param a_Type3 The type of the 4th argument.
 * @param a_Arg3  The name of the 4th argument.
 */
# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
/**
 * For defining a C instruction implementation function taking four extra
 * arguments.
 *
 * @param a_Name  The name of the function.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 * @param a_Type3 The type of the 4th argument.
 * @param a_Arg3  The name of the 4th argument.
 */
# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
                                             a_Type2 a_Arg2, a_Type3 a_Arg3))
/**
 * Prototype version of IEM_CIMPL_DEF_4.
 */
# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
                                               a_Type2 a_Arg2, a_Type3 a_Arg3))
/**
 * For calling a C instruction implementation function taking four extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 * @param a0   The name of the 1st argument.
 * @param a1   The name of the 2nd argument.
 * @param a2   The name of the 3rd argument.
 * @param a3   The name of the 4th argument.
 */
# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4190
4191
/**
 * For typedef'ing or declaring a C instruction implementation function taking
 * five extra arguments.
 *
 * @param a_Name  The name of the type.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 * @param a_Type3 The type of the 4th argument.
 * @param a_Arg3  The name of the 4th argument.
 * @param a_Type4 The type of the 5th argument.
 * @param a_Arg4  The name of the 5th argument.
 */
# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
                                              a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
                                              a_Type3 a_Arg3, a_Type4 a_Arg4))
/**
 * For defining a C instruction implementation function taking five extra
 * arguments.
 *
 * @param a_Name  The name of the function.
 * @param a_Type0 The type of the 1st argument.
 * @param a_Arg0  The name of the 1st argument.
 * @param a_Type1 The type of the 2nd argument.
 * @param a_Arg1  The name of the 2nd argument.
 * @param a_Type2 The type of the 3rd argument.
 * @param a_Arg2  The name of the 3rd argument.
 * @param a_Type3 The type of the 4th argument.
 * @param a_Arg3  The name of the 4th argument.
 * @param a_Type4 The type of the 5th argument.
 * @param a_Arg4  The name of the 5th argument.
 */
# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
                                             a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
/**
 * Prototype version of IEM_CIMPL_DEF_5.
 */
# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
    IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
                                               a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
/**
 * For calling a C instruction implementation function taking five extra
 * arguments.
 *
 * This special call macro adds default arguments to the call and allows us to
 * change these later.
 *
 * @param a_fn The name of the function.
 * @param a0   The name of the 1st argument.
 * @param a1   The name of the 2nd argument.
 * @param a2   The name of the 3rd argument.
 * @param a3   The name of the 4th argument.
 * @param a4   The name of the 5th argument.
 */
# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))

/** @} */
4254
4255
/** @name Opcode Decoder Function Types.
 * @{ */

/** @typedef PFNIEMOP
 * Pointer to an opcode decoder function.
 */

/** @def FNIEMOP_DEF
 * Define an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
 *
 * @param a_Name The function name.
 */

/** @typedef PFNIEMOPRM
 * Pointer to an opcode decoder function with RM byte.
 */

/** @def FNIEMOPRM_DEF
 * Define an opcode decoder function with RM byte.
 *
 * We're using macros for this so that adding and removing parameters as well as
 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
 *
 * @param a_Name The function name.
 */

/* Four variants below: fastcall is forced for 32-bit x86 GCC and MSC builds;
   GCC builds without throw/catch based longjmp get the nothrow attribute;
   everything else relies on IEM_NOEXCEPT_MAY_LONGJMP. */
#if defined(__GNUC__) && defined(RT_ARCH_X86)
typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP

#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)

#else
typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
# define FNIEMOP_DEF(a_Name) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
    IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP

#endif
#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4327
/**
 * Call an opcode decoder function.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF.
 */
#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)

/**
 * Call a common opcode decoder function taking one extra argument.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF_1.
 */
#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)

/**
 * Call a common opcode decoder function taking two extra arguments.
 *
 * We're using macros for this so that adding and removing parameters can be
 * done as we please. See FNIEMOP_DEF_2.
 */
#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
/** @} */
4352
4353
/** @name Misc Helpers
 * @{ */

/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
 * due to GCC lacking knowledge about the value range of a switch. */
#if RT_CPLUSPLUS_PREREQ(202000)
# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
#else
# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
#endif

/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
#if RT_CPLUSPLUS_PREREQ(202000)
# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
#else
# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
#endif

/**
 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    do { \
        /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif

/**
 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 * occasion using the supplied logger statement.
 *
 * @param a_LoggerArgs What to log on failure.
 */
#ifdef LOG_ENABLED
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    do { \
        LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
        /*LogFunc(a_LoggerArgs);*/ \
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
    } while (0)
#else
# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
    return VERR_IEM_ASPECT_NOT_IMPLEMENTED
#endif
4404
/**
 * Gets the CPU mode (from fExec) as an IEMMODE value.
 *
 * @returns IEMMODE
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_CPU_MODE(a_pVCpu)           ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)

/**
 * Check if we're currently executing in real or virtual 8086 mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu)    ((  ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
                                              & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)

/**
 * Check if we're currently executing in virtual 8086 mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_V86_MODE(a_pVCpu)            (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)

/**
 * Check if we're currently executing in long mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_LONG_MODE(a_pVCpu)           (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if we're currently executing in a 16-bit code segment.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_16BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)

/**
 * Check if we're currently executing in a 32-bit code segment.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_32BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)

/**
 * Check if we're currently executing in a 64-bit code segment.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_64BIT_CODE(a_pVCpu)          (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)

/**
 * Check if we're currently executing in real mode.
 *
 * @returns @c true if it is, @c false if not.
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_IS_REAL_MODE(a_pVCpu)           (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))

/**
 * Gets the current protection level (CPL).
 *
 * @returns 0..3
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_CPL(a_pVCpu)                (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)

/**
 * Sets the current protection level (CPL).
 *
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
    do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)

/**
 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
 * @returns PCCPUMFEATURES
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))

/**
 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
 * @returns PCCPUMFEATURES
 * @param   a_pVCpu The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu)  (&g_CpumHostFeatures.s)

/**
 * Evaluates to true if we're presenting an Intel CPU to the guest.
 */
#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu)     ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )

/**
 * Evaluates to true if we're presenting an AMD CPU to the guest.
 * Note: Hygon CPUs are treated as AMD here.
 */
#define IEM_IS_GUEST_CPU_AMD(a_pVCpu)       ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )

/**
 * Check if the address is canonical.
 */
#define IEM_IS_CANONICAL(a_u64Addr)         X86_IS_CANONICAL(a_u64Addr)
4514
4515/** Checks if the ModR/M byte is in register mode or not. */
4516#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
4517/** Checks if the ModR/M byte is in memory mode or not. */
4518#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
4519
4520/**
4521 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
4522 *
4523 * For use during decoding.
4524 */
4525#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
4526/**
4527 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
4528 *
4529 * For use during decoding.
4530 */
4531#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
4532
4533/**
4534 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
4535 *
4536 * For use during decoding.
4537 */
4538#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
4539/**
4540 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
4541 *
4542 * For use during decoding.
4543 */
4544#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
4545
/**
 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
 * register index, with REX.R added in.
 *
 * Without a REX prefix, 8-bit register indices 4 thru 7 select the legacy
 * high byte registers (AH, CH, DH, BH); these are remapped to indices
 * 16 thru 19 here so the iemGRegXxxU8Ex accessors can tell them apart from
 * SPL, BPL, SIL and DIL.
 *
 * For use during decoding.
 *
 * @note    Fixed to use the a_pVCpu macro parameter instead of silently
 *          relying on a variable named 'pVCpu' at the expansion site.
 *
 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
 */
#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
    (   ((a_pVCpu)->iem.s.fPrefixes & IEM_OP_PRF_REX) \
     || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(a_pVCpu, a_bRm) < 4 */ \
     ? IEM_GET_MODRM_REG(a_pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
/**
 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
 * with REX.B added in.
 *
 * Same high-byte-register remapping (indices 16 thru 19 without REX) as
 * IEM_GET_MODRM_REG_EX8 above.
 *
 * For use during decoding.
 *
 * @note    Fixed to use the a_pVCpu macro parameter instead of silently
 *          relying on a variable named 'pVCpu' at the expansion site.
 *
 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
 */
#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
    (   ((a_pVCpu)->iem.s.fPrefixes & IEM_OP_PRF_REX) \
     || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(a_pVCpu, a_bRm) < 4 */ \
     ? IEM_GET_MODRM_RM(a_pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
4570
4571/**
4572 * Combines the prefix REX and ModR/M byte for passing to
4573 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4574 *
4575 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
4576 * The two bits are part of the REG sub-field, which isn't needed in
4577 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4578 *
4579 * For use during decoding/recompiling.
4580 */
4581#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
4582 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
4583 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (26 - 3) ) )
4584AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(26));
4585AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(27));
4586
/**
 * Gets the effective VEX.VVVV value.
 *
 * The 4th bit is ignored if not 64-bit code; i.e. outside 64-bit mode only
 * the lower 3 bits of the stored VEX.VVVV value are used.
 *
 * @returns effective V-register value.
 * @param   a_pVCpu         The cross context virtual CPU structure of the calling thread.
 */
#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
    (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
4596
4597
/**
 * Checks if we're executing inside an AMD-V or VT-x guest.
 *
 * Evaluates to a constant false when neither flavour of nested hardware
 * virtualization is compiled in.
 */
#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
# define IEM_IS_IN_GUEST(a_pVCpu)       RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
#else
# define IEM_IS_IN_GUEST(a_pVCpu)       false
#endif
4606
4607
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX

/**
 * Check if the guest has entered VMX root operation.
 */
# define IEM_VMX_IS_ROOT_MODE(a_pVCpu)      (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))

/**
 * Check if the guest has entered VMX non-root operation.
 *
 * True when both the VMX context flag and the in-guest flag are set in fExec.
 */
# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)  (    ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
                                              == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )

/**
 * Check if the nested-guest has the given Pin-based VM-execution control set.
 */
# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)      (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))

/**
 * Check if the nested-guest has the given Processor-based VM-execution control set.
 */
# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl)    (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))

/**
 * Check if the nested-guest has the given Secondary Processor-based VM-execution
 * control set.
 */
# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2)  (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))

/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu)   ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)

/** Whether a shadow VMCS is present for the given VCPU. */
# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu)   RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Gets the VMXON region pointer. */
# define IEM_VMX_GET_VMXON_PTR(a_pVCpu)     ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)

/** Gets the guest-physical address of the current VMCS for the given VCPU. */
# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)  ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)

/** Whether a current VMCS is present for the given VCPU. */
# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)  RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)

/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
    do \
    { \
        Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
    } while (0)

/** Clears any current VMCS for the given VCPU. */
# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
    do \
    { \
        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
    } while (0)

/**
 * Invokes the VMX VM-exit handler for an instruction intercept.
 */
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
    do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for an instruction intercept where the
 * instruction provides additional VM-exit information.
 */
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
    do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for a task switch.
 */
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
    do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for MWAIT.
 */
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
    do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for EPT faults.
 */
# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
    do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)

/**
 * Invokes the VMX VM-exit handler for triple faults.
 */
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
    do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
4703
4704#else
/* Stubs for when nested VT-x (VMX) support isn't compiled in: no VMX root or
   non-root operation is ever reported, no controls are ever set, and the
   VM-exit invokers bail out with an internal processing error status.
   Note: stub parameter names now match the real variants above (a_PinCtl,
   a_ProcCtl, a_ProcCtl2 instead of the copy/paste a_cbInstr). */
# define IEM_VMX_IS_ROOT_MODE(a_pVCpu)                                              (false)
# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)                                          (false)
# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)                                  (false)
# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl)                                (false)
# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2)                              (false)
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr)                do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr)   do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr)              do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr)      do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual)       do { return VERR_VMX_IPE_1; } while (0)
4716
4717#endif
4718
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Checks if we're executing a guest using AMD-V.
 *
 * True when both the SVM context flag and the in-guest flag are set in fExec.
 *
 * NOTE(review): a_pVCpu is used unparenthesized in the expansion below; this
 * is fine for the simple lvalue arguments (pVCpu) actually passed by callers.
 */
# define IEM_SVM_IS_IN_GUEST(a_pVCpu) (   (a_pVCpu->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
                                       == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
/**
 * Check if an SVM control/instruction intercept is set.
 */
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))

/**
 * Check if an SVM read CRx intercept is set.
 */
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Check if an SVM write CRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Check if an SVM read DRx intercept is set.
 */
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Check if an SVM write DRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Check if an SVM exception intercept is set.
 */
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))

/**
 * Invokes the SVM \#VMEXIT handler for the nested-guest.
 */
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)

/**
 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
 * corresponding decode assist information (only provided when the CPU
 * advertises the decode-assists feature and the access is a plain MOV CRx).
 */
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
    do \
    { \
        uint64_t uExitInfo1; \
        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
        else \
            uExitInfo1 = 0; \
        IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
    } while (0)

/** Checks and handles an SVM nested-guest instruction intercept, updating
 *  NRIP if needed before taking the \#VMEXIT.
 */
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    do \
    { \
        if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)

/** Checks and handles SVM nested-guest CR0 read intercept. */
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    do \
    { \
        if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
        { /* probably likely */ } \
        else \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)

/**
 * Updates the NextRIP (NRIP) field in the nested-guest VMCB, but only when
 * the guest CPU profile advertises the NextRIP-save feature.
 */
# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
    do { \
        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
    } while (0)

#else
/* Stubs for when nested AMD-V (SVM) support isn't compiled in: no intercepts
   are ever reported as set and the \#VMEXIT invokers bail out with an
   internal processing error status. */
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                            (false)
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                               (false)
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                              (false)
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                               (false)
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                              (false)
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                              (false)
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)           do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)          do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
                                       a_uExitInfo1, a_uExitInfo2, a_cbInstr)           do { } while (0)
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr)                                        do { } while (0)

#endif
4833
4834/** @} */
4835
4836uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
4837VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
4838
4839
/**
 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
 *
 * The two members overlay the same storage, giving the legacy and the
 * long-mode interpretations of the fetched descriptor data.
 */
typedef union IEMSELDESC
{
    /** The legacy view. */
    X86DESC     Legacy;
    /** The long mode view. */
    X86DESC64   Long;
} IEMSELDESC;
/** Pointer to a selector descriptor table entry. */
typedef IEMSELDESC *PIEMSELDESC;
4852
4853/** @name Raising Exceptions.
4854 * @{ */
4855VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
4856 uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
4857
4858VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
4859 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
4860#ifdef IEM_WITH_SETJMP
4861DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
4862 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
4863#endif
4864VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
4865VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4866VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
4867VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
4868VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
4869VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4870VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
4871VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
4872VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4873/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
4874VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4875VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4876VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4877VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4878VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4879VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
4880#ifdef IEM_WITH_SETJMP
4881DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
4882#endif
4883VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
4884VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
4885VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
4886#ifdef IEM_WITH_SETJMP
4887DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
4888#endif
4889VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
4890#ifdef IEM_WITH_SETJMP
4891DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
4892#endif
4893VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
4894#ifdef IEM_WITH_SETJMP
4895DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
4896#endif
4897VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
4898#ifdef IEM_WITH_SETJMP
4899DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
4900#endif
4901VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
4902VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4903#ifdef IEM_WITH_SETJMP
4904DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
4905#endif
4906VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4907
4908void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
4909
/* C-implementation workers backing the IEMOP_RAISE_XXX macros below. */
IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);

/**
 * Macro for calling iemCImplRaiseDivideError().
 *
 * This is for things that will _always_ decode to an \#DE, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_DIVIDE_ERROR_RET()          IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseDivideError)

/**
 * Macro for calling iemCImplRaiseInvalidLockPrefix().
 *
 * This is for things that will _always_ decode to an \#UD, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET()   IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidLockPrefix)

/**
 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
 *
 * This is for things that will _always_ decode to an \#UD, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_INVALID_OPCODE_RET()        IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)

/**
 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
 *
 * Using this macro means you've got _buggy_ _code_ and are doing things that
 * belongs exclusively in IEMAllCImpl.cpp during decoding.
 *
 * Expands identically to IEMOP_RAISE_INVALID_OPCODE_RET; the separate name
 * only documents the (undesirable) intent at the use site.
 *
 * @return  Strict VBox status code.
 * @see     IEMOP_RAISE_INVALID_OPCODE_RET
 */
#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)
4954
4955/** @} */
4956
/** @name   Register Access.
 * @{ */
VBOXSTRICTRC    iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                           IEMMODE enmEffOpSize) RT_NOEXCEPT;
VBOXSTRICTRC    iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
VBOXSTRICTRC    iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                            IEMMODE enmEffOpSize) RT_NOEXCEPT;
/* NOTE(review): "Clearning" below is a long-standing misspelling of "Clearing"
   that is part of the established API names; renaming would touch all callers. */
VBOXSTRICTRC    iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewRip) RT_NOEXCEPT;
VBOXSTRICTRC    iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewRip) RT_NOEXCEPT;
VBOXSTRICTRC    iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT;
/** @} */
4968
4969/** @name FPU access and helpers.
4970 * @{ */
4971void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
4972void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4973void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
4974void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
4975void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
4976void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4977 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4978void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4979 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4980void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
4981void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
4982void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
4983void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4984void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
4985void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4986void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
4987void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4988void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
4989void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4990void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
4991void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
4992void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
4993void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
4994void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
4995/** @} */
4996
4997/** @name SSE+AVX SIMD access and helpers.
4998 * @{ */
4999void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT;
5000void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
5001/** @} */
5002
5003/** @name Memory access.
5004 * @{ */
5005
/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
#define IEM_MEMMAP_F_ALIGN_GP       RT_BIT_32(16)
/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
 *  when it works like normal \#AC.  Always used with IEM_MEMMAP_F_ALIGN_GP. */
#define IEM_MEMMAP_F_ALIGN_SSE      RT_BIT_32(17)
/** If \#AC is applicable, raise it.  Always used with IEM_MEMMAP_F_ALIGN_GP.
 * Users include FXSAVE & FXRSTOR.
 * NOTE(review): these flags are presumably OR'ed into the uAlignCtl argument
 * of iemMemMap() below — confirm against the callers. */
#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
5014
5015VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5016 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
5017VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
5018#ifndef IN_RING3
5019VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
5020#endif
5021void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
5022VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
5023VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5024VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
5025
5026void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
5027void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
5028#ifdef IEM_WITH_CODE_TLB
5029void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
5030#else
5031VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
5032#endif
5033#ifdef IEM_WITH_SETJMP
5034uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5035uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5036uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5037uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5038#else
5039VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
5040VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5041VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5042VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5043VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5044VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5045VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5046VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5047VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5048VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5049VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5050#endif
5051
5052VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5053VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5054VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5055VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5056VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5057VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5058VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5059VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5060VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5061VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5062VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5063VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5064VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
5065 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
5066#ifdef IEM_WITH_SETJMP
5067uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5068uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5069uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5070uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5071uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5072uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5073void iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5074void iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5075void iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5076void iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5077void iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5078void iemMemFetchDataU256AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5079# if 0 /* these are inlined now */
5080uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5081uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5082uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5083uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5084uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5085uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5086# endif
5087void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5088void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5089void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5090void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5091void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5092void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5093#endif
5094
5095VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5096VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5097VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5098VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
/** Fetches the descriptor table entry for selector @a uSel into @a pDesc,
 * raising exception @a uXcpt on failure. */
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;

/*
 * Guest data store helpers: write a value of the given width to the guest
 * address iSegReg:GCPtrMem, returning a strict status code.  The AlignedSse /
 * AlignedAvx variants enforce the alignment checking the corresponding
 * instruction forms require.
 */
VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
/** Stores a descriptor-table register image (limit + base) as written by SGDT/SIDT. */
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
/*
 * Longjmp-style store variants: same operations as above, but errors are
 * reported by longjmp (IEM_NOEXCEPT_MAY_LONGJMP) instead of a return code.
 */
void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
/* NOTE(review): the U8..U64 Jmp prototypes are compiled out here while the
   U128/U256 ones below are live -- presumably the narrow variants are provided
   as inlines elsewhere; confirm before re-enabling. */
#if 0
void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
5130
#ifdef IEM_WITH_SETJMP
/*
 * Guest-memory mapping helpers (longjmp error reporting): map a data item at
 * iSegReg:GCPtrMem for read-write (Rw), write-only (Wo) or read-only (Ro)
 * access and return a host pointer to it.  *pbUnmapInfo receives the token
 * that must be passed to the matching commit-and-unmap helper below.
 */
uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

/* Commit and unmap a mapping established by the map helpers above; bMapInfo is
   the value the map call stored via pbUnmapInfo. */
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
5149
/*
 * Guest stack access helpers (strict status code error reporting).
 *
 * The Begin/Commit ("Special") pairs map a stack area for multi-step
 * operations; the plain PushUxx/PopUxx helpers do a single push/pop adjusting
 * RSP, and the ...Ex variants operate on a caller-provided RSP copy
 * (pTmpRsp) instead of the live register.
 */
VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
/** Push variant for segment-register values (32-bit operand size). */
VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5171
#ifdef IEM_WITH_SETJMP
/*
 * Longjmp-style stack helpers.  The Flat32/Flat64 variants are specialized
 * for flat 32-bit and 64-bit stack addressing respectively; the generic ones
 * below handle the segmented case.
 */
void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemStackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemStackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemStackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFlat32StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFlat32StackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFlat64StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFlat64StackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
5192
5193/** @} */
5194
5195/** @name IEMAllCImpl.cpp
5196 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
5197 * @{ */
/* Stack-frame and flags instructions. */
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
/* Near calls: absolute target (uNewPC) and relative displacement (offDisp) forms. */
IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
/* Far branches and returns. */
IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
/** Function type matching the far-branch workers (FarJmp/callf) above. */
typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
/** Pointer to a far-branch worker. */
typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
/* Software interrupts and interrupt returns. */
IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
/* Fast system call instructions. */
IEM_CIMPL_PROTO_0(iemCImpl_syscall);
IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
/* Segment register loads and descriptor inspection (VERR/VERW, LAR/LSL). */
IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
/* Descriptor table and task/system register instructions.
   NOTE(review): the sldt/str/smsw _reg variants take enmEffOpSize as uint8_t
   rather than IEMMODE like the rest -- presumably to match the worker
   definitions; keep in sync with IEMAllCImpl.cpp. */
IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
/* Control, debug and test register moves. */
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_0(iemCImpl_clts);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
/* TLB / cache management and read-only MSR/TSC/PMC access. */
IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
IEM_CIMPL_PROTO_0(iemCImpl_invd);
IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
IEM_CIMPL_PROTO_0(iemCImpl_rsm);
IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
/* Port I/O. */
IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
/* Interrupt flag, halt and monitor/mwait. */
IEM_CIMPL_PROTO_0(iemCImpl_cli);
IEM_CIMPL_PROTO_0(iemCImpl_sti);
IEM_CIMPL_PROTO_0(iemCImpl_hlt);
IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_mwait);
IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
/* BCD adjustment and BOUND. */
IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
IEM_CIMPL_PROTO_0(iemCImpl_daa);
IEM_CIMPL_PROTO_0(iemCImpl_das);
IEM_CIMPL_PROTO_0(iemCImpl_aaa);
IEM_CIMPL_PROTO_0(iemCImpl_aas);
IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
/* Extended state control and miscellaneous. */
IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
IEM_CIMPL_PROTO_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
                  PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags);
IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
/* FPU/SSE/AVX state management. */
IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
5317/** @} */
5318
5319/** @name IEMAllCImplStrInstr.cpp.h
5320 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
5321 * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
5322 * @{ */
/* 16-bit address size (addr16) string workers, instantiated per operand size.
   NOTE(review): the lods workers take iEffSeg as int8_t while every other
   worker uses uint8_t -- presumably intentional to match the template
   instantiation in IEMAllCImplStrInstr.cpp.h; confirm before changing. */
IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);


/* 32-bit address size (addr32) string workers; note op64 exists here (64-bit
   operand with 32-bit address override). */
IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);


/* 64-bit address size (addr64) string workers. */
IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);

IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5456/** @} */
5457
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
/*
 * Nested VT-x (VMX) support: VM-exit producers for the various intercepted
 * events/instructions, virtual-APIC helpers, and the VMX instruction workers.
 */
VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
                                 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
                                    bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
                                   RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
                           uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
/* NOTE(review): several VMREAD/VMWRITE prototypes below have parameter names
   whose size prefix disagrees with the declared type (u64VmcsField as
   uint32_t, pu32Dst as uint64_t *).  The types must stay in sync with the
   worker definitions in IEMAllCImplVmxInstr.cpp; only the names look stale. */
IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
#endif
5506
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/*
 * Nested AMD-V (SVM) support: \#VMEXIT producers, intercept helpers and the
 * SVM instruction workers.
 */
VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                     uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
IEM_CIMPL_PROTO_0(iemCImpl_vmload);
IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
IEM_CIMPL_PROTO_0(iemCImpl_clgi);
IEM_CIMPL_PROTO_0(iemCImpl_stgi);
IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
IEM_CIMPL_PROTO_0(iemCImpl_skinit);
IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
#endif
5522
/* Hypercall workers shared between the two nested-virt flavors. */
IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */

/* Interpreter-only opcode dispatch tables: one-byte map, 0x0f two-byte map,
   the 0x0f 0x3a / 0x0f 0x38 three-byte maps, and the three VEX maps. */
extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
5534
5535/*
5536 * Recompiler related stuff.
5537 */
5538extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
5539extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
5540extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
5541extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
5542extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
5543extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
5544extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
5545
5546DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
5547 uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
5548void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
5549void iemTbAllocatorProcessDelayedFrees(PVMCPU pVCpu, PIEMTBALLOCATOR pTbAllocator);
5550
5551
/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
/** Threaded-function signature: takes the vCPU plus three generic 64-bit
 *  parameters packed by the threaded recompiler. */
typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
/** Pointer to a threaded function. */
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
/* GCC without throw/catch: the definition/prototype carry __nothrow__ (the
   typedef above keeps it commented out -- see the @todo). */
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)

#else
/* Other compilers / throw-catch mode: use IEM_NOEXCEPT_MAY_LONGJMP instead. */
typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
#endif
5569
5570
/** Built-in threaded function: defers execution to a C implementation taking
 *  no extra arguments. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);

/* Per-TB sanity/recheck helpers emitted by the threaded compiler.
   Naming pattern (inferred from the suffixes — confirm against definitions):
     CheckCsLim...       - also re-validates the CS segment limit;
     ...ConsiderCsLim    - checks the CS limit only when relevant;
     ...Opcodes          - re-verifies the opcode bytes the TB was built from. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);

/* Branching: variants that additionally verify the (branch target) PC. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);

/* Branching: variants that (re)load the code TLB entry for the new page. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);

/* Natural page crossing: an instruction straddles a page boundary. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);

/* Execution continues on the next sequential page. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);

/* Execution starts on a new (non-sequential) page. */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
5603
/** Emits an IRQ-check call into @a pTb before the current position; returns
 *  false on failure (NOTE(review): return semantics inferred — confirm). */
bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
/** Handles the complicated cases when beginning to emit calls for a TB. */
bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);

/* Native recompiler public bits: */
/** Recompiles the threaded TB @a pTb to native code, returning the native TB
 *  (or NULL on failure — TODO confirm failure contract at the definition). */
DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
/** Initializes the executable-memory allocator used for native TB code. */
int  iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk);
/** Frees a block previously handed out by the executable-memory allocator. */
void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb);
5611
5612
5613/** @} */
5614
5615RT_C_DECLS_END
5616
5617#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
5618
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette