VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@101682

Last change on this file since 101682 was 101682, checked in by vboxsync, 17 months ago

VMM/IEM,VBox/err.h: Refactored the native recompiler code to throw/longjmp on errors rather than returning UINT32_MAX/UINT8_MAX. This should make it easier to pinpoint why recompilation fails (we've got an RC) and get rid of hundreds of AssertReturn statements that clutters up the code and introduces lots of unnecessary branches. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 282.9 KB
1/* $Id: IEMInternal.h 101682 2023-10-31 12:18:44Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41#include <iprt/list.h>
42
43
44RT_C_DECLS_BEGIN
45
46
47/** @defgroup grp_iem_int Internals
48 * @ingroup grp_iem
49 * @internal
50 * @{
51 */
52
53/** For expanding symbols in SlickEdit and other products tagging and
54 * cross-referencing IEM symbols. */
55#ifndef IEM_STATIC
56# define IEM_STATIC static
57#endif
58
59/** @def IEM_WITH_SETJMP
60 * Enables alternative status code handling using setjmps.
61 *
62 * This adds a bit of expense via the setjmp() call since it saves all the
63 * non-volatile registers. However, it eliminates return code checks and allows
64 * for more optimal return value passing (return regs instead of stack buffer).
65 */
66#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
67# define IEM_WITH_SETJMP
68#endif
69
70/** @def IEM_WITH_THROW_CATCH
71 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
72 * mode code when IEM_WITH_SETJMP is in effect.
73 *
74 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
75 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
76 * test result value improving by more than 1%. (Best out of three.)
77 *
78 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
79 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster, with all but some of
80 * the MMIO and CPUID tests running noticeably faster. Variation is greater than on
81 * Linux, but it should be quite a bit faster for normal code.
82 */
83#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
84 || defined(DOXYGEN_RUNNING)
85# define IEM_WITH_THROW_CATCH
86#endif
87
88/** @def IEM_DO_LONGJMP
89 *
90 * Wrapper around longjmp / throw.
91 *
92 * @param a_pVCpu The CPU handle.
93 * @param a_rc The status code jump back with / throw.
94 */
95#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
96# ifdef IEM_WITH_THROW_CATCH
97# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
98# else
99# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
100# endif
101#endif
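/** @remarks Illustrative sketch (not part of the original header) of how a
 * helper raises an error through IEM_DO_LONGJMP and how the setjmp flavour of
 * a caller recovers it; the iemExample* names are hypothetical and
 * IEM_NOEXCEPT_MAY_LONGJMP is defined further down.
 * @code{.cpp}
  uint8_t iemExampleFetchU8(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
  {
      if (RT_UNLIKELY(!iemExampleHaveByte(pVCpu)))
          IEM_DO_LONGJMP(pVCpu, VERR_IEM_IPE_1);  // does not return
      return iemExampleGetByte(pVCpu);
  }

  // Caller side, setjmp flavour (the throw/catch flavour uses try/catch on int):
  jmp_buf JmpBuf;
  pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;       // the buffer IEM_DO_LONGJMP jumps to
  int rc = setjmp(JmpBuf);
  if (rc == 0)
      rc = iemExampleExecute(pVCpu);              // may IEM_DO_LONGJMP back here
 * @endcode
 */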
102
103/** For use with IEM functions that may do a longjmp (when enabled).
104 *
105 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
106 * attribute. So, we indicate that functions that may be part of a longjmp may
107 * throw "exceptions" and that the compiler should definitely not generate any
108 * std::terminate calling unwind code.
109 *
110 * Here is one example of this ending in std::terminate:
111 * @code{.txt}
11200 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
11301 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
11402 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
11503 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
11604 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
11705 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
11806 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
11907 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
12008 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
12109 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
1220a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
1230b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
1240c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
1250d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
1260e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
1270f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
12810 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
129 @endcode
130 *
131 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
132 */
133#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
134# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
135#else
136# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
137#endif
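/** @remarks For instance (illustrative), a function that may exit via
 * IEM_DO_LONGJMP would be declared like this, so MSVC doesn't emit
 * std::terminate'ing unwind code around it:
 * @code{.cpp}
  uint64_t iemExampleFetchU64(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
 * @endcode
 */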
138
139#define IEM_IMPLEMENTS_TASKSWITCH
140
141/** @def IEM_WITH_3DNOW
142 * Includes the 3DNow decoding. */
143#if (!defined(IEM_WITH_3DNOW) && !defined(IEM_WITHOUT_3DNOW)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
144# define IEM_WITH_3DNOW
145#endif
146
147/** @def IEM_WITH_THREE_0F_38
148 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
149#if (!defined(IEM_WITH_THREE_0F_38) && !defined(IEM_WITHOUT_THREE_0F_38)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
150# define IEM_WITH_THREE_0F_38
151#endif
152
153/** @def IEM_WITH_THREE_0F_3A
154 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
155#if (!defined(IEM_WITH_THREE_0F_3A) && !defined(IEM_WITHOUT_THREE_0F_3A)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
156# define IEM_WITH_THREE_0F_3A
157#endif
158
159/** @def IEM_WITH_VEX
160 * Includes the VEX decoding. */
161#if (!defined(IEM_WITH_VEX) && !defined(IEM_WITHOUT_VEX)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
162# define IEM_WITH_VEX
163#endif
164
165/** @def IEM_CFG_TARGET_CPU
166 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
167 *
168 * By default we allow this to be configured by the user via the
169 * CPUM/GuestCpuName config string, but this comes at a slight cost during
170 * decoding. So, for applications of this code where there is no need to
171 * be dynamic wrt target CPU, just modify this define.
172 */
173#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
174# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
175#endif
176
177//#define IEM_WITH_CODE_TLB // - work in progress
178//#define IEM_WITH_DATA_TLB // - work in progress
179
180
181/** @def IEM_USE_UNALIGNED_DATA_ACCESS
182 * Use unaligned accesses instead of elaborate byte assembly. */
183#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
184# define IEM_USE_UNALIGNED_DATA_ACCESS
185#endif
186
187//#define IEM_LOG_MEMORY_WRITES
188
189#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
190/** Instruction statistics. */
191typedef struct IEMINSTRSTATS
192{
193# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
194# include "IEMInstructionStatisticsTmpl.h"
195# undef IEM_DO_INSTR_STAT
196} IEMINSTRSTATS;
197#else
198struct IEMINSTRSTATS;
199typedef struct IEMINSTRSTATS IEMINSTRSTATS;
200#endif
201/** Pointer to IEM instruction statistics. */
202typedef IEMINSTRSTATS *PIEMINSTRSTATS;
203
204
205/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
206 * @{ */
207#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
208#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
209#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result */
210#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
211#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
212/** Selects the right variant from a_aArray.
213 * pVCpu is implicit in the caller context. */
214#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
215 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
216/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
217 * be used because the host CPU does not support the operation. */
218#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
219 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
220/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two-dimensional
221 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
222 * into the two.
223 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
224#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
225# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
226 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
227#else
228# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
229 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
230#endif
231/** @} */
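/** @remarks Illustrative sketch: selecting an EFLAGS-flavour specific worker
 * from a four-entry array (entry 3 being the reserved alias of entry 0). The
 * PFNEXAMPLEWORKER type and the worker names are hypothetical.
 * @code{.cpp}
  static PFNEXAMPLEWORKER const s_apfnWorkers[4] =
  { iemExampleWorker_Native, iemExampleWorker_Intel, iemExampleWorker_Amd, iemExampleWorker_Native };
  PFNEXAMPLEWORKER const pfnWorker = IEMTARGETCPU_EFL_BEHAVIOR_SELECT(s_apfnWorkers); // pVCpu implicit
 * @endcode
 */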
232
233/**
234 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
235 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
236 *
237 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
238 * indicator.
239 *
240 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
241 */
242#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
243# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
244 (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
245#else
246# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
247#endif
248
249
250/**
251 * Extended operand mode that includes a representation of 8-bit.
252 *
253 * This is used for packing down modes when invoking some C instruction
254 * implementations.
255 */
256typedef enum IEMMODEX
257{
258 IEMMODEX_16BIT = IEMMODE_16BIT,
259 IEMMODEX_32BIT = IEMMODE_32BIT,
260 IEMMODEX_64BIT = IEMMODE_64BIT,
261 IEMMODEX_8BIT
262} IEMMODEX;
263AssertCompileSize(IEMMODEX, 4);
264
265
266/**
267 * Branch types.
268 */
269typedef enum IEMBRANCH
270{
271 IEMBRANCH_JUMP = 1,
272 IEMBRANCH_CALL,
273 IEMBRANCH_TRAP,
274 IEMBRANCH_SOFTWARE_INT,
275 IEMBRANCH_HARDWARE_INT
276} IEMBRANCH;
277AssertCompileSize(IEMBRANCH, 4);
278
279
280/**
281 * INT instruction types.
282 */
283typedef enum IEMINT
284{
285 /** INT n instruction (opcode 0xcd imm). */
286 IEMINT_INTN = 0,
287 /** Single byte INT3 instruction (opcode 0xcc). */
288 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
289 /** Single byte INTO instruction (opcode 0xce). */
290 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
291 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
292 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
293} IEMINT;
294AssertCompileSize(IEMINT, 4);
295
296
297/**
298 * A FPU result.
299 */
300typedef struct IEMFPURESULT
301{
302 /** The output value. */
303 RTFLOAT80U r80Result;
304 /** The output status. */
305 uint16_t FSW;
306} IEMFPURESULT;
307AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
308/** Pointer to a FPU result. */
309typedef IEMFPURESULT *PIEMFPURESULT;
310/** Pointer to a const FPU result. */
311typedef IEMFPURESULT const *PCIEMFPURESULT;
312
313
314/**
315 * A FPU result consisting of two output values and FSW.
316 */
317typedef struct IEMFPURESULTTWO
318{
319 /** The first output value. */
320 RTFLOAT80U r80Result1;
321 /** The output status. */
322 uint16_t FSW;
323 /** The second output value. */
324 RTFLOAT80U r80Result2;
325} IEMFPURESULTTWO;
326AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
327AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
328/** Pointer to a FPU result consisting of two output values and FSW. */
329typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
330/** Pointer to a const FPU result consisting of two output values and FSW. */
331typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
332
333
334/**
335 * IEM TLB entry.
336 *
337 * Lookup assembly:
338 * @code{.asm}
339 ; Calculate tag.
340 mov rax, [VA]
341 shl rax, 16
342 shr rax, 16 + X86_PAGE_SHIFT
343 or rax, [uTlbRevision]
344
345 ; Do indexing.
346 movzx ecx, al
347 lea rcx, [pTlbEntries + rcx]
348
349 ; Check tag.
350 cmp [rcx + IEMTLBENTRY.uTag], rax
351 jne .TlbMiss
352
353 ; Check access.
354 mov rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
355 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
356 cmp rax, [uTlbPhysRev]
357 jne .TlbMiss
358
359 ; Calc address and we're done.
360 mov eax, X86_PAGE_OFFSET_MASK
361 and eax, [VA]
362 or rax, [rcx + IEMTLBENTRY.pMappingR3]
363 %ifdef VBOX_WITH_STATISTICS
364 inc qword [cTlbHits]
365 %endif
366 jmp .Done
367
368 .TlbMiss:
369 mov r8d, ACCESS_FLAGS
370 mov rdx, [VA]
371 mov rcx, [pVCpu]
372 call iemTlbTypeMiss
373 .Done:
374
375 @endcode
376 *
377 */
378typedef struct IEMTLBENTRY
379{
380 /** The TLB entry tag.
381 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits; this
382 * is ASSUMING a virtual address width of 48 bits.
383 *
384 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
385 *
386 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
387 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
388 * revision wraps around though, the tags need to be zeroed.
389 *
390 * @note Try using the SHRD instruction? After seeing
391 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
392 *
393 * @todo This will need to be reorganized for 57-bit wide virtual address and
394 * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
395 * have to move the TLB entry versioning entirely to the
396 * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
397 * 19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
398 * consumed by PCID and ASID (12 + 6 = 18).
399 */
400 uint64_t uTag;
401 /** Access flags and physical TLB revision.
402 *
403 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
404 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
405 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
406 * - Bit 3 - pgm phys/virt - not directly writable.
407 * - Bit 4 - pgm phys page - not directly readable.
408 * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
409 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
410 * - Bit 7 - tlb entry - pbMappingR3 member not valid.
411 * - Bits 63 thru 10 are used for the physical TLB revision number.
412 *
413 * We're using complemented bit meanings here because it makes it easy to check
414 * whether special action is required. For instance a user mode write access
415 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
416 * non-zero result would mean special handling needed because either it wasn't
417 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
418 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
419 * need to check any PTE flag.
420 */
421 uint64_t fFlagsAndPhysRev;
422 /** The guest physical page address. */
423 uint64_t GCPhys;
424 /** Pointer to the ring-3 mapping. */
425 R3PTRTYPE(uint8_t *) pbMappingR3;
426#if HC_ARCH_BITS == 32
427 uint32_t u32Padding1;
428#endif
429} IEMTLBENTRY;
430AssertCompileSize(IEMTLBENTRY, 32);
431/** Pointer to an IEM TLB entry. */
432typedef IEMTLBENTRY *PIEMTLBENTRY;
433
434/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
435 * @{ */
436#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
437#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
438#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
439#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
440#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
441#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Page tables: Not accessed (needs to be marked accessed). */
442#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
443#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
444#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
445#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(9) /**< Phys page: Code page. */
446#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
447/** @} */
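/** @remarks Illustrative sketch of the complemented-flag scheme described for
 * IEMTLBENTRY::fFlagsAndPhysRev: a user-mode write needs just one TEST of the
 * combined mask; iemExampleSlowWrite is hypothetical.
 * @code{.cpp}
  uint64_t const fNoRights = pTlbe->fFlagsAndPhysRev
                           & (  IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_DIRTY
                              | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_NO_MAPPINGR3);
  if (RT_LIKELY(!fNoRights))
      memcpy(&pTlbe->pbMappingR3[GCPtr & GUEST_PAGE_OFFSET_MASK], pvSrc, cb); // fast path
  else
      rcStrict = iemExampleSlowWrite(pVCpu, GCPtr, pvSrc, cb);                // handlers, dirty marking, etc.
 * @endcode
 */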
448
449
450/**
451 * An IEM TLB.
452 *
453 * We've got two of these, one for data and one for instructions.
454 */
455typedef struct IEMTLB
456{
457 /** The TLB entries.
458 * We've chosen 256 because that way we can obtain the result directly from an
459 * 8-bit register without an additional AND instruction. */
460 IEMTLBENTRY aEntries[256];
461 /** The TLB revision.
462 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
463 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
464 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
465 * (The revision zero indicates an invalid TLB entry.)
466 *
467 * The initial value is chosen to cause an early wraparound. */
468 uint64_t uTlbRevision;
469 /** The TLB physical address revision - shadow of PGM variable.
470 *
471 * This is actually only 54 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
472 * incremented by adding RT_BIT_64(10). When it wraps around and becomes zero,
473 * a rendezvous is called and each CPU wipes the IEMTLBENTRY::pbMappingR3 as well
474 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 10, 4, and 3.
475 *
476 * The initial value is chosen to cause an early wraparound. */
477 uint64_t volatile uTlbPhysRev;
478
479 /* Statistics: */
480
481 /** TLB hits (VBOX_WITH_STATISTICS only). */
482 uint64_t cTlbHits;
483 /** TLB misses. */
484 uint32_t cTlbMisses;
485 /** Slow read path. */
486 uint32_t cTlbSlowReadPath;
487 /** Safe read path. */
488 uint32_t cTlbSafeReadPath;
489 /** Safe write path. */
490 uint32_t cTlbSafeWritePath;
491#if 0
492 /** TLB misses because of tag mismatch. */
493 uint32_t cTlbMissesTag;
494 /** TLB misses because of virtual access violation. */
495 uint32_t cTlbMissesVirtAccess;
496 /** TLB misses because of dirty bit. */
497 uint32_t cTlbMissesDirty;
498 /** TLB misses because of MMIO */
499 uint32_t cTlbMissesMmio;
500 /** TLB misses because of write access handlers. */
501 uint32_t cTlbMissesWriteHandler;
502 /** TLB misses because no r3(/r0) mapping. */
503 uint32_t cTlbMissesMapping;
504#endif
505 /** Alignment padding. */
506 uint32_t au32Padding[6];
507} IEMTLB;
508AssertCompileSizeAlignment(IEMTLB, 64);
509/** IEMTLB::uTlbRevision increment. */
510#define IEMTLB_REVISION_INCR RT_BIT_64(36)
511/** IEMTLB::uTlbRevision mask. */
512#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
513/** IEMTLB::uTlbPhysRev increment.
514 * @sa IEMTLBE_F_PHYS_REV */
515#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
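/** @remarks Illustrative sketch of the cheap invalidate-everything scheme:
 * bumping the revision makes every existing tag mismatch, and only on
 * wraparound to zero do the tags actually need zeroing.
 * @code{.cpp}
  pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
  if (RT_UNLIKELY(pTlb->uTlbRevision == 0)) // zero is reserved for invalid entries
  {
      for (size_t i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
          pTlb->aEntries[i].uTag = 0;
      pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
  }
 * @endcode
 */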
516/**
517 * Calculates the TLB tag for a virtual address.
518 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
519 * @param a_pTlb The TLB.
520 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
521 * the clearing of the top 16 bits won't work (if 32-bit
522 * we'll end up with mostly zeros).
523 */
524#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
525/**
526 * Calculates the TLB tag for a virtual address but without TLB revision.
527 * @returns Tag value for indexing and comparing with IEMTLBENTRY::uTag.
528 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
529 * the clearing of the top 16 bits won't work (if 32-bit
530 * we'll end up with mostly zeros).
531 */
532#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
533/**
534 * Converts a TLB tag value into a TLB index.
535 * @returns Index into IEMTLB::aEntries.
536 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
537 */
538#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
539/**
540 * Converts a TLB tag value into a TLB entry pointer.
541 * @returns Pointer into IEMTLB::aEntries.
542 * @param a_pTlb The TLB.
543 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
544 */
545#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
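/** @remarks Worked example (illustrative): with GUEST_PAGE_SHIFT of 12, the
 * virtual address 0x00007ffe12345678 yields 0x00000007ffe12345 from
 * IEMTLB_CALC_TAG_NO_REV, and the low tag byte, 0x45, indexes aEntries.
 * Assumes the IEMCPU data TLB member is named DataTlb.
 * @code{.cpp}
  uint64_t const uTag  = IEMTLB_CALC_TAG(&pVCpu->iem.s.DataTlb, GCPtr); // revision OR'ed into bits 63:36
  PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
  if (pTlbe->uTag == uTag)
  {   // likely: TLB hit
  }
 * @endcode
 */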
546
547
548/** @name IEM_MC_F_XXX - MC block flags/clues.
549 * @todo Merge with IEM_CIMPL_F_XXX
550 * @{ */
551#define IEM_MC_F_ONLY_8086 RT_BIT_32(0)
552#define IEM_MC_F_MIN_186 RT_BIT_32(1)
553#define IEM_MC_F_MIN_286 RT_BIT_32(2)
554#define IEM_MC_F_NOT_286_OR_OLDER IEM_MC_F_MIN_386
555#define IEM_MC_F_MIN_386 RT_BIT_32(3)
556#define IEM_MC_F_MIN_486 RT_BIT_32(4)
557#define IEM_MC_F_MIN_PENTIUM RT_BIT_32(5)
558#define IEM_MC_F_MIN_PENTIUM_II IEM_MC_F_MIN_PENTIUM
559#define IEM_MC_F_MIN_CORE IEM_MC_F_MIN_PENTIUM
560#define IEM_MC_F_64BIT RT_BIT_32(6)
561#define IEM_MC_F_NOT_64BIT RT_BIT_32(7)
562/** @} */
563
564/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
565 *
566 * These clues are mainly for the recompiler, so that it can emit correct code.
567 *
568 * They are processed by the python script, which also automatically
569 * calculates flags for MC blocks based on the statements, extending the use of
570 * these flags to describe MC block behavior to the recompiler core. The python
571 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
572 * error checking purposes. The script emits the necessary fEndTb = true and
573 * similar statements as this reduces compile time a tiny bit.
574 *
575 * @{ */
576/** Flag set if direct branch, clear if absolute or indirect. */
577#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
578/** Flag set if indirect branch, clear if direct or relative.
579 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
580 * as well as for return instructions (RET, IRET, RETF). */
581#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
582/** Flag set if relative branch, clear if absolute or indirect. */
583#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
584/** Flag set if conditional branch, clear if unconditional. */
585#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
586/** Flag set if it's a far branch (changes CS). */
587#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
588/** Convenience: Testing any kind of branch. */
589#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
590
591/** Execution flags may change (IEMCPU::fExec). */
592#define IEM_CIMPL_F_MODE RT_BIT_32(5)
593/** May change significant portions of RFLAGS. */
594#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
595/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
596#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
597/** May trigger interrupt shadowing. */
598#define IEM_CIMPL_F_INHIBIT_SHADOW RT_BIT_32(8)
599/** May enable interrupts, so recheck IRQ immediately after executing
600 * the instruction. */
601#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(9)
602/** May disable interrupts, so recheck IRQ immediately before executing the
603 * instruction. */
604#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(10)
605/** Convenience: Check for IRQ both before and after an instruction. */
606#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
607/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
608#define IEM_CIMPL_F_VMEXIT RT_BIT_32(11)
609/** May modify FPU state.
610 * @todo Not sure if this is useful yet. */
611#define IEM_CIMPL_F_FPU RT_BIT_32(12)
612/** REP prefixed instruction which may yield before updating PC.
613 * @todo Not sure if this is useful, REP functions now return non-zero
614 * status if they don't update the PC. */
615#define IEM_CIMPL_F_REP RT_BIT_32(13)
616/** I/O instruction.
617 * @todo Not sure if this is useful yet. */
618#define IEM_CIMPL_F_IO RT_BIT_32(14)
619/** Force end of TB after the instruction. */
620#define IEM_CIMPL_F_END_TB RT_BIT_32(15)
621/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
622#define IEM_CIMPL_F_XCPT \
623 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
624/** @} */
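/** @remarks For instance (illustrative), the clues a conditional near jump
 * and a far call might carry:
 * @code{.cpp}
  uint32_t const fJcc     = IEM_CIMPL_F_BRANCH_RELATIVE | IEM_CIMPL_F_BRANCH_CONDITIONAL;
  uint32_t const fCallFar = IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE;
 * @endcode
 */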
625
626
627/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
628 *
629 * These flags are set when entering IEM and adjusted as code is executed, such
630 * that they will always contain the current values as instructions are
631 * finished.
632 *
633 * In recompiled execution mode, (most of) these flags are included in the
634 * translation block selection key and stored in IEMTB::fFlags alongside the
635 * IEMTB_F_XXX flags. The latter flags uses bits 31 thru 24, which are all zero
636 * in IEMCPU::fExec.
637 *
638 * @{ */
639/** Mode: The block target mode mask. */
640#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
641/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
642#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
643/** X86 Mode: Bit used to indicate a pre-386 CPU in 16-bit mode (for eliminating
644 * the conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
645 * 32-bit mode (for simplifying most memory accesses). */
646#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
647/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
648#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
649/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
650#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
651
652/** X86 Mode: 16-bit on 386 or later. */
653#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
654/** X86 Mode: 80286, 80186 and 8086/88 targeting blocks (EIP update opt). */
655#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
656/** X86 Mode: 16-bit protected mode on 386 or later. */
657#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
658/** X86 Mode: 16-bit protected mode on a pre-386 CPU (80286). */
659#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
660/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
661#define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)
662
663/** X86 Mode: 32-bit on 386 or later. */
664#define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
665/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
666#define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
667/** X86 Mode: 32-bit protected mode. */
668#define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
669/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
670#define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)
671
672/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
673#define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)
674
675
676/** Bypass access handlers when set. */
677#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
678/** Have pending hardware instruction breakpoints. */
679#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
680/** Have pending hardware data breakpoints. */
681#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)
682
683/** X86: Have pending hardware I/O breakpoints. */
684#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
685/** X86: Disregard the lock prefix (implied or not) when set. */
686#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)
687
688/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
689#define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
690
691/** Caller configurable options. */
692#define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
693
694/** X86: The current protection level (CPL) shift factor. */
695#define IEM_F_X86_CPL_SHIFT 8
696/** X86: The current protection level (CPL) mask. */
697#define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
698/** X86: The current protection level (CPL) shifted mask. */
699#define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
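/** @remarks Illustrative: extracting and updating the CPL kept in
 * IEMCPU::fExec.
 * @code{.cpp}
  uint8_t const uCpl = (pVCpu->iem.s.fExec & IEM_F_X86_CPL_MASK) >> IEM_F_X86_CPL_SHIFT;
  pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_X86_CPL_MASK)
                     | ((uint32_t)uNewCpl << IEM_F_X86_CPL_SHIFT);
 * @endcode
 */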
700
701/** X86 execution context.
702 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
703 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
704 * mode. */
705#define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
706/** X86 context: Plain regular execution context. */
707#define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
708/** X86 context: VT-x enabled. */
709#define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
710/** X86 context: AMD-V enabled. */
711#define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
712/** X86 context: In AMD-V or VT-x guest mode. */
713#define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
714/** X86 context: System management mode (SMM). */
715#define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)
716
717/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
718 * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
719 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
720 * already). */
721
726
727/** @} */
728
729
730/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
731 *
732 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
733 * translation block flags. The combined flag mask (subject to
734 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
735 *
736 * @{ */
737/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
738#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)
739
740/** Type: The block type mask. */
741#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
742/** Type: Purely threaded recompiler (via tables). */
743#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
744/** Type: Native recompilation. */
745#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)
746
747/** Set when we're starting the block in an "interrupt shadow".
748 * We don't need to distinguish between the two types of this mask, thus the one.
749 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
750#define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000)
751/** Set when we're currently inhibiting NMIs
752 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
753#define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000)
754
755/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
756 * we're close to the limit before starting a TB, as determined by
757 * iemGetTbFlagsForCurrentPc(). */
758#define IEMTB_F_CS_LIM_CHECKS UINT32_C(0x10000000)
759
760/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
761 * @note We skip the CPL as we don't currently generate ring-specific code,
762 * that's all handled in CIMPL functions.
763 *
764 * For the same reasons, we skip all of IEM_F_X86_CTX_MASK, with the
765 * exception of SMM (which we don't implement). */
766#define IEMTB_F_KEY_MASK ( (UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEM_F_X86_CPL_MASK | IEMTB_F_TYPE_MASK)) \
767 | IEM_F_X86_CTX_SMM)
768/** @} */
769
770AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
771AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
772AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
773AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
774AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
775AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
776AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
777AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
778AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
779AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
780AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
781AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
782AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
783AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
784AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
785AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
786AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
787AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
788AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);
789
790AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
791AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
792AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
793AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
794AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
795AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
796AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
797AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
798AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
799AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
800AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
801AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);
802
803AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
804AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
805AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
806
807/** Native instruction type for use with the native code generator.
808 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
809#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
810typedef uint8_t IEMNATIVEINSTR;
811#else
812typedef uint32_t IEMNATIVEINSTR;
813#endif
814/** Pointer to a native instruction unit. */
815typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
816/** Pointer to a const native instruction unit. */
817typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
818
819/**
820 * A call for the threaded call table.
821 */
822typedef struct IEMTHRDEDCALLENTRY
823{
824 /** The function to call (IEMTHREADEDFUNCS). */
825 uint16_t enmFunction;
826 /** Instruction number in the TB (for statistics). */
827 uint8_t idxInstr;
828 uint8_t uUnused0;
829
830 /** Offset into IEMTB::pabOpcodes. */
831 uint16_t offOpcode;
832 /** The opcode length. */
833 uint8_t cbOpcode;
834 /** Index in to IEMTB::aRanges. */
835 uint8_t idxRange;
836
837 /** Generic parameters. */
838 uint64_t auParams[3];
839} IEMTHRDEDCALLENTRY;
840AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
841/** Pointer to a threaded call entry. */
842typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
843/** Pointer to a const threaded call entry. */
844typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
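/** @remarks Illustrative sketch of how a threaded TB is executed: each entry
 * picks a worker from the threaded function table and gets the three generic
 * parameters. Treat the table name and worker signature as assumptions here.
 * @code{.cpp}
  PCIEMTHRDEDCALLENTRY pCallEntry = pTb->Thrd.paCalls;
  for (uint32_t i = 0; i < pTb->Thrd.cCalls; i++, pCallEntry++)
  {
      VBOXSTRICTRC rcStrict = g_apfnIemThreadedFunctions[pCallEntry->enmFunction](pVCpu,
                                  pCallEntry->auParams[0], pCallEntry->auParams[1], pCallEntry->auParams[2]);
      if (rcStrict != VINF_SUCCESS)
          return rcStrict;
  }
 * @endcode
 */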
845
846/**
847 * Native IEM TB 'function' typedef.
848 *
849 * This will throw/longjmp on occasion.
850 *
851 * @note AMD64 doesn't have that many non-volatile registers and does sport
852 * 32-bit address displacements, so we don't need pCtx.
853 *
854 * On ARM64 pCtx allows us to directly address the whole register
855 * context without requiring a separate indexing register holding the
856 * offset. This saves an instruction loading the offset for each guest
857 * CPU context access, at the cost of a non-volatile register.
858 * Fortunately, ARM64 has quite a lot more registers.
859 */
860typedef
861#ifdef RT_ARCH_AMD64
862int FNIEMTBNATIVE(PVMCPUCC pVCpu)
863#else
864int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
865#endif
866#if RT_CPLUSPLUS_PREREQ(201700)
867 IEM_NOEXCEPT_MAY_LONGJMP
868#endif
869 ;
870/** Pointer to a native IEM TB entry point function.
871 * This will throw/longjmp on occasion. */
872typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
873
874
875/**
876 * Translation block debug info entry type.
877 */
878typedef enum IEMTBDBGENTRYTYPE
879{
880 kIemTbDbgEntryType_Invalid = 0,
881 /** The entry is for marking a native code position.
882 * Entries following this all apply to this position. */
883 kIemTbDbgEntryType_NativeOffset,
884 /** The entry is for a new guest instruction. */
885 kIemTbDbgEntryType_GuestInstruction,
886 /** Marks the start of a threaded call. */
887 kIemTbDbgEntryType_ThreadedCall,
888 /** Marks the location of a label. */
889 kIemTbDbgEntryType_Label,
890 /** Info about a host register shadowing a guest register. */
891 kIemTbDbgEntryType_GuestRegShadowing,
892 kIemTbDbgEntryType_End
893} IEMTBDBGENTRYTYPE;
894
895/**
896 * Translation block debug info entry.
897 */
898typedef union IEMTBDBGENTRY
899{
900 /** Plain 32-bit view. */
901 uint32_t u;
902
903 /** Generic view for getting at the type field. */
904 struct
905 {
906 /** IEMTBDBGENTRYTYPE */
907 uint32_t uType : 4;
908 uint32_t uTypeSpecific : 28;
909 } Gen;
910
911 struct
912 {
913 /** kIemTbDbgEntryType_NativeOffset. */
914 uint32_t uType : 4;
915 /** Native code offset. */
916 uint32_t offNative : 28;
917 } NativeOffset;
918
919 struct
920 {
921 /** kIemTbDbgEntryType_GuestInstruction. */
922 uint32_t uType : 4;
923 uint32_t uUnused : 4;
924 /** The IEM_F_XXX flags. */
925 uint32_t fExec : 24;
926 } GuestInstruction;
927
928 struct
929 {
930 /* kIemTbDbgEntryType_ThreadedCall. */
931 uint32_t uType : 4;
932 /** Set if the call was recompiled to native code, clear if just calling
933 * threaded function. */
934 uint32_t fRecompiled : 1;
935 uint32_t uUnused : 11;
936 /** The threaded call number (IEMTHREADEDFUNCS). */
937 uint32_t enmCall : 16;
938 } ThreadedCall;
939
940 struct
941 {
942 /* kIemTbDbgEntryType_Label. */
943 uint32_t uType : 4;
944 uint32_t uUnused : 4;
945 /** The label type (IEMNATIVELABELTYPE). */
946 uint32_t enmLabel : 8;
947 /** The label data. */
948 uint32_t uData : 16;
949 } Label;
950
951 struct
952 {
953 /* kIemTbDbgEntryType_GuestRegShadowing. */
954 uint32_t uType : 4;
955 uint32_t uUnused : 4;
956 /** The guest register being shadowed (IEMNATIVEGSTREG). */
957 uint32_t idxGstReg : 8;
958 /** The host new register number, UINT8_MAX if dropped. */
959 uint32_t idxHstReg : 8;
960 /** The previous host register number, UINT8_MAX if new. */
961 uint32_t idxHstRegPrev : 8;
962 } GuestRegShadowing;
963} IEMTBDBGENTRY;
964AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
965/** Pointer to a debug info entry. */
966typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
967/** Pointer to a const debug info entry. */
968typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;
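/** @remarks Illustrative sketch: walking the debug info entries; entries
 * following a NativeOffset entry all apply to that native code position.
 * @code{.cpp}
  uint32_t offNative = 0;
  for (uint32_t i = 0; i < pDbgInfo->cEntries; i++)
  {
      IEMTBDBGENTRY const Entry = pDbgInfo->aEntries[i];
      switch ((IEMTBDBGENTRYTYPE)Entry.Gen.uType)
      {
          case kIemTbDbgEntryType_NativeOffset:
              offNative = Entry.NativeOffset.offNative;
              break;
          case kIemTbDbgEntryType_Label:
              // Entry.Label.enmLabel / Entry.Label.uData apply at offNative.
              break;
          default:
              break;
      }
  }
 * @endcode
 */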
969
970/**
971 * Translation block debug info.
972 */
973typedef struct IEMTBDBG
974{
975 /** Number of entries in aEntries. */
976 uint32_t cEntries;
977 /** Debug info entries. */
978 RT_FLEXIBLE_ARRAY_EXTENSION
979 IEMTBDBGENTRY aEntries[RT_FLEXIBLE_ARRAY];
980} IEMTBDBG;
981/** Pointer to TB debug info. */
982typedef IEMTBDBG *PIEMTBDBG;
983/** Pointer to const TB debug info. */
984typedef IEMTBDBG const *PCIEMTBDBG;
985
986
987/**
988 * Translation block.
989 *
990 * The current plan is to just keep TBs and associated lookup hash table private
991 * to each VCpu as that simplifies TB removal greatly (no races) and generally
992 * avoids using expensive atomic primitives for updating lists and stuff.
993 */
994#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
995typedef struct IEMTB
996{
997 /** Next block with the same hash table entry. */
998 struct IEMTB *pNext;
999 /** Usage counter. */
1000 uint32_t cUsed;
1001 /** The IEMCPU::msRecompilerPollNow last time it was used. */
1002 uint32_t msLastUsed;
1003
1004 /** @name What uniquely identifies the block.
1005 * @{ */
1006 RTGCPHYS GCPhysPc;
1007 /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
1008 uint32_t fFlags;
1009 union
1010 {
1011 struct
1012 {
1013 /** Relevant CS X86DESCATTR_XXX bits. */
1014 uint16_t fAttr;
1015 } x86;
1016 };
1017 /** @} */
1018
1019 /** Number of opcode ranges. */
1020 uint8_t cRanges;
1021 /** Statistics: Number of instructions in the block. */
1022 uint8_t cInstructions;
1023
1024 /** Type specific info. */
1025 union
1026 {
1027 struct
1028 {
1029 /** The call sequence table. */
1030 PIEMTHRDEDCALLENTRY paCalls;
1031 /** Number of calls in paCalls. */
1032 uint16_t cCalls;
1033 /** Number of calls allocated. */
1034 uint16_t cAllocated;
1035 } Thrd;
1036 struct
1037 {
1038 /** The native instructions (PFNIEMTBNATIVE). */
1039 PIEMNATIVEINSTR paInstructions;
1040 /** Number of instructions pointed to by paInstructions. */
1041 uint32_t cInstructions;
1042 } Native;
1043 /** Generic view for zeroing when freeing. */
1044 struct
1045 {
1046 uintptr_t uPtr;
1047 uint32_t uData;
1048 } Gen;
1049 };
1050
1051 /** The allocation chunk this TB belongs to. */
1052 uint8_t idxAllocChunk;
1053 uint8_t bUnused;
1054
1055 /** Number of bytes of opcodes stored in pabOpcodes.
1056 * @todo this field isn't really needed, aRanges keeps the actual info. */
1057 uint16_t cbOpcodes;
1058 /** Pointer to the opcode bytes this block was recompiled from. */
1059 uint8_t *pabOpcodes;
1060
1061 /** Debug info if enabled.
1062 * This is only generated by the native recompiler. */
1063 PIEMTBDBG pDbgInfo;
1064
1065 /* --- 64 byte cache line end --- */
1066
1067 /** Opcode ranges.
1068 *
1069 * The opcode checkers and maybe TLB loading functions will use this to figure
1070 * out what to do. The parameter will specify an entry and the opcode offset to
1071 * start at and the minimum number of bytes to verify (instruction length).
1072 *
1073 * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
1074 * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
1075 * code TLB (must have a valid entry for that address) and scan the ranges to
1076 * locate the corresponding opcodes. Probably.
1077 */
1078 struct IEMTBOPCODERANGE
1079 {
1080 /** Offset within pabOpcodes. */
1081 uint16_t offOpcodes;
1082 /** Number of bytes. */
1083 uint16_t cbOpcodes;
1084 /** The page offset. */
1085 RT_GCC_EXTENSION
1086 uint16_t offPhysPage : 12;
1087 /** Unused bits. */
1088 RT_GCC_EXTENSION
1089 uint16_t u2Unused : 2;
1090 /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
1091 RT_GCC_EXTENSION
1092 uint16_t idxPhysPage : 2;
1093 } aRanges[8];
1094
1095 /** Physical pages that this TB covers.
1096 * The GCPhysPc w/o page offset is element zero, so this array starts at page 1. */
1097 RTGCPHYS aGCPhysPages[2];
1098} IEMTB;
1099#pragma pack()
1100AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
1101AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
1102AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
1103AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
1104AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
1105AssertCompileMemberOffset(IEMTB, aRanges, 64);
1106AssertCompileMemberSize(IEMTB, aRanges[0], 6);
1107#if 1
1108AssertCompileSize(IEMTB, 128);
1109# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
1110#else
1111AssertCompileSize(IEMTB, 168);
1112# undef IEMTB_SIZE_IS_POWER_OF_TWO
1113#endif
1114
1115/** Pointer to a translation block. */
1116typedef IEMTB *PIEMTB;
1117/** Pointer to a const translation block. */
1118typedef IEMTB const *PCIEMTB;
1119
1120/**
1121 * A chunk of memory in the TB allocator.
1122 */
1123typedef struct IEMTBCHUNK
1124{
1125 /** Pointer to the translation blocks in this chunk. */
1126 PIEMTB paTbs;
1127#ifdef IN_RING0
1128 /** Allocation handle. */
1129 RTR0MEMOBJ hMemObj;
1130#endif
1131} IEMTBCHUNK;
1132
1133/**
1134 * A per-CPU translation block allocator.
1135 *
1136 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1137 * the length of the collision list, and of course also for cache line alignment
1138 * reasons, the TBs must be allocated with at least 64-byte alignment.
1139 * Memory is therefore allocated using one of the page aligned allocators.
1140 *
1141 *
1142 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1143 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1144 * that enables us to quickly calculate the allocation bitmap position when
1145 * freeing the translation block.
1146 */
1147typedef struct IEMTBALLOCATOR
1148{
1149 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1150 uint32_t uMagic;
1151
1152#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1153 /** Mask corresponding to cTbsPerChunk - 1. */
1154 uint32_t fChunkMask;
1155 /** Shift count corresponding to cTbsPerChunk. */
1156 uint8_t cChunkShift;
1157#else
1158 uint32_t uUnused;
1159 uint8_t bUnused;
1160#endif
1161 /** Number of chunks we're allowed to allocate. */
1162 uint8_t cMaxChunks;
1163 /** Number of chunks currently populated. */
1164 uint16_t cAllocatedChunks;
1165 /** Number of translation blocks per chunk. */
1166 uint32_t cTbsPerChunk;
1167 /** Chunk size. */
1168 uint32_t cbPerChunk;
1169
1170 /** The maximum number of TBs. */
1171 uint32_t cMaxTbs;
1172 /** Total number of TBs in the populated chunks.
1173 * (cAllocatedChunks * cTbsPerChunk) */
1174 uint32_t cTotalTbs;
1175 /** The current number of TBs in use.
1176 * The number of free TBs: cTotalTbs - cInUseTbs. */
1177 uint32_t cInUseTbs;
1178 /** Statistics: Number of the cInUseTbs that are native ones. */
1179 uint32_t cNativeTbs;
1180 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1181 uint32_t cThreadedTbs;
1182
1183 /** Where to start pruning TBs from when we're out of free TBs.
1184 * See iemTbAllocatorAllocSlow for details. */
1185 uint32_t iPruneFrom;
1186 /** Hint about which bit to start scanning the bitmap from. */
1187 uint32_t iStartHint;
1188
1189 /** Statistics: Number of TB allocation calls. */
1190 STAMCOUNTER StatAllocs;
1191 /** Statistics: Number of TB free calls. */
1192 STAMCOUNTER StatFrees;
1193 /** Statistics: Time spent pruning. */
1194 STAMPROFILE StatPrune;
1195
1196 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1197 PIEMTB pDelayedFreeHead;
1198
1199 /** Allocation chunks. */
1200 IEMTBCHUNK aChunks[256];
1201
1202 /** Allocation bitmap for all possible chunks. */
1203 RT_FLEXIBLE_ARRAY_EXTENSION
1204 uint64_t bmAllocated[RT_FLEXIBLE_ARRAY];
1205} IEMTBALLOCATOR;
1206/** Pointer to a TB allocator. */
1207typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1208
1209/** Magic value for the TB allocator (Emmet Harley Cohen). */
1210#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
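/** @remarks Illustrative sketch of how the chunk index gives the allocation
 * bitmap position when freeing a TB (assuming iprt/asm.h's ASMBitClear):
 * @code{.cpp}
  uintptr_t const idxInChunk = pTb - pTbAllocator->aChunks[pTb->idxAllocChunk].paTbs;
  #ifdef IEMTB_SIZE_IS_POWER_OF_TWO
  uint32_t const  idxBit     = ((uint32_t)pTb->idxAllocChunk << pTbAllocator->cChunkShift)
                             + (uint32_t)idxInChunk;
  #else
  uint32_t const  idxBit     = pTb->idxAllocChunk * pTbAllocator->cTbsPerChunk + (uint32_t)idxInChunk;
  #endif
  ASMBitClear(pTbAllocator->bmAllocated, idxBit);
 * @endcode
 */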
1211
1212
1213/**
1214 * A per-CPU translation block cache (hash table).
1215 *
1216 * The hash table is allocated once during IEM initialization and sized to double
1217 * the max TB count, rounded up to the nearest power of two (so we can use an
1218 * AND mask rather than a modulo division when hashing).
1219 */
1220typedef struct IEMTBCACHE
1221{
1222 /** Magic value (IEMTBCACHE_MAGIC). */
1223 uint32_t uMagic;
1224 /** Size of the hash table. This is a power of two. */
1225 uint32_t cHash;
1226 /** The mask corresponding to cHash. */
1227 uint32_t uHashMask;
1228 uint32_t uPadding;
1229
1230 /** @name Statistics
1231 * @{ */
1232 /** Number of collisions ever. */
1233 STAMCOUNTER cCollisions;
1234
1235 /** Statistics: Number of TB lookup misses. */
1236 STAMCOUNTER cLookupMisses;
1237 /** Statistics: Number of TB lookup hits (debug only). */
1238 STAMCOUNTER cLookupHits;
1239 STAMCOUNTER auPadding2[3];
1240 /** Statistics: Collision list length pruning. */
1241 STAMPROFILE StatPrune;
1242 /** @} */
1243
1244 /** The hash table itself.
1245 * @note The lower 6 bits of the pointer are used for keeping the collision
1246 * list length, so we can take action when it grows too long.
1247 * This works because TBs are allocated using a 64 byte (or
1248 * higher) alignment from page aligned chunks of memory, so the lower
1249 * 6 bits of the address will always be zero.
1250 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1251 */
1252 RT_FLEXIBLE_ARRAY_EXTENSION
1253 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1254} IEMTBCACHE;
1255 /** Pointer to a per-CPU translation block cache. */
1256typedef IEMTBCACHE *PIEMTBCACHE;
1257
1258/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1259#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1260
1261/** The collision count mask for IEMTBCACHE::apHash entries. */
1262#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1263/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1264#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1265/** Combine a TB pointer and a collision list length into a value for an
1266 * IEMTBCACHE::apHash entry. */
1267#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1268/** Extracts the TB pointer from an IEMTBCACHE::apHash entry,
1269 * masking off the collision list length. */
1270#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1271/** Extracts the collision list length from an
1272 * IEMTBCACHE::apHash entry. */
1273#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
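/** @remarks Illustrative: packing and unpacking an apHash entry; this works
 * because TBs are allocated with (at least) 64-byte alignment.
 * @code{.cpp}
  pCache->apHash[idxHash] = IEMTBCACHE_PTR_MAKE(pTb, cCollisions);
  PIEMTB    const pTbHead      = IEMTBCACHE_PTR_GET_TB(pCache->apHash[idxHash]);
  uintptr_t const cCollisions2 = IEMTBCACHE_PTR_GET_COUNT(pCache->apHash[idxHash]);
 * @endcode
 */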
1274
1275/**
1276 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1277 */
1278#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1279 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1280
1281/**
1282 * Calculates the hash table slot for a TB from physical PC address and TB
1283 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1284 */
1285#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1286 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
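/** @remarks Illustrative: looking up the hash table slot for a TB.
 * @code{.cpp}
  uint32_t const idxHash = IEMTBCACHE_HASH(pCache, pTb->fFlags, pTb->GCPhysPc);
  PIEMTB         pTbHead = IEMTBCACHE_PTR_GET_TB(pCache->apHash[idxHash]);
 * @endcode
 */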
1287
1288
1289/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1290 *
1291 * These flags parallel IEM_CIMPL_F_BRANCH_XXX.
1292 *
1293 * @{ */
1294/** Value if no branching happened recently. */
1295#define IEMBRANCHED_F_NO UINT8_C(0x00)
1296/** Flag set if direct branch, clear if absolute or indirect. */
1297#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1298/** Flag set if indirect branch, clear if direct or relative. */
1299#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1300/** Flag set if relative branch, clear if absolute or indirect. */
1301#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1302/** Flag set if conditional branch, clear if unconditional. */
1303#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1304/** Flag set if it's a far branch. */
1305#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1306/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero-byte relative jump. */
1307#define IEMBRANCHED_F_ZERO UINT8_C(0x20)
1308/** @} */
1309
1310
1311/**
1312 * The per-CPU IEM state.
1313 */
1314typedef struct IEMCPU
1315{
1316 /** Info status code that needs to be propagated to the IEM caller.
1317 * This cannot be passed internally, as it would complicate all success
1318 * checks within the interpreter making the code larger and almost impossible
1319 * to get right. Instead, we'll store status codes to pass on here. Each
1320 * source of these codes will perform appropriate sanity checks. */
1321 int32_t rcPassUp; /* 0x00 */
1322 /** Execution flag, IEM_F_XXX. */
1323 uint32_t fExec; /* 0x04 */
1324
1325 /** @name Decoder state.
1326 * @{ */
1327#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1328# ifdef IEM_WITH_CODE_TLB
1329 /** The offset of the next instruction byte. */
1330 uint32_t offInstrNextByte; /* 0x08 */
1331 /** The number of bytes available at pbInstrBuf for the current instruction.
1332 * This takes the max opcode length into account so that doesn't need to be
1333 * checked separately. */
1334 uint32_t cbInstrBuf; /* 0x0c */
1335 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1336 * This can be NULL if the page isn't mappable for some reason, in which
1337 * case we'll do fallback stuff.
1338 *
1339 * If we're executing an instruction from a user specified buffer,
1340 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1341 * aligned pointer but pointer to the user data.
1342 *
1343 * For instructions crossing pages, this will start on the first page and be
1344 * advanced to the next page by the time we've decoded the instruction. This
1345 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1346 */
1347 uint8_t const *pbInstrBuf; /* 0x10 */
1348# if ARCH_BITS == 32
1349 uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
1350# endif
1351 /** The program counter corresponding to pbInstrBuf.
1352 * This is set to a non-canonical address when we need to invalidate it. */
1353 uint64_t uInstrBufPc; /* 0x18 */
1354 /** The guest physical address corresponding to pbInstrBuf. */
1355 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1356 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1357 * This takes the CS segment limit into account. */
1358 uint16_t cbInstrBufTotal; /* 0x28 */
1359 /** Offset into pbInstrBuf of the first byte of the current instruction.
1360 * Can be negative to efficiently handle cross page instructions. */
1361 int16_t offCurInstrStart; /* 0x2a */
1362
1363 /** The prefix mask (IEM_OP_PRF_XXX). */
1364 uint32_t fPrefixes; /* 0x2c */
1365 /** The extra REX ModR/M register field bit (REX.R << 3). */
1366 uint8_t uRexReg; /* 0x30 */
1367 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1368 * (REX.B << 3). */
1369 uint8_t uRexB; /* 0x31 */
1370 /** The extra REX SIB index field bit (REX.X << 3). */
1371 uint8_t uRexIndex; /* 0x32 */
1372
1373 /** The effective segment register (X86_SREG_XXX). */
1374 uint8_t iEffSeg; /* 0x33 */
1375
1376 /** The offset of the ModR/M byte relative to the start of the instruction. */
1377 uint8_t offModRm; /* 0x34 */
1378
1379# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1380 /** The current offset into abOpcode. */
1381 uint8_t offOpcode; /* 0x35 */
1382# else
1383 uint8_t bUnused; /* 0x35 */
1384# endif
1385# else /* !IEM_WITH_CODE_TLB */
1386 /** The size of what has currently been fetched into abOpcode. */
1387 uint8_t cbOpcode; /* 0x08 */
1388 /** The current offset into abOpcode. */
1389 uint8_t offOpcode; /* 0x09 */
1390 /** The offset of the ModR/M byte relative to the start of the instruction. */
1391 uint8_t offModRm; /* 0x0a */
1392
1393 /** The effective segment register (X86_SREG_XXX). */
1394 uint8_t iEffSeg; /* 0x0b */
1395
1396 /** The prefix mask (IEM_OP_PRF_XXX). */
1397 uint32_t fPrefixes; /* 0x0c */
1398 /** The extra REX ModR/M register field bit (REX.R << 3). */
1399 uint8_t uRexReg; /* 0x10 */
1400 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1401 * (REX.B << 3). */
1402 uint8_t uRexB; /* 0x11 */
1403 /** The extra REX SIB index field bit (REX.X << 3). */
1404 uint8_t uRexIndex; /* 0x12 */
1405
1406# endif /* !IEM_WITH_CODE_TLB */
1407
1408 /** The effective operand mode. */
1409 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1410 /** The default addressing mode. */
1411 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1412 /** The effective addressing mode. */
1413 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1414 /** The default operand mode. */
1415 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1416
1417 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1418 uint8_t idxPrefix; /* 0x3a, 0x17 */
1419 /** 3rd VEX/EVEX/XOP register.
1420 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1421 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1422 /** The VEX/EVEX/XOP length field. */
1423 uint8_t uVexLength; /* 0x3c, 0x19 */
1424 /** Additional EVEX stuff. */
1425 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1426
1427# ifndef IEM_WITH_CODE_TLB
1428 /** Explicit alignment padding. */
1429 uint8_t abAlignment2a[1]; /* 0x1b */
1430# endif
1431 /** The FPU opcode (FOP). */
1432 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1433# ifndef IEM_WITH_CODE_TLB
1434 /** Explicit alignment padding. */
1435 uint8_t abAlignment2b[2]; /* 0x1e */
1436# endif
1437
1438 /** The opcode bytes. */
1439 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1440 /** Explicit alignment padding. */
1441# ifdef IEM_WITH_CODE_TLB
1442 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1443# else
1444 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1445# endif
1446#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1447 uint8_t abOpaqueDecoder[0x4f - 0x8];
1448#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1449 /** @} */
1450
1451
1452 /** The number of active guest memory mappings. */
1453 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1454
1455 /** Records for tracking guest memory mappings. */
1456 struct
1457 {
1458 /** The address of the mapped bytes. */
1459 R3R0PTRTYPE(void *) pv;
1460 /** The access flags (IEM_ACCESS_XXX).
1461 * IEM_ACCESS_INVALID if the entry is unused. */
1462 uint32_t fAccess;
1463#if HC_ARCH_BITS == 64
1464 uint32_t u32Alignment4; /**< Alignment padding. */
1465#endif
1466 } aMemMappings[3]; /* 0x50 LB 0x30 */
1467
1468 /** Locking records for the mapped memory. */
1469 union
1470 {
1471 PGMPAGEMAPLOCK Lock;
1472 uint64_t au64Padding[2];
1473 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1474
1475 /** Bounce buffer info.
1476 * This runs in parallel to aMemMappings. */
1477 struct
1478 {
1479 /** The physical address of the first byte. */
1480 RTGCPHYS GCPhysFirst;
1481 /** The physical address of the second page. */
1482 RTGCPHYS GCPhysSecond;
1483 /** The number of bytes in the first page. */
1484 uint16_t cbFirst;
1485 /** The number of bytes in the second page. */
1486 uint16_t cbSecond;
1487 /** Whether it's unassigned memory. */
1488 bool fUnassigned;
1489 /** Explicit alignment padding. */
1490 bool afAlignment5[3];
1491 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1492
1493 /** The flags of the current exception / interrupt. */
1494 uint32_t fCurXcpt; /* 0xf8 */
1495 /** The current exception / interrupt. */
1496 uint8_t uCurXcpt; /* 0xfc */
1497 /** Exception / interrupt recursion depth. */
1498 int8_t cXcptRecursions; /* 0xfd */
1499
1500 /** The next unused mapping index.
1501 * @todo try to find room for this up with cActiveMappings. */
1502 uint8_t iNextMapping; /* 0xfe */
1503 uint8_t abAlignment7[1];
1504
1505 /** Bounce buffer storage.
1506 * This runs in parallel to aMemMappings and aMemBbMappings. */
1507 struct
1508 {
1509 uint8_t ab[512];
1510 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1511
1512
1513 /** Pointer to the setjmp buffer - ring-3 context. */
1514 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1515 /** Pointer to the setjmp buffer - ring-0 context. */
1516 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1517
1518 /** @todo Should move this near @a fCurXcpt later. */
1519 /** The CR2 for the current exception / interrupt. */
1520 uint64_t uCurXcptCr2;
1521 /** The error code for the current exception / interrupt. */
1522 uint32_t uCurXcptErr;
1523
1524 /** @name Statistics
1525 * @{ */
1526 /** The number of instructions we've executed. */
1527 uint32_t cInstructions;
1528 /** The number of potential exits. */
1529 uint32_t cPotentialExits;
1530 /** The number of bytes of data or stack written (mostly for IEMExecOneEx).
1531 * This may contain uncommitted writes. */
1532 uint32_t cbWritten;
1533 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1534 uint32_t cRetInstrNotImplemented;
1535 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1536 uint32_t cRetAspectNotImplemented;
1537 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1538 uint32_t cRetInfStatuses;
1539 /** Counts other error statuses returned. */
1540 uint32_t cRetErrStatuses;
1541 /** Number of times rcPassUp has been used. */
1542 uint32_t cRetPassUpStatus;
1543 /** Number of times RZ left with instruction commit pending for ring-3. */
1544 uint32_t cPendingCommit;
1545 /** Number of long jumps. */
1546 uint32_t cLongJumps;
1547 /** @} */
1548
1549 /** @name Target CPU information.
1550 * @{ */
1551#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1552 /** The target CPU. */
1553 uint8_t uTargetCpu;
1554#else
1555 uint8_t bTargetCpuPadding;
1556#endif
1557 /** For selecting assembly workers matching the target CPU EFLAGS behaviour, see
1558 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry used when there is
1559 * no native host support and the 2nd when there is.
1560 *
1561 * The two values are typically indexed by a g_CpumHostFeatures bit.
1562 *
1563 * This is for instance used for the BSF & BSR instructions where AMD and
1564 * Intel CPUs produce different EFLAGS. */
1565 uint8_t aidxTargetCpuEflFlavour[2];
1566
1567 /** The CPU vendor. */
1568 CPUMCPUVENDOR enmCpuVendor;
1569 /** @} */
1570
1571 /** @name Host CPU information.
1572 * @{ */
1573 /** The CPU vendor. */
1574 CPUMCPUVENDOR enmHostCpuVendor;
1575 /** @} */
1576
1577 /** Counts RDMSR \#GP(0) LogRel(). */
1578 uint8_t cLogRelRdMsr;
1579 /** Counts WRMSR \#GP(0) LogRel(). */
1580 uint8_t cLogRelWrMsr;
1581 /** Alignment padding. */
1582 uint8_t abAlignment9[46];
1583
1584 /** @name Recompilation
1585 * @{ */
1586 /** Pointer to the current translation block.
1587 * This can either be one being executed or one being compiled. */
1588 R3PTRTYPE(PIEMTB) pCurTbR3;
1589 /** Fixed TB used for threaded recompilation.
1590 * This is allocated once with maxed-out sizes and re-used afterwards. */
1591 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1592 /** Pointer to the ring-3 TB cache for this EMT. */
1593 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
1594 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
1595 * The TBs are based on physical addresses, so this is needed to correlate the
1596 * RIP to the opcode bytes stored in the TB (AMD-V / VT-x).
1597 uint64_t uCurTbStartPc;
1598 /** Number of threaded TBs executed. */
1599 uint64_t cTbExecThreaded;
1600 /** Number of native TBs executed. */
1601 uint64_t cTbExecNative;
1602 /** Whether we need to check the opcode bytes for the current instruction.
1603 * This is set by a previous instruction if it modified memory or similar. */
1604 bool fTbCheckOpcodes;
1605 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1606 uint8_t fTbBranched;
1607 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1608 bool fTbCrossedPage;
1609 /** Whether to end the current TB. */
1610 bool fEndTb;
1611 /** Number of instructions before we need to emit an IRQ check call again.
1612 * This helps make sure we don't execute for too long w/o checking for
1613 * interrupts and immediately following instructions that may enable
1614 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1615 * required to make sure we check again following the next instruction as
1616 * well; see fTbCurInstrIsSti. */
1617 uint8_t cInstrTillIrqCheck;
1618 /** Indicates that the current instruction is an STI. This is set by the
1619 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1620 bool fTbCurInstrIsSti;
1621 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1622 uint16_t cbOpcodesAllocated;
1623 /** Space reserved for recompiler data / alignment. */
1624 bool afRecompilerStuff1[4];
1625 /** The virtual sync time at the last timer poll call. */
1626 uint32_t msRecompilerPollNow;
1627 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1628 uint32_t fTbCurInstr;
1629 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1630 uint32_t fTbPrevInstr;
1631 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1632 RTGCPHYS GCPhysInstrBufPrev;
1633 /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction.
1634 * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determine
1635 * whether a branch instruction jumps to a new page or stays within the
1636 * current one. */
1637 RTGCPHYS GCPhysTbBranchSrcBuf;
1638 /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction. */
1639 uint64_t GCVirtTbBranchSrcBuf;
1640 /** Pointer to the ring-3 TB allocator for this EMT. */
1641 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1642 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1643 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1644 /** Pointer to the native recompiler state for ring-3. */
1645 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1646 /** Alignment padding. */
1647 uint64_t auAlignment10[4];
1648 /** Statistics: Times TB execution was broken off before reaching the end. */
1649 STAMCOUNTER StatTbExecBreaks;
1650 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1651 STAMCOUNTER StatCheckIrqBreaks;
1652 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1653 STAMCOUNTER StatCheckModeBreaks;
1654 /** Statistics: Times a post jump target check missed and had to find new TB. */
1655 STAMCOUNTER StatCheckBranchMisses;
1656 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1657 STAMCOUNTER StatCheckNeedCsLimChecking;
1658 /** Threaded TB statistics: Number of instructions per TB. */
1659 STAMPROFILE StatTbThreadedInstr;
1660 /** Threaded TB statistics: Number of calls per TB. */
1661 STAMPROFILE StatTbThreadedCalls;
1662 /** Native TB statistics: Native code size per TB. */
1663 STAMPROFILE StatTbNativeCode;
1664 /** Native TB statistics: Profiling native recompilation. */
1665 STAMPROFILE StatNativeRecompilation;
1666 /** @} */
1667
1668 /** Data TLB.
1669 * @remarks Must be 64-byte aligned. */
1670 IEMTLB DataTlb;
1671 /** Instruction TLB.
1672 * @remarks Must be 64-byte aligned. */
1673 IEMTLB CodeTlb;
1674
1675 /** Exception statistics. */
1676 STAMCOUNTER aStatXcpts[32];
1677 /** Interrupt statistics. */
1678 uint32_t aStatInts[256];
1679
1680#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
1681 /** Instruction statistics for ring-0/raw-mode. */
1682 IEMINSTRSTATS StatsRZ;
1683 /** Instruction statistics for ring-3. */
1684 IEMINSTRSTATS StatsR3;
1685#endif
1686} IEMCPU;
1687AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
1688AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
1689AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
1690AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
1691AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
1692AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
1693
1694/** Pointer to the per-CPU IEM state. */
1695typedef IEMCPU *PIEMCPU;
1696/** Pointer to the const per-CPU IEM state. */
1697typedef IEMCPU const *PCIEMCPU;
1698
1699
1700/** @def IEM_GET_CTX
1701 * Gets the guest CPU context for the calling EMT.
1702 * @returns PCPUMCTX
1703 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1704 */
1705#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
1706
1707/** @def IEM_CTX_ASSERT
1708 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
1709 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1710 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
1711 */
1712#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
1713 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
1714 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
1715 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
1716
1717/** @def IEM_CTX_IMPORT_RET
1718 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1719 *
1720 * Will call CPUM to import the bits as needed.
1721 *
1722 * Returns on import failure.
1723 *
1724 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1725 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1726 */
1727#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
1728 do { \
1729 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1730 { /* likely */ } \
1731 else \
1732 { \
1733 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1734 AssertRCReturn(rcCtxImport, rcCtxImport); \
1735 } \
1736 } while (0)
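/*
 * A minimal usage sketch for the IEM_CTX_IMPORT_XXX family (hypothetical
 * helper, not part of this header), assuming the usual VBOXSTRICTRC return
 * convention:
 *
 *      static VBOXSTRICTRC iemExampleReadCr0(PVMCPUCC pVCpu, uint64_t *puValue)
 *      {
 *          // Bail out with the CPUM status code if the import fails.
 *          IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR0);
 *          *puValue = pVCpu->cpum.GstCtx.cr0;
 *          return VINF_SUCCESS;
 *      }
 */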
1737
1738/** @def IEM_CTX_IMPORT_NORET
1739 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1740 *
1741 * Will call CPUM to import the bits as needed.
1742 *
1743 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1744 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1745 */
1746#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
1747 do { \
1748 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1749 { /* likely */ } \
1750 else \
1751 { \
1752 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1753 AssertLogRelRC(rcCtxImport); \
1754 } \
1755 } while (0)
1756
1757/** @def IEM_CTX_IMPORT_JMP
1758 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1759 *
1760 * Will call CPUM to import the bits as needed.
1761 *
1762 * Jumps on import failure.
1763 *
1764 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1765 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1766 */
1767#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
1768 do { \
1769 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1770 { /* likely */ } \
1771 else \
1772 { \
1773 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1774 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
1775 } \
1776 } while (0)
1777
1778
1779
1780/** @def IEM_GET_TARGET_CPU
1781 * Gets the current IEMTARGETCPU value.
1782 * @returns IEMTARGETCPU value.
1783 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1784 */
1785#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
1786# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
1787#else
1788# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
1789#endif
1790
1791/** @def IEM_GET_INSTR_LEN
1792 * Gets the instruction length. */
1793#ifdef IEM_WITH_CODE_TLB
1794# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
1795#else
1796# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
1797#endif
1798
1799/** @def IEM_TRY_SETJMP
1800 * Wrapper around setjmp / try, hiding all the ugly differences.
1801 *
1802 * @note Use with extreme care as this is a fragile macro.
1803 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1804 * @param a_rcTarget The variable that should receive the status code in case
1805 * of a longjmp/throw.
1806 */
1807/** @def IEM_TRY_SETJMP_AGAIN
1808 * For when setjmp / try is used again in the same variable scope as a previous
1809 * IEM_TRY_SETJMP invocation.
1810 */
1811/** @def IEM_CATCH_LONGJMP_BEGIN
1812 * Start wrapper for catch / setjmp-else.
1813 *
1814 * This will set up a scope.
1815 *
1816 * @note Use with extreme care as this is a fragile macro.
1817 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1818 * @param a_rcTarget The variable that should receive the status code in case
1819 * of a longjmp/throw.
1820 */
1821/** @def IEM_CATCH_LONGJMP_END
1822 * End wrapper for catch / setjmp-else.
1823 *
1824 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
1825 * state.
1826 *
1827 * @note Use with extreme care as this is a fragile macro.
1828 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1829 */
1830#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
1831# ifdef IEM_WITH_THROW_CATCH
1832# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
1833 a_rcTarget = VINF_SUCCESS; \
1834 try
1835# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
1836 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
1837# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
1838 catch (int rcThrown) \
1839 { \
1840 a_rcTarget = rcThrown
1841# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
1842 } \
1843 ((void)0)
1844# else /* !IEM_WITH_THROW_CATCH */
1845# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
1846 jmp_buf JmpBuf; \
1847 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
1848 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
1849 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
1850# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
1851 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
1852 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
1853 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
1854# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
1855 else \
1856 { \
1857 ((void)0)
1858# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
1859 } \
1860 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
1861# endif /* !IEM_WITH_THROW_CATCH */
1862#endif /* IEM_WITH_SETJMP */
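/*
 * Illustrative sketch of the intended usage pattern (hypothetical caller;
 * iemExampleExecOneInner is made up for the example):
 *
 *      VBOXSTRICTRC rcStrict;
 *      IEM_TRY_SETJMP(pVCpu, rcStrict)
 *      {
 *          rcStrict = iemExampleExecOneInner(pVCpu); // may longjmp/throw
 *      }
 *      IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *          // rcStrict now holds the status passed to IEM_DO_LONGJMP / throw.
 *      IEM_CATCH_LONGJMP_END(pVCpu);
 */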
1863
1864
1865/**
1866 * Shared per-VM IEM data.
1867 */
1868typedef struct IEM
1869{
1870 /** The VMX APIC-access page handler type. */
1871 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
1872#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
1873 /** Set if the CPUID host call functionality is enabled. */
1874 bool fCpuIdHostCall;
1875#endif
1876} IEM;
1877
1878
1879
1880/** @name IEM_ACCESS_XXX - Access details.
1881 * @{ */
1882#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
1883#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
1884#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
1885#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
1886#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
1887#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
1888#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
1889#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
1890#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
1891#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
1892/** The writes are partial, so initialize the bounce buffer with the
1893 * original RAM content. */
1894#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
1895/** Used in aMemMappings to indicate that the entry is bounce buffered. */
1896#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
1897/** Bounce buffer with ring-3 write pending, first page. */
1898#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
1899/** Bounce buffer with ring-3 write pending, second page. */
1900#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
1901/** Not locked, accessed via the TLB. */
1902#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
1903/** Valid bit mask. */
1904#define IEM_ACCESS_VALID_MASK UINT32_C(0x00001fff)
1905/** Shift count for the TLB flags (upper word). */
1906#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
1907
1908/** Read+write data alias. */
1909#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
1910/** Write data alias. */
1911#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
1912/** Read data alias. */
1913#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
1914/** Instruction fetch alias. */
1915#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
1916/** Stack write alias. */
1917#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
1918/** Stack read alias. */
1919#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
1920/** Stack read+write alias. */
1921#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
1922/** Read system table alias. */
1923#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
1924/** Read+write system table alias. */
1925#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
1926/** @} */
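/*
 * Illustrative sketch: decomposing an access-details word (hypothetical
 * helper, not part of the API):
 *
 *      DECLINLINE(bool) iemExampleIsWritableDataAccess(uint32_t fAccess)
 *      {
 *          Assert(fAccess != IEM_ACCESS_INVALID);
 *          return (fAccess & IEM_ACCESS_TYPE_WRITE)
 *              && (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_DATA;
 *      }
 *
 * Thus IEM_ACCESS_DATA_RW and IEM_ACCESS_DATA_W satisfy the predicate, while
 * IEM_ACCESS_STACK_W and IEM_ACCESS_SYS_R do not.
 */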
1927
1928/** @name Prefix constants (IEMCPU::fPrefixes)
1929 * @{ */
1930#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
1931#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
1932#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
1933#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
1934#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
1935#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
1936#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
1937
1938#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
1939#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
1940#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
1941
1942#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
1943#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
1944#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
1945
1946#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
1947#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
1948#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
1949#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
1950/** Mask with all the REX prefix flags.
1951 * This is generally for use when needing to undo the REX prefixes when they
1952 * are followed by legacy prefixes and therefore do not immediately precede
1953 * the first opcode byte.
1954 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
1955#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
1956
1957#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indicates VEX prefix. */
1958#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indicates EVEX prefix. */
1959#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indicates XOP prefix. */
1960/** @} */
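/*
 * Simplified sketch of how a decoder might accumulate these bits (hypothetical
 * helper; it skips the segment and repeat prefixes and ignores the rule that a
 * REX prefix is cancelled when followed by a legacy prefix):
 *
 *      DECLINLINE(unsigned) iemExampleDecodePrefixes(uint8_t const *pb, uint32_t *pfPrefixes)
 *      {
 *          uint32_t fPrefixes = 0;
 *          unsigned off = 0;
 *          for (;;)
 *          {
 *              uint8_t const b = pb[off];
 *              if (b == 0xf0)              fPrefixes |= IEM_OP_PRF_LOCK;
 *              else if (b == 0x66)         fPrefixes |= IEM_OP_PRF_SIZE_OP;
 *              else if (b == 0x67)         fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
 *              else if ((b & 0xf0) == 0x40)            // REX, 64-bit mode only
 *                  fPrefixes |= IEM_OP_PRF_REX
 *                             | (b & 8 ? IEM_OP_PRF_SIZE_REX_W : 0)
 *                             | (b & 4 ? IEM_OP_PRF_REX_R : 0)
 *                             | (b & 2 ? IEM_OP_PRF_REX_X : 0)
 *                             | (b & 1 ? IEM_OP_PRF_REX_B : 0);
 *              else
 *                  break;                              // first real opcode byte
 *              off++;
 *          }
 *          *pfPrefixes = fPrefixes;
 *          return off;                                 // prefix bytes consumed
 *      }
 */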
1961
1962/** @name IEMOPFORM_XXX - Opcode forms
1963 * @note These are ORed together with IEMOPHINT_XXX.
1964 * @{ */
1965/** ModR/M: reg, r/m */
1966#define IEMOPFORM_RM 0
1967/** ModR/M: reg, r/m (register) */
1968#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
1969/** ModR/M: reg, r/m (memory) */
1970#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
1971/** ModR/M: reg, r/m, imm */
1972#define IEMOPFORM_RMI 1
1973/** ModR/M: reg, r/m, imm (register) */
1974#define IEMOPFORM_RMI_REG (IEMOPFORM_RMI | IEMOPFORM_MOD3)
1975/** ModR/M: reg, r/m, imm (memory) */
1976#define IEMOPFORM_RMI_MEM (IEMOPFORM_RMI | IEMOPFORM_NOT_MOD3)
1977/** ModR/M: r/m, reg */
1978#define IEMOPFORM_MR 2
1979/** ModR/M: r/m (register), reg */
1980#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
1981/** ModR/M: r/m (memory), reg */
1982#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
1983/** ModR/M: r/m, reg, imm */
1984#define IEMOPFORM_MRI 3
1985/** ModR/M: r/m (register), reg, imm */
1986#define IEMOPFORM_MRI_REG (IEMOPFORM_MRI | IEMOPFORM_MOD3)
1987/** ModR/M: r/m (memory), reg, imm */
1988#define IEMOPFORM_MRI_MEM (IEMOPFORM_MRI | IEMOPFORM_NOT_MOD3)
1989/** ModR/M: r/m only */
1990#define IEMOPFORM_M 4
1991/** ModR/M: r/m only (register). */
1992#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
1993/** ModR/M: r/m only (memory). */
1994#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
1995/** ModR/M: reg only */
1996#define IEMOPFORM_R 5
1997
1998/** VEX+ModR/M: reg, r/m */
1999#define IEMOPFORM_VEX_RM 8
2000/** VEX+ModR/M: reg, r/m (register) */
2001#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
2002/** VEX+ModR/M: reg, r/m (memory) */
2003#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
2004/** VEX+ModR/M: r/m, reg */
2005#define IEMOPFORM_VEX_MR 9
2006/** VEX+ModR/M: r/m (register), reg */
2007#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
2008/** VEX+ModR/M: r/m (memory), reg */
2009#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
2010/** VEX+ModR/M: r/m only */
2011#define IEMOPFORM_VEX_M 10
2012/** VEX+ModR/M: r/m only (register). */
2013#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
2014/** VEX+ModR/M: r/m only (memory). */
2015#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
2016/** VEX+ModR/M: reg only */
2017#define IEMOPFORM_VEX_R 11
2018/** VEX+ModR/M: reg, vvvv, r/m */
2019#define IEMOPFORM_VEX_RVM 12
2020/** VEX+ModR/M: reg, vvvv, r/m (register). */
2021#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
2022/** VEX+ModR/M: reg, vvvv, r/m (memory). */
2023#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
2024/** VEX+ModR/M: reg, r/m, vvvv */
2025#define IEMOPFORM_VEX_RMV 13
2026/** VEX+ModR/M: reg, r/m, vvvv (register). */
2027#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
2028/** VEX+ModR/M: reg, r/m, vvvv (memory). */
2029#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
2030/** VEX+ModR/M: reg, r/m, imm8 */
2031#define IEMOPFORM_VEX_RMI 14
2032/** VEX+ModR/M: reg, r/m, imm8 (register). */
2033#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
2034/** VEX+ModR/M: reg, r/m, imm8 (memory). */
2035#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
2036/** VEX+ModR/M: r/m, vvvv, reg */
2037#define IEMOPFORM_VEX_MVR 15
2038/** VEX+ModR/M: r/m, vvvv, reg (register) */
2039#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
2040/** VEX+ModR/M: r/m, vvvv, reg (memory) */
2041#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
2042/** VEX+ModR/M+/n: vvvv, r/m */
2043#define IEMOPFORM_VEX_VM 16
2044/** VEX+ModR/M+/n: vvvv, r/m (register) */
2045#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
2046/** VEX+ModR/M+/n: vvvv, r/m (memory) */
2047#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
2048
2049/** Fixed register instruction, no R/M. */
2050#define IEMOPFORM_FIXED 32
2051
2052/** The r/m is a register. */
2053#define IEMOPFORM_MOD3 RT_BIT_32(8)
2054/** The r/m is a memory access. */
2055#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
2056/** @} */
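/*
 * Illustrative sketch: selecting the _REG / _MEM variant of a form from the
 * ModR/M byte (hypothetical helper):
 *
 *      DECLINLINE(uint32_t) iemExampleResolveForm(uint32_t fForm, uint8_t bModRm)
 *      {
 *          Assert(!(fForm & (IEMOPFORM_MOD3 | IEMOPFORM_NOT_MOD3)));
 *          return fForm | (((bModRm >> 6) & 3) == 3 ? IEMOPFORM_MOD3 : IEMOPFORM_NOT_MOD3);
 *      }
 *
 * For instance, iemExampleResolveForm(IEMOPFORM_RM, 0xc0) yields IEMOPFORM_RM_REG.
 */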
2057
2058/** @name IEMOPHINT_XXX - Additional Opcode Hints
2059 * @note These are ORed together with IEMOPFORM_XXX.
2060 * @{ */
2061/** Ignores the operand size prefix (66h). */
2062#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
2063/** Ignores REX.W (aka WIG). */
2064#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
2065/** Both the operand size prefixes (66h + REX.W) are ignored. */
2066#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
2067/** Allowed with the lock prefix. */
2068#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(15)
2069/** The VEX.L value is ignored (aka LIG). */
2070#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
2071/** The VEX.L value must be zero (i.e. 128-bit width only). */
2072#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
2073/** The VEX.V value must be zero. */
2074#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(14)
2075
2076/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
2077#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
2078/** @} */
2079
2080/**
2081 * Possible hardware task switch sources.
2082 */
2083typedef enum IEMTASKSWITCH
2084{
2085 /** Task switch caused by an interrupt/exception. */
2086 IEMTASKSWITCH_INT_XCPT = 1,
2087 /** Task switch caused by a far CALL. */
2088 IEMTASKSWITCH_CALL,
2089 /** Task switch caused by a far JMP. */
2090 IEMTASKSWITCH_JUMP,
2091 /** Task switch caused by an IRET. */
2092 IEMTASKSWITCH_IRET
2093} IEMTASKSWITCH;
2094AssertCompileSize(IEMTASKSWITCH, 4);
2095
2096/**
2097 * Possible CrX load (write) sources.
2098 */
2099typedef enum IEMACCESSCRX
2100{
2101 /** CrX access caused by 'mov crX' instruction. */
2102 IEMACCESSCRX_MOV_CRX,
2103 /** CrX (CR0) write caused by 'lmsw' instruction. */
2104 IEMACCESSCRX_LMSW,
2105 /** CrX (CR0) write caused by 'clts' instruction. */
2106 IEMACCESSCRX_CLTS,
2107 /** CrX (CR0) read caused by 'smsw' instruction. */
2108 IEMACCESSCRX_SMSW
2109} IEMACCESSCRX;
2110
2111#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2112/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
2113 *
2114 * These flags provide further context to SLAT page-walk failures that could not be
2115 * determined by PGM (e.g., PGM is not privy to memory access permissions).
2116 *
2117 * @{
2118 */
2119/** Translating a nested-guest linear address failed accessing a nested-guest
2120 * physical address. */
2121# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
2122/** Translating a nested-guest linear address failed accessing a
2123 * paging-structure entry or updating accessed/dirty bits. */
2124# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
2125/** @} */
2126
2127DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
2128# ifndef IN_RING3
2129DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
2130# endif
2131#endif
2132
2133/**
2134 * Indicates to the verifier that the given flag set is undefined.
2135 *
2136 * Can be invoked again to add more flags.
2137 *
2138 * This is a NOOP if the verifier isn't compiled in.
2139 *
2140 * @note We're temporarily keeping this until code is converted to new
2141 * disassembler style opcode handling.
2142 */
2143#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2144
2145
2146/** @def IEM_DECL_IMPL_TYPE
2147 * For typedef'ing an instruction implementation function.
2148 *
2149 * @param a_RetType The return type.
2150 * @param a_Name The name of the type.
2151 * @param a_ArgList The argument list enclosed in parentheses.
2152 */
2153
2154/** @def IEM_DECL_IMPL_DEF
2155 * For defining an instruction implementation function.
2156 *
2157 * @param a_RetType The return type.
2158 * @param a_Name The name of the function.
2159 * @param a_ArgList The argument list enclosed in parentheses.
2160 */
2161
2162#if defined(__GNUC__) && defined(RT_ARCH_X86)
2163# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2164 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
2165# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2166 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2167# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2168 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2169
2170#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
2171# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2172 a_RetType (__fastcall a_Name) a_ArgList
2173# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2174 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2175# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2176 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2177
2178#elif __cplusplus >= 201700 /* P0012R1 support */
2179# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2180 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
2181# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2182 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2183# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2184 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2185
2186#else
2187# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2188 a_RetType (VBOXCALL a_Name) a_ArgList
2189# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2190 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2191# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2192 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2193
2194#endif
2195
2196/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
2197RT_C_DECLS_BEGIN
2198extern uint8_t const g_afParity[256];
2199RT_C_DECLS_END
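/*
 * Illustrative sketch of how a worker merges PF using the table, assuming each
 * entry holds either X86_EFL_PF or zero for the corresponding result byte:
 *
 *      uint32_t fEfl = *pEFlags;
 *      fEfl &= ~X86_EFL_PF;
 *      fEfl |= g_afParity[uResult & 0xff];
 *      *pEFlags = fEfl;
 */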
2200
2201
2202/** @name Arithmetic assignment operations on bytes (binary).
2203 * @{ */
2204typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2205typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
2206FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
2207FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
2208FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
2209FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
2210FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
2211FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
2212FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
2213/** @} */
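/*
 * Simplified sketch of the binary worker calling convention (hypothetical ADD
 * that only updates CF, ZF, SF and PF; the real workers handle AF and OF too):
 *
 *      IEM_DECL_IMPL_DEF(void, iemExampleAImpl_add_u8,(uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags))
 *      {
 *          uint8_t const  uDst    = *pu8Dst;
 *          uint8_t const  uResult = (uint8_t)(uDst + u8Src);
 *          *pu8Dst = uResult;
 *
 *          uint32_t fEfl = *pEFlags & ~(uint32_t)(X86_EFL_CF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_PF);
 *          fEfl |= uResult < uDst  ? X86_EFL_CF : 0;   // unsigned carry-out
 *          fEfl |= uResult == 0    ? X86_EFL_ZF : 0;
 *          fEfl |= uResult & 0x80  ? X86_EFL_SF : 0;
 *          fEfl |= g_afParity[uResult];
 *          *pEFlags = fEfl;
 *      }
 */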
2214
2215/** @name Arithmetic assignment operations on words (binary).
2216 * @{ */
2217typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2218typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
2219FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
2220FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
2221FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
2222FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
2223FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
2224FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
2225FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
2226/** @} */
2227
2228/** @name Arithmetic assignment operations on double words (binary).
2229 * @{ */
2230typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2231typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
2232FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
2233FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
2234FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
2235FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
2236FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
2237FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
2238FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
2239FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
2240FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
2241FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
2242/** @} */
2243
2244/** @name Arithmetic assignment operations on quad words (binary).
2245 * @{ */
2246typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2247typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
2248FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
2249FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
2250FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
2251FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
2252FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
2253FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
2254FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
2255FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
2256FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
2257FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
2258/** @} */
2259
2260typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU8,(uint8_t const *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2261typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
2262typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU16,(uint16_t const *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2263typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
2264typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU32,(uint32_t const *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2265typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
2266typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU64,(uint64_t const *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2267typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
2268
2269/** @name Compare operations (thrown in with the binary ops).
2270 * @{ */
2271FNIEMAIMPLBINROU8 iemAImpl_cmp_u8;
2272FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
2273FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
2274FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
2275/** @} */
2276
2277/** @name Test operations (thrown in with the binary ops).
2278 * @{ */
2279FNIEMAIMPLBINROU8 iemAImpl_test_u8;
2280FNIEMAIMPLBINROU16 iemAImpl_test_u16;
2281FNIEMAIMPLBINROU32 iemAImpl_test_u32;
2282FNIEMAIMPLBINROU64 iemAImpl_test_u64;
2283/** @} */
2284
2285/** @name Bit operations operations (thrown in with the binary ops).
2286 * @{ */
2287FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
2288FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
2289FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
2290FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
2291FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
2292FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
2293FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
2294FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
2295FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
2296FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
2297FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
2298FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
2299/** @} */
2300
2301/** @name Arithmetic three operand operations on double words (binary).
2302 * @{ */
2303typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
2304typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
2305FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
2306FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
2307FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
2308/** @} */
2309
2310/** @name Arithmetic three operand operations on quad words (binary).
2311 * @{ */
2312typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
2313typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
2314FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
2315FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
2316FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
2317/** @} */
2318
2319/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
2320 * @{ */
2321typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
2322typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
2323FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
2324FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
2325FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
2326FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
2327FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
2328FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
2329/** @} */
2330
2331/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
2332 * @{ */
2333typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
2334typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
2335FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
2336FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
2337FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
2338FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
2339FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
2340FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
2341/** @} */
2342
2343/** @name MULX 32-bit and 64-bit.
2344 * @{ */
2345typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
2346typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
2347FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
2348
2349typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
2350typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
2351FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
2352/** @} */
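/*
 * Illustrative fallback sketch for MULX (a flag-less widening multiply;
 * assuming puDst1 receives the high half and puDst2 the low half):
 *
 *      IEM_DECL_IMPL_DEF(void, iemExampleAImpl_mulx_u32,(uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2))
 *      {
 *          uint64_t const uResult = (uint64_t)uSrc1 * uSrc2;
 *          *puDst2 = (uint32_t)uResult;            // low dword first, in case puDst1 == puDst2
 *          *puDst1 = (uint32_t)(uResult >> 32);    // high dword
 *      }
 *
 * Note the absent pEFlags parameter: MULX leaves all flags unchanged.
 */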
2353
2354
2355/** @name Exchange memory with register operations.
2356 * @{ */
2357IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2358IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2359IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2360IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2361IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2362IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2363IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2364IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2365/** @} */
2366
2367/** @name Exchange and add operations.
2368 * @{ */
2369IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2370IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2371IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2372IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2373IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2374IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2375IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2376IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2377/** @} */
2378
2379/** @name Compare and exchange.
2380 * @{ */
2381IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2382IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2383IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2384IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2385IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2386IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2387#if ARCH_BITS == 32
2388IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2389IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2390#else
2391IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2392IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2393#endif
2394IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2395 uint32_t *pEFlags));
2396IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2397 uint32_t *pEFlags));
2398IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2399 uint32_t *pEFlags));
2400IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2401 uint32_t *pEFlags));
2402#ifndef RT_ARCH_ARM64
2403IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
2404 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
2405#endif
2406/** @} */
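/*
 * Illustrative sketch of the CMPXCHG worker semantics (unlocked u8 variant,
 * borrowing the compare worker for the EFLAGS update):
 *
 *      IEM_DECL_IMPL_DEF(void, iemExampleAImpl_cmpxchg_u8,(uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags))
 *      {
 *          uint8_t const uOld = *pu8Dst;
 *          iemAImpl_cmp_u8(puAl, uOld, pEFlags);   // EFLAGS from AL - [dst]
 *          if (*puAl == uOld)
 *              *pu8Dst = uSrcReg;                  // equal: write the source register
 *          else
 *              *puAl = uOld;                       // not equal: load AL from the destination
 *      }
 */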
2407
2408/** @name Memory ordering
2409 * @{ */
2410typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
2411typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
2412IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
2413IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
2414IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
2415#ifndef RT_ARCH_ARM64
2416IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
2417#endif
2418/** @} */
2419
2420/** @name Double precision shifts
2421 * @{ */
2422typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
2423typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
2424typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
2425typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
2426typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
2427typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
2428FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
2429FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
2430FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
2431FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
2432FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
2433FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
2434/** @} */
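/*
 * Illustrative sketch of the SHLD semantics (the EFLAGS details and the
 * out-of-range cases the real workers handle are omitted):
 *
 *      IEM_DECL_IMPL_DEF(void, iemExampleAImpl_shld_u32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags))
 *      {
 *          cShift &= 31;
 *          if (cShift)
 *          {
 *              // Destination shifts left; vacated bits are filled from the source.
 *              *pu32Dst = (*pu32Dst << cShift) | (u32Src >> (32 - cShift));
 *              // ... CF/OF/SF/ZF/PF updates elided ...
 *          }
 *          RT_NOREF(pEFlags);
 *      }
 */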
2435
2436
2437/** @name Bit search operations (thrown in with the binary ops).
2438 * @{ */
2439FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
2440FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
2441FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
2442FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
2443FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
2444FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
2445FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
2446FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
2447FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
2448FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
2449FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
2450FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
2451FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
2452FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
2453FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
2454/** @} */
2455
2456/** @name Signed multiplication operations (thrown in with the binary ops).
2457 * @{ */
2458FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
2459FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
2460FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
2461/** @} */
2462
2463/** @name Arithmetic assignment operations on bytes (unary).
2464 * @{ */
2465typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
2466typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
2467FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
2468FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
2469FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
2470FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
2471/** @} */
2472
2473/** @name Arithmetic assignment operations on words (unary).
2474 * @{ */
2475typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
2476typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
2477FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
2478FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
2479FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
2480FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
2481/** @} */
2482
2483/** @name Arithmetic assignment operations on double words (unary).
2484 * @{ */
2485typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
2486typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
2487FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
2488FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
2489FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
2490FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
2491/** @} */
2492
2493/** @name Arithmetic assignment operations on quad words (unary).
2494 * @{ */
2495typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
2496typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
2497FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
2498FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
2499FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
2500FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
2501/** @} */
2502
2503
2504/** @name Shift operations on bytes (Group 2).
2505 * @{ */
2506typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
2507typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
2508FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
2509FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
2510FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
2511FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
2512FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
2513FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
2514FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
2515/** @} */
2516
2517/** @name Shift operations on words (Group 2).
2518 * @{ */
2519typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
2520typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
2521FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
2522FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
2523FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
2524FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
2525FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
2526FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
2527FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
2528/** @} */
2529
2530/** @name Shift operations on double words (Group 2).
2531 * @{ */
2532typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
2533typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
2534FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
2535FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
2536FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
2537FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
2538FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
2539FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
2540FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
2541/** @} */
2542
2543/** @name Shift operations on quad words (Group 2).
2544 * @{ */
2545typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
2546typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
2547FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
2548FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
2549FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
2550FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
2551FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
2552FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
2553FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
2554/** @} */

/** @name Multiplication and division operations.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;

typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
/** @} */
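
/* Illustrative usage sketch (local names are made up): unlike most helpers in
 * this file the multiplication/division workers return an int status; the
 * callers treat a non-zero return as reason to raise \#DE (divide by zero or
 * result overflow) instead of committing the register updates.
 *
 *      uint16_t uAX = 0x1234, uDX = 0;
 *      uint32_t fEFlags = 0;
 *      if (iemAImpl_div_u16(&uAX, &uDX, 0x10, &fEFlags) != 0)
 *          return iemRaiseDivideError(pVCpu); // what the callers do on failure
 */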

/** @name Byte Swap.
 * @{ */
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
/** @} */
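
/* Illustrative note: the 16-bit variant takes a 32-bit pointer because BSWAP
 * with a 16-bit operand acts on the full 32-bit register (the architected
 * result of the 16-bit form is undefined).  Usage sketch (made-up names):
 *
 *      uint32_t uReg = UINT32_C(0x12345678);
 *      iemAImpl_bswap_u32(&uReg);  // uReg == 0x78563412
 */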

/** @name Misc.
 * @{ */
FNIEMAIMPLBINU16 iemAImpl_arpl;
/** @} */

/** @name RDRAND and RDSEED
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;

FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
/** @} */
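
/* Note on the _fallback suffix used here and for many helpers below: the
 * undecorated name is the variant relying on the host instruction, while the
 * _fallback twin is a plain C implementation for hosts lacking it.
 * Hypothetical selection sketch (the capability flag is made up):
 *
 *      PFNIEMAIMPLRDRANDSEEDU32 pfn = fHostHasRdRand
 *                                   ? iemAImpl_rdrand_u32
 *                                   : iemAImpl_rdrand_u32_fallback;
 *      pfn(&uDst, &fEFlags);
 */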

/** @name ADOX and ADCX
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU32,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU64,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc));
typedef FNIEMAIMPLADXU32 *PFNIEMAIMPLADXU32;
typedef FNIEMAIMPLADXU64 *PFNIEMAIMPLADXU64;

FNIEMAIMPLADXU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
FNIEMAIMPLADXU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
FNIEMAIMPLADXU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
FNIEMAIMPLADXU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
/** @} */

/** @name FPU operations taking a 32-bit float argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;

FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fadd_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fmul_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fsub_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fsubr_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fdiv_r80_by_r32;
FNIEMAIMPLFPUR32    iemAImpl_fdivr_r80_by_r32;

IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
/** @} */
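
/* Illustrative usage sketch (operand names are made up): the FPU helpers read
 * the guest FXSTATE (rounding and precision control) but do not modify it;
 * the result value and status word come back via the IEMFPURESULT buffer
 * declared earlier in this file, for the caller to commit.
 *
 *      IEMFPURESULT Res;
 *      iemAImpl_fadd_r80_by_r32(pFpuState, &Res, &r80St0, &r32Src);
 *      // Res.r80Result and Res.FSW are then committed by the caller.
 */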

/** @name FPU operations taking a 64-bit float argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;

FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fadd_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fmul_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fsub_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fsubr_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fdiv_r80_by_r64;
FNIEMAIMPLFPUR64    iemAImpl_fdivr_r80_by_r64;

IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTFLOAT64U pr64Val, PCRTFLOAT80U pr80Val));
/** @} */

/** @name FPU operations taking an 80-bit float argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;

FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80,  iemAImpl_fpatan_r80_by_r80_amd,  iemAImpl_fpatan_r80_by_r80_intel;
FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80,   iemAImpl_fyl2x_r80_by_r80_amd,   iemAImpl_fyl2x_r80_by_r80_intel;
FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;

typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
                                                          PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
                                                           PCRTFLOAT80U pr80Val));
typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;

IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));

IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
                                                 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));

/** @} */

/** @name FPU operations taking a 16-bit signed integer argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;

FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
/** @} */

/** @name FPU operations taking a 32-bit signed integer argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
                                                   PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;

FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
                                                      PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
/** @} */

/** @name FPU operations taking a 64-bit signed integer argument
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
                                                          int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;

IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
/** @} */


/** Temporary type representing a 256-bit vector register. */
typedef struct { uint64_t au64[4]; } IEMVMM256;
/** Temporary type pointing to a 256-bit vector register. */
typedef IEMVMM256 *PIEMVMM256;
/** Temporary type pointing to a const 256-bit vector register. */
typedef const IEMVMM256 *PCIEMVMM256;


/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_paddq_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psubq_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddwd_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
FNIEMAIMPLMEDIAF2U64 iemAImpl_pmuludq_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;

FNIEMAIMPLMEDIAF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_paddq_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psubq_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddwd_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminub_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxub_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsw_u128;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
FNIEMAIMPLMEDIAF2U128 iemAImpl_pmuludq_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;

FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsw_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsd_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;

FNIEMAIMPLMEDIAF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;

FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf2 -> full1.
 * @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
                         iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
                         iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
                         iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
                         iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
                         iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
                         iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
                         iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
                         iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
                         iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
                         iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
                         iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
                         iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
                         iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
                         iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
 * @{ */
FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
                         iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
                         iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
                         iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
                         iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
                         iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
                         iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
#ifndef IEM_WITHOUT_ASSEMBLY
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
#endif
FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
/** @} */

/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
#ifndef IEM_WITHOUT_ASSEMBLY
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
#endif
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
/** @} */
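
/* Semantics sketch (plain C for illustration, not the actual implementation):
 * the byte-mask helpers gather the most significant bit of each packed byte
 * into the low bits of the destination, e.g. for the 128-bit variant:
 *
 *      uint64_t fMask = 0;
 *      for (unsigned iByte = 0; iByte < 16; iByte++)
 *          fMask |= (uint64_t)(puSrc->au8[iByte] >> 7) << iByte;
 *      *pu64Dst = fMask;
 */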

/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;

FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;

FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;

FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
/** @} */
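
/* Semantics sketch (plain C for illustration, not the actual implementation):
 * the variable blends copy a source element to the destination only when the
 * most significant bit of the corresponding mask element is set, e.g. for
 * pblendvb:
 *
 *      for (unsigned iByte = 0; iByte < 16; iByte++)
 *          if (puMask->au8[iByte] & 0x80)
 *              puDst->au8[iByte] = puSrc->au8[iByte];
 */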


/** @name Media (SSE/MMX/AVX) operation: Sort this later
 * @{ */
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u64,(uint64_t *pu64Dst, uint16_t u16Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u128,(PRTUINT128U puDst, uint16_t u16Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u64,(uint16_t *pu16Dst, uint64_t u64Src, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128_fallback,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));

IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));

IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));


typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;

FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));

typedef struct IEMPCMPISTRXSRC
{
    RTUINT128U uSrc1;
    RTUINT128U uSrc2;
} IEMPCMPISTRXSRC;
typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;

typedef struct IEMPCMPESTRXSRC
{
    RTUINT128U uSrc1;
    RTUINT128U uSrc2;
    uint64_t u64Rax;
    uint64_t u64Rdx;
} IEMPCMPESTRXSRC;
typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;

FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;

FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
/** @} */

/** @name Media Odds and Ends
 * @{ */
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
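
/* Illustrative usage sketch (buffer names are made up): the CRC32 instruction
 * implements CRC-32C, and the helpers update the accumulator in place, so a
 * buffer is processed by chaining calls over its bytes/words.
 *
 *      uint32_t uCrc = uInitialCrc;
 *      for (size_t off = 0; off < cbBuf; off++)
 *          iemAImpl_crc32_u8(&uCrc, pbBuf[off]);
 */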

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;

typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point value. */
typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point value. */
typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point value. */
typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point value. */
typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
3423
3424FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
3425FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
3426
3427FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
3428FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
3429
3430FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
3431FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
3432
3433FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
3434FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
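/*
 * Illustrative sketch (not compiled): invoking one of the scalar conversion
 * workers above.  The worker consumes the raw 64-bit pattern of a double and
 * reports exception flags through the MXCSR copy.  The function name and the
 * MXCSR write-back policy here are made up for the example.
 */
#if 0 /* example only */
static int32_t iemExampleCvttsd2si(PX86FXSTATE pFpuState, double rd)
{
    uint64_t u64Src;
    memcpy(&u64Src, &rd, sizeof(u64Src));   /* raw IEEE-754 bit pattern */
    uint32_t fMxcsr = pFpuState->MXCSR;     /* in: control bits; out: + flags */
    int32_t  i32Dst = 0;
    iemAImpl_cvttsd2si_i32_r64(pFpuState, &fMxcsr, &i32Dst, &u64Src);
    pFpuState->MXCSR = fMxcsr;
    return i32Dst;
}
#endif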
3435
3436typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
3437typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
3438typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
3439typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
3440
3441FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
3442FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
3443
3444typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
3445typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
3446typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
3447typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
3448
3449FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
3450FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
3451
3452
3453typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFLMXCSR128,(uint32_t *pfMxcsr, uint32_t *pfEFlags, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3454typedef FNIEMAIMPLF2EFLMXCSR128 *PFNIEMAIMPLF2EFLMXCSR128;
3455
3456FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomiss_u128;
3457FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
3458
3459FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomisd_u128;
3460FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
3461
3462FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comiss_u128;
3463FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
3464
3465FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comisd_u128;
3466FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
3467
3468
3469typedef struct IEMMEDIAF2XMMSRC
3470{
3471 X86XMMREG uSrc1;
3472 X86XMMREG uSrc2;
3473} IEMMEDIAF2XMMSRC;
3474typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
3475typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
3476
3477typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t *pfMxcsr, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
3478typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
3479
3480FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
3481FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
3482FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
3483FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
3484FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
3485FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
3486
3487FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
3488FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
3489
3490FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
3491FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;
3492
3493typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U128,(uint32_t *pfMxcsr, uint64_t *pu64Dst, PCX86XMMREG pSrc));
3494typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
3495
3496FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
3497FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
3498
3499typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU128U64,(uint32_t *pfMxcsr, PX86XMMREG pDst, uint64_t u64Src));
3500typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
3501
3502FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
3503FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
3504
3505typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U64,(uint32_t *pfMxcsr, uint64_t *pu64Dst, uint64_t u64Src));
3506typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
3507
3508FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
3509FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
3510
3511/** @} */
3512
3513
3514/** @name Function tables.
3515 * @{
3516 */
3517
3518/**
3519 * Function table for a binary operator providing implementation based on
3520 * operand size.
3521 */
3522typedef struct IEMOPBINSIZES
3523{
3524 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
3525 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
3526 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
3527 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
3528} IEMOPBINSIZES;
3529/** Pointer to a binary operator function table. */
3530typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
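/*
 * Illustrative sketch (not compiled): a table like the ones used for ADD and
 * friends, plus a dispatch on effective operand size.  The iemAImpl_add_*
 * workers are declared elsewhere in this header; the table name and the
 * dispatcher are made up for the example.
 */
#if 0 /* example only */
static const IEMOPBINSIZES g_iemAImpl_example_add =
{
    iemAImpl_add_u8,  iemAImpl_add_u8_locked,
    iemAImpl_add_u16, iemAImpl_add_u16_locked,
    iemAImpl_add_u32, iemAImpl_add_u32_locked,
    iemAImpl_add_u64, iemAImpl_add_u64_locked
};

static void iemExampleBinOpU32(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags, bool fLocked)
{
    PFNIEMAIMPLBINU32 const pfn = fLocked
                                ? g_iemAImpl_example_add.pfnLockedU32
                                : g_iemAImpl_example_add.pfnNormalU32;
    pfn(puDst, uSrc, pfEFlags);
}
#endif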
3531
3532
3533/**
3534 * Function table for a unary operator providing implementation based on
3535 * operand size.
3536 */
3537typedef struct IEMOPUNARYSIZES
3538{
3539 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
3540 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
3541 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
3542 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
3543} IEMOPUNARYSIZES;
3544/** Pointer to a unary operator function table. */
3545typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
3546
3547
3548/**
3549 * Function table for a shift operator providing implementation based on
3550 * operand size.
3551 */
3552typedef struct IEMOPSHIFTSIZES
3553{
3554 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
3555 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
3556 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
3557 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
3558} IEMOPSHIFTSIZES;
3559/** Pointer to a shift operator function table. */
3560typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
3561
3562
3563/**
3564 * Function table for a multiplication or division operation.
3565 */
3566typedef struct IEMOPMULDIVSIZES
3567{
3568 PFNIEMAIMPLMULDIVU8 pfnU8;
3569 PFNIEMAIMPLMULDIVU16 pfnU16;
3570 PFNIEMAIMPLMULDIVU32 pfnU32;
3571 PFNIEMAIMPLMULDIVU64 pfnU64;
3572} IEMOPMULDIVSIZES;
3573/** Pointer to a multiplication or division operation function table. */
3574typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
3575
3576
3577/**
3578 * Function table for a double precision shift operator providing implementation
3579 * based on operand size.
3580 */
3581typedef struct IEMOPSHIFTDBLSIZES
3582{
3583 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
3584 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
3585 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
3586} IEMOPSHIFTDBLSIZES;
3587/** Pointer to a double precision shift function table. */
3588typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
3589
3590
3591/**
3592 * Function table for a media instruction taking two full sized media source
3593 * registers and one full sized destination register (AVX).
3594 */
3595typedef struct IEMOPMEDIAF3
3596{
3597 PFNIEMAIMPLMEDIAF3U128 pfnU128;
3598 PFNIEMAIMPLMEDIAF3U256 pfnU256;
3599} IEMOPMEDIAF3;
3600/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3601typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
3602
3603/** @def IEMOPMEDIAF3_INIT_VARS_EX
3604 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3605 * given functions as initializers. For use in AVX functions where a pair of
3606 * functions is only used once and the function table need not be public. */
3607#ifndef TST_IEM_CHECK_MC
3608# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3609# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3610 static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3611 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3612# else
3613# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3614 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3615# endif
3616#else
3617# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3618#endif
3619/** @def IEMOPMEDIAF3_INIT_VARS
3620 * Generate AVX function tables for the @a a_InstrNm instruction.
3621 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
3622#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
3623 IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3624 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
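/*
 * Illustrative sketch (not compiled): typical use of the INIT_VARS macro in
 * an AVX opcode body, picking s_Host or s_Fallback based on host support via
 * the IEM_SELECT_HOST_OR_FALLBACK helper from the IEM_MC machinery.  The
 * opcode function name is made up and the actual decoding is elided.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_vaddps)
{
    IEMOPMEDIAF3_INIT_VARS(vaddps);     /* declares s_Host and s_Fallback */
    PCIEMOPMEDIAF3 const pImpl = IEM_SELECT_HOST_OR_FALLBACK(fAvx, &s_Host, &s_Fallback);
    /* ... IEM_MC_* decoding would invoke pImpl->pfnU128 or pImpl->pfnU256 ... */
    RT_NOREF(pImpl);
    return VINF_SUCCESS;
}
#endif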
3625
3626/**
3627 * Function table for a media instruction taking two full sized media source
3628 * registers and one full sized destination register, but no additional state
3629 * (AVX).
3630 */
3631typedef struct IEMOPMEDIAOPTF3
3632{
3633 PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
3634 PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
3635} IEMOPMEDIAOPTF3;
3636/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3637typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
3638
3639/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
3640 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3641 * given functions as initializers. For use in AVX functions where a pair of
3642 * functions is only used once and the function table need not be public. */
3643#ifndef TST_IEM_CHECK_MC
3644# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3645# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3646 static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3647 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3648# else
3649# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3650 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3651# endif
3652#else
3653# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3654#endif
3655/** @def IEMOPMEDIAOPTF3_INIT_VARS
3656 * Generate AVX function tables for the @a a_InstrNm instruction.
3657 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
3658#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
3659 IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3660 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3661
3662/**
3663 * Function table for a media instruction taking one full sized media source
3664 * register and one full sized destination register, but no additional state
3665 * (AVX).
3666 */
3667typedef struct IEMOPMEDIAOPTF2
3668{
3669 PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
3670 PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
3671} IEMOPMEDIAOPTF2;
3672/** Pointer to a media operation function table for 2 full sized ops (AVX). */
3673typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
3674
3675/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
3676 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3677 * given functions as initializers. For use in AVX functions where a pair of
3678 * functions is only used once and the function table need not be public. */
3679#ifndef TST_IEM_CHECK_MC
3680# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3681# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3682 static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3683 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3684# else
3685# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3686 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3687# endif
3688#else
3689# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3690#endif
3691/** @def IEMOPMEDIAOPTF2_INIT_VARS
3692 * Generate AVX function tables for the @a a_InstrNm instruction.
3693 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
3694#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
3695 IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3696 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3697
3698/**
3699 * Function table for a media instruction taking two full sized media source
3700 * registers, one full sized destination register and an 8-bit immediate, but
3701 * no additional state (AVX).
3702 */
3703typedef struct IEMOPMEDIAOPTF3IMM8
3704{
3705 PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
3706 PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
3707} IEMOPMEDIAOPTF3IMM8;
3708/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3709typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
3710
3711/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
3712 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3713 * given functions as initializers. For use in AVX functions where a pair of
3714 * functions is only used once and the function table need not be public. */
3715#ifndef TST_IEM_CHECK_MC
3716# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3717# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3718 static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3719 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3720# else
3721# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3722 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3723# endif
3724#else
3725# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3726#endif
3727/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
3728 * Generate AVX function tables for the @a a_InstrNm instruction.
3729 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
3730#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
3731 IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3732 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3733/** @} */
3734
3735
3736/**
3737 * Function table for a blend type instruction taking three full sized media source
3738 * registers and one full sized destination register, but no additional state
3739 * (AVX).
3740 */
3741typedef struct IEMOPBLENDOP
3742{
3743 PFNIEMAIMPLAVXBLENDU128 pfnU128;
3744 PFNIEMAIMPLAVXBLENDU256 pfnU256;
3745} IEMOPBLENDOP;
3746/** Pointer to a media operation function table for 4 full sized ops (AVX). */
3747typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
3748
3749/** @def IEMOPBLENDOP_INIT_VARS_EX
3750 * Declares an s_Host (x86 & amd64 only) and an s_Fallback variable with the
3751 * given functions as initializers. For use in AVX functions where a pair of
3752 * functions is only used once and the function table need not be public. */
3753#ifndef TST_IEM_CHECK_MC
3754# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3755# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3756 static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3757 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3758# else
3759# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3760 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3761# endif
3762#else
3763# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3764#endif
3765/** @def IEMOPBLENDOP_INIT_VARS
3766 * Generate AVX function tables for the @a a_InstrNm instruction.
3767 * @sa IEMOPBLENDOP_INIT_VARS_EX */
3768#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
3769 IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3770 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3771
3772
3773/** @name SSE/AVX single/double precision floating point operations.
3774 * @{ */
3775/**
3776 * An SSE result.
3777 */
3778typedef struct IEMSSERESULT
3779{
3780 /** The output value. */
3781 X86XMMREG uResult;
3782 /** The output status. */
3783 uint32_t MXCSR;
3784} IEMSSERESULT;
3785AssertCompileMemberOffset(IEMSSERESULT, MXCSR, 128 / 8);
3786/** Pointer to an SSE result. */
3787typedef IEMSSERESULT *PIEMSSERESULT;
3788/** Pointer to a const SSE result. */
3789typedef IEMSSERESULT const *PCIEMSSERESULT;
3790
3791
3792/**
3793 * An AVX128 result.
3794 */
3795typedef struct IEMAVX128RESULT
3796{
3797 /** The output value. */
3798 X86XMMREG uResult;
3799 /** The output status. */
3800 uint32_t MXCSR;
3801} IEMAVX128RESULT;
3802AssertCompileMemberOffset(IEMAVX128RESULT, MXCSR, 128 / 8);
3803/** Pointer to an AVX128 result. */
3804typedef IEMAVX128RESULT *PIEMAVX128RESULT;
3805/** Pointer to a const AVX128 result. */
3806typedef IEMAVX128RESULT const *PCIEMAVX128RESULT;
3807
3808
3809/**
3810 * An AVX256 result.
3811 */
3812typedef struct IEMAVX256RESULT
3813{
3814 /** The output value. */
3815 X86YMMREG uResult;
3816 /** The output status. */
3817 uint32_t MXCSR;
3818} IEMAVX256RESULT;
3819AssertCompileMemberOffset(IEMAVX256RESULT, MXCSR, 256 / 8);
3820/** Pointer to an AVX256 result. */
3821typedef IEMAVX256RESULT *PIEMAVX256RESULT;
3822/** Pointer to a const AVX256 result. */
3823typedef IEMAVX256RESULT const *PCIEMAVX256RESULT;
3824
3825
3826typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3827typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
3828typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R32,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3829typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
3830typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R64,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3831typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
3832
3833typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3834typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
3835typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R32,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3836typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
3837typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R64,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3838typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
3839
3840typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U256,(PX86XSAVEAREA pExtState, PIEMAVX256RESULT pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
3841typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
3842
3843FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
3844FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
3845FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
3846FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
3847FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
3848FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
3849FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
3850FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
3851FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
3852FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
3853FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
3854FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
3855FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
3856FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
3857FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
3858FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
3859FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
3860FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
3861FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
3862FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
3863FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
3864FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
3865FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2pd_u128;
3866
3867FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
3868FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
3869FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
3870FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
3871FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
3872FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
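/*
 * Illustrative sketch (not compiled): invoking one of the packed SSE workers
 * declared above and committing the result only when no unmasked exception
 * flag was raised, roughly the pattern the interpreter follows.  The function
 * name and the exact commit/raise policy are made up for the example.
 */
#if 0 /* example only */
static void iemExampleAddps(PX86FXSTATE pFpuState, PX86XMMREG puDst, PCX86XMMREG puSrc)
{
    IEMSSERESULT Res;
    iemAImpl_addps_u128(pFpuState, &Res, puDst, puSrc);
    /* Commit the value unless an unmasked exception flag was raised. */
    if (!(  Res.MXCSR & X86_MXCSR_XCPT_FLAGS
          & ~((Res.MXCSR & X86_MXCSR_XCPT_MASK) >> X86_MXCSR_XCPT_MASK_SHIFT)))
        *puDst = Res.uResult;
    pFpuState->MXCSR = Res.MXCSR;
}
#endif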
3873
3874FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
3875FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
3876FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
3877FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
3878FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
3879FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
3880FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
3881FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
3882FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
3883FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
3884FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
3885FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
3886FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
3887FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
3888FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
3889FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
3890FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
3891
3892FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
3893FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
3894FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
3895FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
3896FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
3897FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
3898FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
3899FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
3900FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
3901FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
3902FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
3903FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
3904FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
3905FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
3906FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
3907FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
3908FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
3909FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
3910FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
3911FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
3912FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
3913FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;
3914
3915FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
3916FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
3917FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
3918FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
3919FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
3920FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
3921FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
3922FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
3923FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
3924FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
3925FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
3926FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
3927FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
3928FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
3929
3930FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
3931FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
3932FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
3933FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
3934FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
3935FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
3936FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
3937FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
3938FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
3939FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
3940FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
3941FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
3942FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
3943FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
3944FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
3945FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
3946FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
3947FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
3948FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
3949FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
3950/** @} */
3951
3952/** @name C instruction implementations for anything slightly complicated.
3953 * @{ */
3954
3955/**
3956 * For typedef'ing or declaring a C instruction implementation function taking
3957 * no extra arguments.
3958 *
3959 * @param a_Name The name of the type.
3960 */
3961# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
3962 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
3963/**
3964 * For defining a C instruction implementation function taking no extra
3965 * arguments.
3966 *
3967 * @param a_Name The name of the function.
3968 */
3969# define IEM_CIMPL_DEF_0(a_Name) \
3970 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
3971/**
3972 * Prototype version of IEM_CIMPL_DEF_0.
3973 */
3974# define IEM_CIMPL_PROTO_0(a_Name) \
3975 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
3976/**
3977 * For calling a C instruction implementation function taking no extra
3978 * arguments.
3979 *
3980 * This special call macro adds default arguments to the call and allows us to
3981 * change these later.
3982 *
3983 * @param a_fn The name of the function.
3984 */
3985# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
3986
3987/** Type for a C instruction implementation function taking no extra
3988 * arguments. */
3989typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
3990/** Function pointer type for a C instruction implementation function taking
3991 * no extra arguments. */
3992typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
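/*
 * Illustrative sketch (not compiled): declaring, defining and calling a
 * C instruction implementation through the macros above.  The function name
 * and body are made up; a real implementation would advance RIP and commit
 * state rather than just returning.
 */
#if 0 /* example only */
IEM_CIMPL_PROTO_0(iemCImpl_example);

IEM_CIMPL_DEF_0(iemCImpl_example)
{
    /* pVCpu and cbInstr are supplied by the macro expansion. */
    Log2(("iemCImpl_example: cbInstr=%u\n", cbInstr));
    RT_NOREF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}

/* Inside another implementation body it would be invoked as:
       return IEM_CIMPL_CALL_0(iemCImpl_example);                */
#endif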
3993
3994/**
3995 * For typedef'ing or declaring a C instruction implementation function taking
3996 * one extra argument.
3997 *
3998 * @param a_Name The name of the type.
3999 * @param a_Type0 The argument type.
4000 * @param a_Arg0 The argument name.
4001 */
4002# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
4003 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4004/**
4005 * For defining a C instruction implementation function taking one extra
4006 * argument.
4007 *
4008 * @param a_Name The name of the function.
4009 * @param a_Type0 The argument type.
4010 * @param a_Arg0 The argument name.
4011 */
4012# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
4013 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4014/**
4015 * Prototype version of IEM_CIMPL_DEF_1.
4016 */
4017# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
4018 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4019/**
4020 * For calling a C instruction implementation function taking one extra
4021 * argument.
4022 *
4023 * This special call macro adds default arguments to the call and allows us to
4024 * change these later.
4025 *
4026 * @param a_fn The name of the function.
4027 * @param a0 The name of the 1st argument.
4028 */
4029# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4030
4031/**
4032 * For typedef'ing or declaring a C instruction implementation function taking
4033 * two extra arguments.
4034 *
4035 * @param a_Name The name of the type.
4036 * @param a_Type0 The type of the 1st argument.
4037 * @param a_Arg0 The name of the 1st argument.
4038 * @param a_Type1 The type of the 2nd argument.
4039 * @param a_Arg1 The name of the 2nd argument.
4040 */
4041# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4042 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4043/**
4044 * For defining a C instruction implementation function taking two extra
4045 * arguments.
4046 *
4047 * @param a_Name The name of the function.
4048 * @param a_Type0 The type of the 1st argument.
4049 * @param a_Arg0 The name of the 1st argument.
4050 * @param a_Type1 The type of the 2nd argument.
4051 * @param a_Arg1 The name of the 2nd argument.
4052 */
4053# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4054 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4055/**
4056 * Prototype version of IEM_CIMPL_DEF_2.
4057 */
4058# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4059 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4060/**
4061 * For calling a C instruction implementation function taking two extra
4062 * arguments.
4063 *
4064 * This special call macro adds default arguments to the call and allows us to
4065 * change these later.
4066 *
4067 * @param a_fn The name of the function.
4068 * @param a0 The name of the 1st argument.
4069 * @param a1 The name of the 2nd argument.
4070 */
4071# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4072
4073/**
4074 * For typedef'ing or declaring a C instruction implementation function taking
4075 * three extra arguments.
4076 *
4077 * @param a_Name The name of the type.
4078 * @param a_Type0 The type of the 1st argument.
4079 * @param a_Arg0 The name of the 1st argument.
4080 * @param a_Type1 The type of the 2nd argument.
4081 * @param a_Arg1 The name of the 2nd argument.
4082 * @param a_Type2 The type of the 3rd argument.
4083 * @param a_Arg2 The name of the 3rd argument.
4084 */
4085# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4086 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4087/**
4088 * For defining a C instruction implementation function taking three extra
4089 * arguments.
4090 *
4091 * @param a_Name The name of the function.
4092 * @param a_Type0 The type of the 1st argument.
4093 * @param a_Arg0 The name of the 1st argument.
4094 * @param a_Type1 The type of the 2nd argument.
4095 * @param a_Arg1 The name of the 2nd argument.
4096 * @param a_Type2 The type of the 3rd argument.
4097 * @param a_Arg2 The name of the 3rd argument.
4098 */
4099# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4100 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4101/**
4102 * Prototype version of IEM_CIMPL_DEF_3.
4103 */
4104# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4105 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4106/**
4107 * For calling a C instruction implementation function taking three extra
4108 * arguments.
4109 *
4110 * This special call macro adds default arguments to the call and allows us to
4111 * change these later.
4112 *
4113 * @param a_fn The name of the function.
4114 * @param a0 The name of the 1st argument.
4115 * @param a1 The name of the 2nd argument.
4116 * @param a2 The name of the 3rd argument.
4117 */
4118# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4119
4120
4121/**
4122 * For typedef'ing or declaring a C instruction implementation function taking
4123 * four extra arguments.
4124 *
4125 * @param a_Name The name of the type.
4126 * @param a_Type0 The type of the 1st argument.
4127 * @param a_Arg0 The name of the 1st argument.
4128 * @param a_Type1 The type of the 2nd argument.
4129 * @param a_Arg1 The name of the 2nd argument.
4130 * @param a_Type2 The type of the 3rd argument.
4131 * @param a_Arg2 The name of the 3rd argument.
4132 * @param a_Type3 The type of the 4th argument.
4133 * @param a_Arg3 The name of the 4th argument.
4134 */
4135# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4136 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
4137/**
4138 * For defining a C instruction implementation function taking four extra
4139 * arguments.
4140 *
4141 * @param a_Name The name of the function.
4142 * @param a_Type0 The type of the 1st argument.
4143 * @param a_Arg0 The name of the 1st argument.
4144 * @param a_Type1 The type of the 2nd argument.
4145 * @param a_Arg1 The name of the 2nd argument.
4146 * @param a_Type2 The type of the 3rd argument.
4147 * @param a_Arg2 The name of the 3rd argument.
4148 * @param a_Type3 The type of the 4th argument.
4149 * @param a_Arg3 The name of the 4th argument.
4150 */
4151# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4152 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4153 a_Type2 a_Arg2, a_Type3 a_Arg3))
4154/**
4155 * Prototype version of IEM_CIMPL_DEF_4.
4156 */
4157# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4158 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4159 a_Type2 a_Arg2, a_Type3 a_Arg3))
4160/**
4161 * For calling a C instruction implementation function taking four extra
4162 * arguments.
4163 *
4164 * This special call macro adds default arguments to the call and allows us to
4165 * change these later.
4166 *
4167 * @param a_fn The name of the function.
4168 * @param a0 The name of the 1st argument.
4169 * @param a1 The name of the 2nd argument.
4170 * @param a2 The name of the 3rd argument.
4171 * @param a3 The name of the 4th argument.
4172 */
4173# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4174
4175
4176/**
4177 * For typedef'ing or declaring a C instruction implementation function taking
4178 * five extra arguments.
4179 *
4180 * @param a_Name The name of the type.
4181 * @param a_Type0 The type of the 1st argument.
4182 * @param a_Arg0 The name of the 1st argument.
4183 * @param a_Type1 The type of the 2nd argument.
4184 * @param a_Arg1 The name of the 2nd argument.
4185 * @param a_Type2 The type of the 3rd argument.
4186 * @param a_Arg2 The name of the 3rd argument.
4187 * @param a_Type3 The type of the 4th argument.
4188 * @param a_Arg3 The name of the 4th argument.
4189 * @param a_Type4 The type of the 5th argument.
4190 * @param a_Arg4 The name of the 5th argument.
4191 */
4192# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4193 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
4194 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
4195 a_Type3 a_Arg3, a_Type4 a_Arg4))
4196/**
4197 * For defining a C instruction implementation function taking five extra
4198 * arguments.
4199 *
4200 * @param a_Name The name of the function.
4201 * @param a_Type0 The type of the 1st argument.
4202 * @param a_Arg0 The name of the 1st argument.
4203 * @param a_Type1 The type of the 2nd argument.
4204 * @param a_Arg1 The name of the 2nd argument.
4205 * @param a_Type2 The type of the 3rd argument.
4206 * @param a_Arg2 The name of the 3rd argument.
4207 * @param a_Type3 The type of the 4th argument.
4208 * @param a_Arg3 The name of the 4th argument.
4209 * @param a_Type4 The type of the 5th argument.
4210 * @param a_Arg4 The name of the 5th argument.
4211 */
4212# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4213 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4214 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4215/**
4216 * Prototype version of IEM_CIMPL_DEF_5.
4217 */
4218# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4219 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4220 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4221/**
4222 * For calling a C instruction implementation function taking five extra
4223 * arguments.
4224 *
4225 * This special call macro adds default arguments to the call and allows us to
4226 * change these later.
4227 *
4228 * @param a_fn The name of the function.
4229 * @param a0 The name of the 1st argument.
4230 * @param a1 The name of the 2nd argument.
4231 * @param a2 The name of the 3rd argument.
4232 * @param a3 The name of the 4th argument.
4233 * @param a4 The name of the 5th argument.
4234 */
4235# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
4236
4237/** @} */
4238
4239
4240/** @name Opcode Decoder Function Types.
4241 * @{ */
4242
4243/** @typedef PFNIEMOP
4244 * Pointer to an opcode decoder function.
4245 */
4246
4247/** @def FNIEMOP_DEF
4248 * Define an opcode decoder function.
4249 *
4250 * We're using macros for this so that adding and removing parameters, as well
4251 * as tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
4252 *
4253 * @param a_Name The function name.
4254 */
4255
4256/** @typedef PFNIEMOPRM
4257 * Pointer to an opcode decoder function with RM byte.
4258 */
4259
4260/** @def FNIEMOPRM_DEF
4261 * Define an opcode decoder function with RM byte.
4262 *
4263 * We're using macros for this so that adding and removing parameters, as well
4264 * as tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL_1.
4265 *
4266 * @param a_Name The function name.
4267 */
4268
4269#if defined(__GNUC__) && defined(RT_ARCH_X86)
4270typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
4271typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4272# define FNIEMOP_DEF(a_Name) \
4273 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
4274# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4275 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4276# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4277 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4278
4279#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
4280typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
4281typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4282# define FNIEMOP_DEF(a_Name) \
4283 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4284# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4285 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4286# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4287 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4288
4289#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
4290typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4291typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4292# define FNIEMOP_DEF(a_Name) \
4293 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
4294# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4295 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4296# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4297 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4298
4299#else
4300typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4301typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4302# define FNIEMOP_DEF(a_Name) \
4303 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4304# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4305 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4306# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4307 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4308
4309#endif
4310#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4311
4312/**
4313 * Call an opcode decoder function.
4314 *
4315 * We're using macros for this so that adding and removing parameters can be
4316 * done as we please. See FNIEMOP_DEF.
4317 */
4318#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
4319
4320/**
4321 * Call a common opcode decoder function taking one extra argument.
4322 *
4323 * We're using macros for this so that adding and removing parameters can be
4324 * done as we please. See FNIEMOP_DEF_1.
4325 */
4326#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
4327
4328/**
4329 * Call a common opcode decoder function taking two extra arguments.
4330 *
4331 * We're using macros for this so that adding and removing parameters can be
4332 * done as we please. See FNIEMOP_DEF_2.
4333 */
4334#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
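/*
 * Illustrative sketch (not compiled): an opcode decoder defined and invoked
 * through the macros above.  The decoding work is elided; the example only
 * shows the plumbing.  The function names are made up, and the
 * IEM_OPCODE_GET_NEXT_U8 fetch macro is defined elsewhere in IEM.
 */
#if 0 /* example only */
FNIEMOP_DEF_1(iemOp_example_Grp, uint8_t, bRm)
{
    RT_NOREF(bRm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOp_example)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    return FNIEMOP_CALL_1(iemOp_example_Grp, bRm);
}
#endif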
4335/** @} */
4336
4337
4338/** @name Misc Helpers
4339 * @{ */
4340
4341/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
4342 * due to GCC lacking knowledge about the value range of a switch. */
4343#if RT_CPLUSPLUS_PREREQ(202000)
4344# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4345#else
4346# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4347#endif
4348
4349/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
4350#if RT_CPLUSPLUS_PREREQ(202000)
4351# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
4352#else
4353# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
4354#endif
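/*
 * Illustrative sketch (not compiled): typical use of the default-case macro
 * above in an operand-size switch.  All function names here are hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleWorker16(PVMCPUCC pVCpu); /* hypothetical */
static VBOXSTRICTRC iemExampleWorker32(PVMCPUCC pVCpu); /* hypothetical */
static VBOXSTRICTRC iemExampleWorker64(PVMCPUCC pVCpu); /* hypothetical */

static VBOXSTRICTRC iemExampleDispatch(PVMCPUCC pVCpu, IEMMODE enmEffOpSize)
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT: return iemExampleWorker16(pVCpu);
        case IEMMODE_32BIT: return iemExampleWorker32(pVCpu);
        case IEMMODE_64BIT: return iemExampleWorker64(pVCpu);
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* asserts, then returns VERR_IPE_NOT_REACHED_DEFAULT_CASE */
    }
}
#endif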
4355
4356/**
4357 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4358 * occasion.
4359 */
4360#ifdef LOG_ENABLED
4361# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4362 do { \
4363 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
4364 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4365 } while (0)
4366#else
4367# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4368 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4369#endif
4370
4371/**
4372 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4373 * occasion using the supplied logger statement.
4374 *
4375 * @param a_LoggerArgs What to log on failure.
4376 */
4377#ifdef LOG_ENABLED
4378# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4379 do { \
4380 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
4381 /*LogFunc(a_LoggerArgs);*/ \
4382 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4383 } while (0)
4384#else
4385# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4386 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4387#endif
4388
4389/**
4390 * Gets the CPU mode (from fExec) as an IEMMODE value.
4391 *
4392 * @returns IEMMODE
4393 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4394 */
4395#define IEM_GET_CPU_MODE(a_pVCpu) ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
4396
4397/**
4398 * Check if we're currently executing in real or virtual 8086 mode.
4399 *
4400 * @returns @c true if it is, @c false if not.
4401 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4402 */
4403#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (( ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
4404 & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
4405
4406/**
4407 * Check if we're currently executing in virtual 8086 mode.
4408 *
4409 * @returns @c true if it is, @c false if not.
4410 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4411 */
4412#define IEM_IS_V86_MODE(a_pVCpu) (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
4413
4414/**
4415 * Check if we're currently executing in long mode.
4416 *
4417 * @returns @c true if it is, @c false if not.
4418 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4419 */
4420#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
4421
4422/**
4423 * Check if we're currently executing in a 16-bit code segment.
4424 *
4425 * @returns @c true if it is, @c false if not.
4426 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4427 */
4428#define IEM_IS_16BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
4429
4430/**
4431 * Check if we're currently executing in a 32-bit code segment.
4432 *
4433 * @returns @c true if it is, @c false if not.
4434 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4435 */
4436#define IEM_IS_32BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
4437
4438/**
4439 * Check if we're currently executing in a 64-bit code segment.
4440 *
4441 * @returns @c true if it is, @c false if not.
4442 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4443 */
4444#define IEM_IS_64BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
4445
4446/**
4447 * Check if we're currently executing in real mode.
4448 *
4449 * @returns @c true if it is, @c false if not.
4450 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4451 */
4452#define IEM_IS_REAL_MODE(a_pVCpu) (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
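/*
 * Illustrative sketch (not compiled): the mode predicates above in use.  A
 * decoder rejecting an instruction outside 64-bit mode might look roughly
 * like this; the function name is made up, and whether
 * IEMOP_RAISE_INVALID_OPCODE_RET is the right raise helper for a given
 * context is for the decoder headers to say.
 */
#if 0 /* example only */
FNIEMOP_DEF(iemOp_example_64bit_only)
{
    if (!IEM_IS_64BIT_CODE(pVCpu))
        IEMOP_RAISE_INVALID_OPCODE_RET();
    /* ... decoding restricted to 64-bit mode ... */
    return VINF_SUCCESS;
}
#endif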
4453
4454/**
4455 * Gets the current protection level (CPL).
4456 *
4457 * @returns 0..3
4458 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4459 */
4460#define IEM_GET_CPL(a_pVCpu) (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
4461
4462/**
4463 * Sets the current protection level (CPL).
4464 *
4465 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4466 */
4467#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
4468 do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
4469
4470/**
4471 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
4472 * @returns PCCPUMFEATURES
4473 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4474 */
4475#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
4476
4477/**
4478 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
4479 * @returns PCCPUMFEATURES
4480 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4481 */
4482#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
4483
4484/**
4485 * Evaluates to true if we're presenting an Intel CPU to the guest.
4486 */
4487#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
4488
4489/**
4490 * Evaluates to true if we're presenting an AMD CPU to the guest.
4491 */
4492#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
4493
4494/**
4495 * Check if the address is canonical.
4496 */
4497#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
4498
4499/** Checks if the ModR/M byte is in register mode or not. */
4500#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
4501/** Checks if the ModR/M byte is in memory mode or not. */
4502#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
4503
4504/**
4505 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
4506 *
4507 * For use during decoding.
4508 */
4509#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
4510/**
4511 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
4512 *
4513 * For use during decoding.
4514 */
4515#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
4516
4517/**
4518 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
4519 *
4520 * For use during decoding.
4521 */
4522#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
4523/**
4524 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
4525 *
4526 * For use during decoding.
4527 */
4528#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
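/*
 * Illustrative sketch (not compiled): pulling the register indices out of a
 * ModR/M byte with the accessors above.  The decoder fragment is made up.
 */
#if 0 /* example only */
FNIEMOPRM_DEF(iemOp_example_rm)
{
    if (IEM_IS_MODRM_REG_MODE(bRm))
    {
        uint8_t const iGRegReg = IEM_GET_MODRM_REG(pVCpu, bRm); /* reg field, REX.R folded in */
        uint8_t const iGRegRm  = IEM_GET_MODRM_RM(pVCpu, bRm);  /* r/m field, REX.B folded in */
        Log4(("iemOp_example_rm: reg=%u rm=%u\n", iGRegReg, iGRegRm));
    }
    /* else: memory operand, so an effective address calculation follows. */
    return VINF_SUCCESS;
}
#endif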
4529
4530/**
4531 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
4532 * register index, with REX.R added in.
4533 *
4534 * For use during decoding.
4535 *
4536 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4537 */
4538#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
4539 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4540 || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
4541 ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
4542/**
4543 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
4544 * with REX.B added in.
4545 *
4546 * For use during decoding.
4547 *
4548 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4549 */
4550#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
4551 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4552 || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
4553 ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
4554
4555/**
4556 * Combines the REX prefix and the ModR/M byte for passing to
4557 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4558 *
4559 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
4560 * The two bits are part of the REG sub-field, which isn't needed in
4561 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4562 *
4563 * For use during decoding/recompiling.
4564 */
4565#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
4566 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
4567 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (26 - 3) ) )
4568AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(26));
4569AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(27));
4570
4571/**
4572 * Gets the effective VEX.VVVV value.
4573 *
4574 * The 4th bit is ignored when not executing 64-bit code.
4575 * @returns effective V-register value.
4576 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4577 */
4578#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
4579 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
4580
4581
4582/**
4583 * Checks if we're executing inside an AMD-V or VT-x guest.
4584 */
4585#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
4586# define IEM_IS_IN_GUEST(a_pVCpu) RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
4587#else
4588# define IEM_IS_IN_GUEST(a_pVCpu) false
4589#endif
4590
4591
4592#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4593
4594/**
4595 * Check if the guest has entered VMX root operation.
4596 */
4597# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
4598
4599/**
4600 * Check if the guest has entered VMX non-root operation.
4601 */
4602# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
4603 == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
4604
4605/**
4606 * Check if the nested-guest has the given Pin-based VM-execution control set.
4607 */
4608# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
4609
4610/**
4611 * Check if the nested-guest has the given Processor-based VM-execution control set.
4612 */
4613# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
4614
4615/**
4616 * Check if the nested-guest has the given Secondary Processor-based VM-execution
4617 * control set.
4618 */
4619# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
4620
4621/** Gets the guest-physical address of the shadow VMCS for the given VCPU. */
4622# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
4623
4624/** Whether a shadow VMCS is present for the given VCPU. */
4625# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
4626
4627/** Gets the VMXON region pointer. */
4628# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
4629
4630/** Gets the guest-physical address of the current VMCS for the given VCPU. */
4631# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
4632
4633/** Whether a current VMCS is present for the given VCPU. */
4634# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
4635
4636/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
4637# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
4638 do \
4639 { \
4640 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
4641 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
4642 } while (0)
4643
4644/** Clears any current VMCS for the given VCPU. */
4645# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
4646 do \
4647 { \
4648 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
4649 } while (0)
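/*
 * Illustrative sketch (not compiled): how VMPTRLD-style code might use the
 * current-VMCS accessors above, minus all the validation a real
 * implementation performs.  The function name is made up.
 */
#if 0 /* example only */
static void iemExampleVmptrld(PVMCPUCC pVCpu, RTGCPHYS GCPhysVmcs)
{
    if (   !IEM_VMX_HAS_CURRENT_VMCS(pVCpu)
        || IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
        IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
}
#endif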

/**
 * Invokes the VMX VM-exit handler for an instruction intercept.
 */
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
    do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for an instruction intercept where the
 * instruction provides additional VM-exit information.
 */
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
    do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for a task switch.
 */
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
    do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for MWAIT.
 */
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
    do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)

/**
 * Invokes the VMX VM-exit handler for EPT faults.
 */
# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
    do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)

/**
 * Invokes the VMX VM-exit handler for a triple fault.
 */
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
    do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)

#else
# define IEM_VMX_IS_ROOT_MODE(a_pVCpu)                                              (false)
# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu)                                          (false)
# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl)                                  (false)
# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl)                                (false)
# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2)                              (false)
# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr)                do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr)   do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr)              do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr)      do { return VERR_VMX_IPE_1; } while (0)
# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual)       do { return VERR_VMX_IPE_1; } while (0)

#endif

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Checks if we're executing a guest using AMD-V.
 */
# define IEM_SVM_IS_IN_GUEST(a_pVCpu) (    ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
                                        == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
/**
 * Checks if an SVM control/instruction intercept is set.
 */
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))

/**
 * Checks if an SVM read CRx intercept is set.
 */
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Checks if an SVM write CRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))

/**
 * Checks if an SVM read DRx intercept is set.
 */
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Checks if an SVM write DRx intercept is set.
 */
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))

/**
 * Checks if an SVM exception intercept is set.
 */
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
    (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))

/**
 * Invokes the SVM \#VMEXIT handler for the nested-guest.
 */
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
    do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)

/**
 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
 * corresponding decode assist information.
 */
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
    do \
    { \
        uint64_t uExitInfo1; \
        if (   IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
            && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
            uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
        else \
            uExitInfo1 = 0; \
        IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
    } while (0)
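
/**
 * @par Example (non-normative)
 * A hedged sketch of how the CRx write intercept check and the macro above
 * might pair up in a MOV CRx implementation; the surrounding context is
 * assumed, not quoted from the implementation:
 * @code
 * if (IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(pVCpu, 4))
 *     IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, IEMACCESSCRX_MOV_CRX, iGReg);
 * @endcode
 */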

/** Checks and handles an SVM nested-guest instruction intercept, updating the
 * NRIP if needed.
 */
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    do \
    { \
        if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)
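
/**
 * @par Example (non-normative)
 * An illustrative use of the intercept-check macro for a CPUID intercept; the
 * exit-info values are placeholders:
 * @code
 * IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID,
 *                               0, 0, cbInstr); // uExitInfo1=0, uExitInfo2=0
 * @endcode
 */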

/** Checks and handles the SVM nested-guest CR0 read intercept. */
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
    do \
    { \
        if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
        { /* likely */ } \
        else \
        { \
            IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
            IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
        } \
    } while (0)

/**
 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
 */
# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
    do { \
        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
    } while (0)

#else
# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                            (false)
# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                               (false)
# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                              (false)
# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                               (false)
# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                              (false)
# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                              (false)
# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)           do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)          do { return VERR_SVM_IPE_1; } while (0)
# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
                                       a_uExitInfo1, a_uExitInfo2, a_cbInstr)           do { } while (0)
# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr)                                        do { } while (0)

#endif

/** @} */

uint32_t     iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);


/**
 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
 */
typedef union IEMSELDESC
{
    /** The legacy view. */
    X86DESC     Legacy;
    /** The long mode view. */
    X86DESC64   Long;
} IEMSELDESC;
/** Pointer to a selector descriptor table entry. */
typedef IEMSELDESC *PIEMSELDESC;
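
/**
 * @par Example (non-normative)
 * A minimal sketch of fetching a descriptor and checking the present bit; the
 * surrounding variables are assumed and error handling is trimmed:
 * @code
 * IEMSELDESC Desc;
 * VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 * if (rcStrict == VINF_SUCCESS && !Desc.Legacy.Gen.u1Present)
 *     rcStrict = iemRaiseSelectorNotPresentBySelector(pVCpu, uSel);
 * @endcode
 */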

/** @name Raising Exceptions.
 * @{ */
VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
                           uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;

VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
                               uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
                                          uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;

void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);

IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);

/**
 * Macro for calling iemCImplRaiseDivideError().
 *
 * This is for things that will _always_ decode to an \#DE, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_DIVIDE_ERROR_RET()          IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseDivideError)

/**
 * Macro for calling iemCImplRaiseInvalidLockPrefix().
 *
 * This is for things that will _always_ decode to an \#UD, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET()   IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidLockPrefix)

/**
 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
 *
 * This is for things that will _always_ decode to an \#UD, taking the
 * recompiler into consideration and everything.
 *
 * @return  Strict VBox status code.
 */
#define IEMOP_RAISE_INVALID_OPCODE_RET()        IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)
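
/**
 * @par Example (non-normative)
 * A sketch of how a decoder stub might use the macro above; the opcode
 * function name is made up for illustration:
 * @code
 * FNIEMOP_DEF(iemOp_InvalidExample)
 * {
 *     IEMOP_RAISE_INVALID_OPCODE_RET();
 * }
 * @endcode
 */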

/**
 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
 *
 * Using this macro means you've got _buggy_ _code_ and are doing things during
 * decoding that belong exclusively in IEMAllCImpl.cpp.
 *
 * @return  Strict VBox status code.
 * @see     IEMOP_RAISE_INVALID_OPCODE_RET
 */
#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode)

/** @} */

/** @name Register Access.
 * @{ */
VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT;
VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT;
VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewRip) RT_NOEXCEPT;
VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewRip) RT_NOEXCEPT;
VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT;
/** @} */

/** @name FPU access and helpers.
 * @{ */
void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
                                uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
                                       uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
/** @} */

/** @name SSE+AVX SIMD access and helpers.
 * @{ */
void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT;
void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
/** @} */

/** @name Memory access.
 * @{ */

/** Report a \#GP instead of \#AC and do not restrict to ring-3. */
#define IEM_MEMMAP_F_ALIGN_GP       RT_BIT_32(16)
/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
 * when it works like normal \#AC.  Always used with IEM_MEMMAP_F_ALIGN_GP. */
#define IEM_MEMMAP_F_ALIGN_SSE      RT_BIT_32(17)
/** If \#AC is applicable, raise it.  Always used with IEM_MEMMAP_F_ALIGN_GP.
 * Users include FXSAVE & FXRSTOR. */
#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
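
/**
 * @par Example (non-normative)
 * A hedged sketch of passing the alignment-control flags to iemMemMap() for a
 * 16-byte SSE read; IEM_ACCESS_DATA_R is assumed from the access-flag
 * definitions elsewhere in this header:
 * @code
 * void *pvMem;
 * VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem, 16, iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
 *                                   15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
 * @endcode
 */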

VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
                       uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
#ifndef IN_RING3
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT;
#endif
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;

void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
#ifdef IEM_WITH_CODE_TLB
void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
#else
VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
#endif
#ifdef IEM_WITH_SETJMP
uint8_t  iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
#else
VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
#endif

VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
                                 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
uint8_t  iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU256AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
# if 0 /* these are inlined now */
uint8_t  iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
# endif
void     iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
void     iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
#endif

VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;

VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
#if 0
void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
#endif

#ifdef IEM_WITH_SETJMP
uint8_t        *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t        *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint8_t const  *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t       *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t       *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t       *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t       *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t       *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t       *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;

void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
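
/**
 * @par Example (non-normative)
 * A minimal read-modify-write sketch pairing the mapping and commit helpers
 * above (setjmp builds only); the variables are assumed from context:
 * @code
 * uint8_t   bUnmapInfo;
 * uint32_t *pu32 = iemMemMapDataU32RwSafeJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem);
 * *pu32 += 1;  // modify the guest dword in place
 * iemMemCommitAndUnmapRwSafeJmp(pVCpu, pu32, bUnmapInfo);
 * @endcode
 */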

VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                         void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
                                        void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
                                           void const **ppvMem, uint64_t uCurNewRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
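
/**
 * @par Example (non-normative)
 * A hedged sketch of the Begin/Commit pairing for a special stack push (here
 * an 8-byte, 8-byte-aligned slot); the value written is illustrative:
 * @code
 * void    *pvMem;
 * uint64_t uNewRsp;
 * VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 8, &pvMem, &uNewRsp);
 * if (rcStrict == VINF_SUCCESS)
 * {
 *     *(uint64_t *)pvMem = 0;  // fill in the pushed quadword
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvMem, uNewRsp);
 * }
 * @endcode
 */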
5155
5156#ifdef IEM_WITH_SETJMP
5157void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5158void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5159void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5160void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5161uint16_t iemMemStackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5162uint32_t iemMemStackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5163uint64_t iemMemStackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5164
5165void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5166void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5167void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5168uint16_t iemMemFlat32StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5169uint32_t iemMemFlat32StackPopU32SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5170
5171void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5172void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5173uint16_t iemMemFlat64StackPopU16SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5174uint64_t iemMemFlat64StackPopU64SafeJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5175#endif
5176
5177/** @} */
5178
5179/** @name IEMAllCImpl.cpp
5180 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
5181 * @{ */
5182IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5183IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5184IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5185IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
5186IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
5187IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
5188IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
5189IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
5190IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
5191IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
5192IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
5193IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
5194IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
5195IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
5196IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
5197IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5198IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5199typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5200typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
5201IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
5202IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
5203IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
5204IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
5205IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
5206IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
5207IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
5208IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
5209IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
5210IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
5211IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
5212IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
5213IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
5214IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
5215IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
5216IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
5217IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
5218IEM_CIMPL_PROTO_0(iemCImpl_syscall);
5219IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
5220IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
5221IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
5222IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
5223IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
5224IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
5225IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
5226IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
5227IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
5228IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
5229IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5230IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5231IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5232IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5233IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
5234IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5235IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5236IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
5237IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5238IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5239IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
5240IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5241IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5242IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
5243IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
5244IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
5245IEM_CIMPL_PROTO_0(iemCImpl_clts);
5246IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
5247IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
5248IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
5249IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
5250IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
5251IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
5252IEM_CIMPL_PROTO_0(iemCImpl_invd);
5253IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
5254IEM_CIMPL_PROTO_0(iemCImpl_rsm);
5255IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
5256IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
5257IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
5258IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
5259IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
5260IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5261IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5262IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5263IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5264IEM_CIMPL_PROTO_0(iemCImpl_cli);
5265IEM_CIMPL_PROTO_0(iemCImpl_sti);
5266IEM_CIMPL_PROTO_0(iemCImpl_hlt);
5267IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
5268IEM_CIMPL_PROTO_0(iemCImpl_mwait);
5269IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
5270IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
5271IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
5272IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
5273IEM_CIMPL_PROTO_0(iemCImpl_daa);
5274IEM_CIMPL_PROTO_0(iemCImpl_das);
5275IEM_CIMPL_PROTO_0(iemCImpl_aaa);
5276IEM_CIMPL_PROTO_0(iemCImpl_aas);
5277IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
5278IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
5279IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
5280IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
5281IEM_CIMPL_PROTO_4(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
5282 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags);
5283IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5284IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
5285IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5286IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5287IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5288IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5289IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5290IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5291IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5292IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5293IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5294IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5295IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5296IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
5297IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
5298IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
5299IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
5300IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
5301/** @} */
5302
5303/** @name IEMAllCImplStrInstr.cpp.h
5304 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
5305 * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
5306 * @{ */
5307IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
5308IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
5309IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
5310IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
5311IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
5312IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
5313IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
5314IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
5315IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
5316IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5317IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5318
5319IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
5320IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
5321IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
5322IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
5323IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
5324IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
5325IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
5326IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
5327IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
5328IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5329IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5330
5331IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
5332IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
5333IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
5334IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
5335IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
5336IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
5337IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
5338IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
5339IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
5340IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5341IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5342
5343
5344IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
5345IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
5346IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
5347IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
5348IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
5349IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
5350IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
5351IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
5352IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
5353IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5354IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5355
5356IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
5357IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
5358IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
5359IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
5360IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
5361IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
5362IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
5363IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
5364IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
5365IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5366IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5367
5368IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
5369IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
5370IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
5371IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
5372IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
5373IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
5374IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
5375IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
5376IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
5377IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5378IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5379
5380IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
5381IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
5382IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
5383IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
5384IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
5385IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
5386IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
5387IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
5388IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
5389IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5390IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5391
5392
5393IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
5394IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
5395IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
5396IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
5397IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
5398IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
5399IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
5400IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
5401IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
5402IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5403IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5404
5405IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
5406IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
5407IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
5408IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
5409IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
5410IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
5411IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
5412IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
5413IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
5414IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5415IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5416
5417IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
5418IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
5419IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
5420IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
5421IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
5422IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
5423IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
5424IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
5425IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
5426IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5427IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5428
5429IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
5430IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
5431IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
5432IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
5433IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
5434IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
5435IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
5436IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
5437IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
5438IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5439IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5440/** @} */
5441
5442#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5443VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
5444VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
5445VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
5446VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
5447VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
5448VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5449VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
5450VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
5451VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
5452VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
5453 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
5454VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
5455 bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
5456VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5457VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5458VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5459VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5460VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5461VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5462VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
5463VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
5464 RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
5465VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
5466VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
5467VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
5468uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
5469void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
5470VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
5471 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
5472bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
5473IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
5474IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
5475IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
5476IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
5477IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5478IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5479IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5480IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
5481IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
5482IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
5483IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
5484IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
5485IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
5486IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
5487IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
5488IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
5489#endif
5490
5491#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5492VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
5493VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5494VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
5495 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
5496VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
5497IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
5498IEM_CIMPL_PROTO_0(iemCImpl_vmload);
5499IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
5500IEM_CIMPL_PROTO_0(iemCImpl_clgi);
5501IEM_CIMPL_PROTO_0(iemCImpl_stgi);
5502IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
5503IEM_CIMPL_PROTO_0(iemCImpl_skinit);
5504IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
5505#endif
5506
5507IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
5508IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
5509IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */

extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
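/** @remarks Illustrative sketch only: inside an opcode decoder function the
 *           one byte map is assumed to be indexed directly by the opcode
 *           byte, while the 1024-entry maps are assumed to fold four prefix
 *           variants into each opcode slot (hence 256 * 4 entries):
 * @code
 *  uint8_t bOpcode;
 *  IEM_OPCODE_GET_NEXT_U8(&bOpcode);
 *  return FNIEMOP_CALL(g_apfnIemInterpretOnlyOneByteMap[bOpcode]);
 * @endcode
 */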

/*
 * Recompiler related stuff.
 */
extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];

DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
                            uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
void iemTbAllocatorProcessDelayedFrees(PVMCPU pVCpu, PIEMTBALLOCATOR pTbAllocator);
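/** @remarks A hedged usage sketch for the translation block (TB) allocator
 *           initialization above; the sizes are illustrative placeholders,
 *           not tuned defaults taken from this code base:
 * @code
 *  int rc = iemTbInit(pVM, _32K, _1M,    // initial & max TB counts
 *                     _16M, _512M, _4M); // initial, max & chunk exec-memory sizes
 *  AssertLogRelRCReturn(rc, rc);
 * @endcode
 */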


/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)

#else
typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
    VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
#endif
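/** @remarks A minimal sketch (hypothetical function, not part of IEM) of how
 *           the macro pair above keeps a threaded function's prototype and
 *           definition in sync across both attribute variants:
 * @code
 *  IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_ExampleNop);
 *
 *  IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_ExampleNop)
 *  {
 *      RT_NOREF(pVCpu, uParam0, uParam1, uParam2); // deliberately does nothing
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */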


IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);

/* Branching: */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);

/* Natural page crossing: */
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);

IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);

bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);

/* Native recompiler public bits: */
DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk);
void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb);
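/** @remarks A hedged sketch of the expected promotion pattern for the native
 *           recompiler entry point above; the use counter name and the
 *           threshold are illustrative assumptions, not values taken from
 *           this code base:
 * @code
 *  if (pTb->cUsed >= 16) // hypothetical hotness threshold
 *  {
 *      PIEMTB pNativeTb = iemNativeRecompile(pVCpu, pTb);
 *      if (pNativeTb)
 *          pTb = pNativeTb; // execute the native translation from now on
 *  }
 * @endcode
 */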


/** @} */

RT_C_DECLS_END

#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */