VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 102700

Last change on this file since 102700 was 102663, checked in by vboxsync, 16 months ago

VMM/IEM: Working on BODY_CHECK_PC_AFTER_BRANCH and sideeffects of it. Fixed bug in 8-bit register stores (AMD64). Fixed bug in iemNativeEmitBltInCheckOpcodes (AMD64). Added a way to inject state logging between each instruction, currently only really implemented for AMD64. Relaxed the heave flushing code, no need to set the buffer pointer to NULL. Started looking at avoiding code TLB flushing when allocating memory to replace zero pages. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 288.6 KB
Line 
1/* $Id: IEMInternal.h 102663 2023-12-21 01:55:07Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
29#define VMM_INCLUDED_SRC_include_IEMInternal_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34#include <VBox/vmm/cpum.h>
35#include <VBox/vmm/iem.h>
36#include <VBox/vmm/pgm.h>
37#include <VBox/vmm/stam.h>
38#include <VBox/param.h>
39
40#include <iprt/setjmp-without-sigmask.h>
41#include <iprt/list.h>
42
43
44RT_C_DECLS_BEGIN
45
46
47/** @defgroup grp_iem_int Internals
48 * @ingroup grp_iem
49 * @internal
50 * @{
51 */
52
53/** For expanding symbol in slickedit and other products tagging and
54 * crossreferencing IEM symbols. */
55#ifndef IEM_STATIC
56# define IEM_STATIC static
57#endif
58
59/** @def IEM_WITH_SETJMP
60 * Enables alternative status code handling using setjmps.
61 *
62 * This adds a bit of expense via the setjmp() call since it saves all the
63 * non-volatile registers. However, it eliminates return code checks and allows
64 * for more optimal return value passing (return regs instead of stack buffer).
65 */
66#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
67# define IEM_WITH_SETJMP
68#endif
69
70/** @def IEM_WITH_THROW_CATCH
71 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
72 * mode code when IEM_WITH_SETJMP is in effect.
73 *
74 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
75 * setjmp/long resulted in bs2-test-1 running 3.00% faster and all but on test
76 * result value improving by more than 1%. (Best out of three.)
77 *
78 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
79 * setjmp/long resulted in bs2-test-1 running 3.68% faster and all but some of
80 * the MMIO and CPUID tests ran noticeably faster. Variation is greater than on
81 * Linux, but it should be quite a bit faster for normal code.
82 */
83#if (defined(__cplusplus) && defined(IEM_WITH_SETJMP) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER))) \
84 || defined(DOXYGEN_RUNNING)
85# define IEM_WITH_THROW_CATCH
86#endif
87
88/** @def IEM_DO_LONGJMP
89 *
90 * Wrapper around longjmp / throw.
91 *
92 * @param a_pVCpu The CPU handle.
93 * @param a_rc The status code jump back with / throw.
94 */
95#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
96# ifdef IEM_WITH_THROW_CATCH
97# define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
98# else
99# define IEM_DO_LONGJMP(a_pVCpu, a_rc) longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
100# endif
101#endif
102
103/** For use with IEM function that may do a longjmp (when enabled).
104 *
105 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
106 * attribute. So, we indicate that function that may be part of a longjmp may
107 * throw "exceptions" and that the compiler should definitely not generate and
108 * std::terminate calling unwind code.
109 *
110 * Here is one example of this ending in std::terminate:
111 * @code{.txt}
11200 00000041`cadfda10 00007ffc`5d5a1f9f ucrtbase!abort+0x4e
11301 00000041`cadfda40 00007ffc`57af229a ucrtbase!terminate+0x1f
11402 00000041`cadfda70 00007ffb`eec91030 VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
11503 00000041`cadfdaa0 00007ffb`eec92c6d VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
11604 00000041`cadfdad0 00007ffb`eec93ae5 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
11705 00000041`cadfdc00 00007ffb`eec92258 VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
11806 00000041`cadfdc30 00007ffb`eec940e9 VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
11907 00000041`cadfdcd0 00007ffc`5f9f249f VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
12008 00000041`cadfdd40 00007ffc`5f980939 ntdll!RtlpExecuteHandlerForUnwind+0xf
12109 00000041`cadfdd70 00007ffc`5f9a0edd ntdll!RtlUnwindEx+0x339
1220a 00000041`cadfe490 00007ffc`57aff976 ntdll!RtlUnwind+0xcd
1230b 00000041`cadfea00 00007ffb`e1b5de01 VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
1240c (Inline Function) --------`-------- VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
1250d 00000041`cadfea50 00007ffb`e1b60f6b VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
1260e 00000041`cadfea90 00007ffb`e1cc6201 VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
1270f 00000041`cadfec70 00007ffb`e1d0df8d VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
12810 00000041`cadfed60 00007ffb`e1d0d4c0 VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829] @encode
129 @endcode
130 *
131 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
132 */
133#if defined(IEM_WITH_SETJMP) && (defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH))
134# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT_EX(false)
135#else
136# define IEM_NOEXCEPT_MAY_LONGJMP RT_NOEXCEPT
137#endif
138
139#define IEM_IMPLEMENTS_TASKSWITCH
140
141/** @def IEM_WITH_3DNOW
142 * Includes the 3DNow decoding. */
143#if (!defined(IEM_WITH_3DNOW) && !defined(IEM_WITHOUT_3DNOW)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
144# define IEM_WITH_3DNOW
145#endif
146
147/** @def IEM_WITH_THREE_0F_38
148 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
149#if (!defined(IEM_WITH_THREE_0F_38) && !defined(IEM_WITHOUT_THREE_0F_38)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
150# define IEM_WITH_THREE_0F_38
151#endif
152
153/** @def IEM_WITH_THREE_0F_3A
154 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
155#if (!defined(IEM_WITH_THREE_0F_3A) && !defined(IEM_WITHOUT_THREE_0F_3A)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
156# define IEM_WITH_THREE_0F_3A
157#endif
158
159/** @def IEM_WITH_VEX
160 * Includes the VEX decoding. */
161#if (!defined(IEM_WITH_VEX) && !defined(IEM_WITHOUT_VEX)) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
162# define IEM_WITH_VEX
163#endif
164
165/** @def IEM_CFG_TARGET_CPU
166 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
167 *
168 * By default we allow this to be configured by the user via the
169 * CPUM/GuestCpuName config string, but this comes at a slight cost during
170 * decoding. So, for applications of this code where there is no need to
171 * be dynamic wrt target CPU, just modify this define.
172 */
173#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
174# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
175#endif
176
177//#define IEM_WITH_CODE_TLB // - work in progress
178//#define IEM_WITH_DATA_TLB // - work in progress
179
180
181/** @def IEM_USE_UNALIGNED_DATA_ACCESS
182 * Use unaligned accesses instead of elaborate byte assembly. */
183#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
184# define IEM_USE_UNALIGNED_DATA_ACCESS
185#endif
186
187//#define IEM_LOG_MEMORY_WRITES
188
189#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
190/** Instruction statistics. */
191typedef struct IEMINSTRSTATS
192{
193# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
194# include "IEMInstructionStatisticsTmpl.h"
195# undef IEM_DO_INSTR_STAT
196} IEMINSTRSTATS;
197#else
198struct IEMINSTRSTATS;
199typedef struct IEMINSTRSTATS IEMINSTRSTATS;
200#endif
201/** Pointer to IEM instruction statistics. */
202typedef IEMINSTRSTATS *PIEMINSTRSTATS;
203
204
205/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
206 * @{ */
207#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
208#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL 1 /**< Intel EFLAGS result. */
209#define IEMTARGETCPU_EFL_BEHAVIOR_AMD 2 /**< AMD EFLAGS result */
210#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED 3 /**< Reserved/dummy entry slot that's the same as 0. */
211#define IEMTARGETCPU_EFL_BEHAVIOR_MASK 3 /**< For masking the index before use. */
212/** Selects the right variant from a_aArray.
213 * pVCpu is implicit in the caller context. */
214#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
215 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
216/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
217 * be used because the host CPU does not support the operation. */
218#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
219 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
220/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimentional
221 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
222 * into the two.
223 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */
224#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
225# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
226 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
227#else
228# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
229 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
230#endif
231/** @} */
232
233/**
234 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
235 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
236 *
237 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
238 * indicator.
239 *
240 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
241 */
242#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
243# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
244 (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
245#else
246# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
247#endif
248
249
250/**
251 * Extended operand mode that includes a representation of 8-bit.
252 *
253 * This is used for packing down modes when invoking some C instruction
254 * implementations.
255 */
256typedef enum IEMMODEX
257{
258 IEMMODEX_16BIT = IEMMODE_16BIT,
259 IEMMODEX_32BIT = IEMMODE_32BIT,
260 IEMMODEX_64BIT = IEMMODE_64BIT,
261 IEMMODEX_8BIT
262} IEMMODEX;
263AssertCompileSize(IEMMODEX, 4);
264
265
266/**
267 * Branch types.
268 */
269typedef enum IEMBRANCH
270{
271 IEMBRANCH_JUMP = 1,
272 IEMBRANCH_CALL,
273 IEMBRANCH_TRAP,
274 IEMBRANCH_SOFTWARE_INT,
275 IEMBRANCH_HARDWARE_INT
276} IEMBRANCH;
277AssertCompileSize(IEMBRANCH, 4);
278
279
280/**
281 * INT instruction types.
282 */
283typedef enum IEMINT
284{
285 /** INT n instruction (opcode 0xcd imm). */
286 IEMINT_INTN = 0,
287 /** Single byte INT3 instruction (opcode 0xcc). */
288 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
289 /** Single byte INTO instruction (opcode 0xce). */
290 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
291 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
292 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
293} IEMINT;
294AssertCompileSize(IEMINT, 4);
295
296
297/**
298 * A FPU result.
299 */
300typedef struct IEMFPURESULT
301{
302 /** The output value. */
303 RTFLOAT80U r80Result;
304 /** The output status. */
305 uint16_t FSW;
306} IEMFPURESULT;
307AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
308/** Pointer to a FPU result. */
309typedef IEMFPURESULT *PIEMFPURESULT;
310/** Pointer to a const FPU result. */
311typedef IEMFPURESULT const *PCIEMFPURESULT;
312
313
314/**
315 * A FPU result consisting of two output values and FSW.
316 */
317typedef struct IEMFPURESULTTWO
318{
319 /** The first output value. */
320 RTFLOAT80U r80Result1;
321 /** The output status. */
322 uint16_t FSW;
323 /** The second output value. */
324 RTFLOAT80U r80Result2;
325} IEMFPURESULTTWO;
326AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
327AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
328/** Pointer to a FPU result consisting of two output values and FSW. */
329typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
330/** Pointer to a const FPU result consisting of two output values and FSW. */
331typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
332
333
334/**
335 * IEM TLB entry.
336 *
337 * Lookup assembly:
338 * @code{.asm}
339 ; Calculate tag.
340 mov rax, [VA]
341 shl rax, 16
342 shr rax, 16 + X86_PAGE_SHIFT
343 or rax, [uTlbRevision]
344
345 ; Do indexing.
346 movzx ecx, al
347 lea rcx, [pTlbEntries + rcx]
348
349 ; Check tag.
350 cmp [rcx + IEMTLBENTRY.uTag], rax
351 jne .TlbMiss
352
353 ; Check access.
354 mov rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
355 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
356 cmp rax, [uTlbPhysRev]
357 jne .TlbMiss
358
359 ; Calc address and we're done.
360 mov eax, X86_PAGE_OFFSET_MASK
361 and eax, [VA]
362 or rax, [rcx + IEMTLBENTRY.pMappingR3]
363 %ifdef VBOX_WITH_STATISTICS
364 inc qword [cTlbHits]
365 %endif
366 jmp .Done
367
368 .TlbMiss:
369 mov r8d, ACCESS_FLAGS
370 mov rdx, [VA]
371 mov rcx, [pVCpu]
372 call iemTlbTypeMiss
373 .Done:
374
375 @endcode
376 *
377 */
378typedef struct IEMTLBENTRY
379{
380 /** The TLB entry tag.
381 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits, this
382 * is ASSUMING a virtual address width of 48 bits.
383 *
384 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
385 *
386 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
387 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
388 * revision wraps around though, the tags needs to be zeroed.
389 *
390 * @note Try use SHRD instruction? After seeing
391 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
392 *
393 * @todo This will need to be reorganized for 57-bit wide virtual address and
394 * PCID (currently 12 bits) and ASID (currently 6 bits) support. We'll
395 * have to move the TLB entry versioning entirely to the
396 * fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
397 * 19 bits left (64 - 57 + 12 = 19) and they'll almost entire be
398 * consumed by PCID and ASID (12 + 6 = 18).
399 */
400 uint64_t uTag;
401 /** Access flags and physical TLB revision.
402 *
403 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
404 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
405 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
406 * - Bit 3 - pgm phys/virt - not directly writable.
407 * - Bit 4 - pgm phys page - not directly readable.
408 * - Bit 5 - page tables - not accessed (complemented X86_PTE_A).
409 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
410 * - Bit 7 - tlb entry - pMappingR3 member not valid.
411 * - Bits 63 thru 8 are used for the physical TLB revision number.
412 *
413 * We're using complemented bit meanings here because it makes it easy to check
414 * whether special action is required. For instance a user mode write access
415 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
416 * non-zero result would mean special handling needed because either it wasn't
417 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
418 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
419 * need to check any PTE flag.
420 */
421 uint64_t fFlagsAndPhysRev;
422 /** The guest physical page address. */
423 uint64_t GCPhys;
424 /** Pointer to the ring-3 mapping. */
425 R3PTRTYPE(uint8_t *) pbMappingR3;
426#if HC_ARCH_BITS == 32
427 uint32_t u32Padding1;
428#endif
429} IEMTLBENTRY;
430AssertCompileSize(IEMTLBENTRY, 32);
431/** Pointer to an IEM TLB entry. */
432typedef IEMTLBENTRY *PIEMTLBENTRY;
433
434/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
435 * @{ */
436#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
437#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
438#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
439#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
440#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
441#define IEMTLBE_F_PT_NO_ACCESSED RT_BIT_64(5) /**< Phys tables: Not accessed (need to be marked accessed). */
442#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
443#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */
444#define IEMTLBE_F_PG_UNASSIGNED RT_BIT_64(8) /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
445#define IEMTLBE_F_PG_CODE_PAGE RT_BIT_64(9) /**< Phys page: Code page. */
446#define IEMTLBE_F_PHYS_REV UINT64_C(0xfffffffffffffc00) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
447/** @} */
448
449
450/**
451 * An IEM TLB.
452 *
453 * We've got two of these, one for data and one for instructions.
454 */
455typedef struct IEMTLB
456{
457 /** The TLB entries.
458 * We've choosen 256 because that way we can obtain the result directly from a
459 * 8-bit register without an additional AND instruction. */
460 IEMTLBENTRY aEntries[256];
461 /** The TLB revision.
462 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
463 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
464 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
465 * (The revision zero indicates an invalid TLB entry.)
466 *
467 * The initial value is choosen to cause an early wraparound. */
468 uint64_t uTlbRevision;
469 /** The TLB physical address revision - shadow of PGM variable.
470 *
471 * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
472 * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
473 * a rendezvous is called and each CPU wipe the IEMTLBENTRY::pMappingR3 as well
474 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
475 *
476 * The initial value is choosen to cause an early wraparound. */
477 uint64_t volatile uTlbPhysRev;
478
479 /* Statistics: */
480
481 /** TLB hits (VBOX_WITH_STATISTICS only). */
482 uint64_t cTlbHits;
483 /** TLB misses. */
484 uint32_t cTlbMisses;
485 /** Slow read path. */
486 uint32_t cTlbSlowReadPath;
487 /** Safe read path. */
488 uint32_t cTlbSafeReadPath;
489 /** Safe write path. */
490 uint32_t cTlbSafeWritePath;
491#if 0
492 /** TLB misses because of tag mismatch. */
493 uint32_t cTlbMissesTag;
494 /** TLB misses because of virtual access violation. */
495 uint32_t cTlbMissesVirtAccess;
496 /** TLB misses because of dirty bit. */
497 uint32_t cTlbMissesDirty;
498 /** TLB misses because of MMIO */
499 uint32_t cTlbMissesMmio;
500 /** TLB misses because of write access handlers. */
501 uint32_t cTlbMissesWriteHandler;
502 /** TLB misses because no r3(/r0) mapping. */
503 uint32_t cTlbMissesMapping;
504#endif
505 /** Alignment padding. */
506 uint32_t au32Padding[6];
507} IEMTLB;
508AssertCompileSizeAlignment(IEMTLB, 64);
509/** IEMTLB::uTlbRevision increment. */
510#define IEMTLB_REVISION_INCR RT_BIT_64(36)
511/** IEMTLB::uTlbRevision mask. */
512#define IEMTLB_REVISION_MASK (~(RT_BIT_64(36) - 1))
513/** IEMTLB::uTlbPhysRev increment.
514 * @sa IEMTLBE_F_PHYS_REV */
515#define IEMTLB_PHYS_REV_INCR RT_BIT_64(10)
516/**
517 * Calculates the TLB tag for a virtual address.
518 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
519 * @param a_pTlb The TLB.
520 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
521 * the clearing of the top 16 bits won't work (if 32-bit
522 * we'll end up with mostly zeros).
523 */
524#define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr) ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )
525/**
526 * Calculates the TLB tag for a virtual address but without TLB revision.
527 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
528 * @param a_GCPtr The virtual address. Must be RTGCPTR or same size or
529 * the clearing of the top 16 bits won't work (if 32-bit
530 * we'll end up with mostly zeros).
531 */
532#define IEMTLB_CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
533/**
534 * Converts a TLB tag value into a TLB index.
535 * @returns Index into IEMTLB::aEntries.
536 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
537 */
538#define IEMTLB_TAG_TO_INDEX(a_uTag) ( (uint8_t)(a_uTag) )
539/**
540 * Converts a TLB tag value into a TLB index.
541 * @returns Index into IEMTLB::aEntries.
542 * @param a_pTlb The TLB.
543 * @param a_uTag Value returned by IEMTLB_CALC_TAG.
544 */
545#define IEMTLB_TAG_TO_ENTRY(a_pTlb, a_uTag) ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_INDEX(a_uTag)] )
546
547
548/** @name IEM_MC_F_XXX - MC block flags/clues.
549 * @todo Merge with IEM_CIMPL_F_XXX
550 * @{ */
551#define IEM_MC_F_ONLY_8086 RT_BIT_32(0)
552#define IEM_MC_F_MIN_186 RT_BIT_32(1)
553#define IEM_MC_F_MIN_286 RT_BIT_32(2)
554#define IEM_MC_F_NOT_286_OR_OLDER IEM_MC_F_MIN_386
555#define IEM_MC_F_MIN_386 RT_BIT_32(3)
556#define IEM_MC_F_MIN_486 RT_BIT_32(4)
557#define IEM_MC_F_MIN_PENTIUM RT_BIT_32(5)
558#define IEM_MC_F_MIN_PENTIUM_II IEM_MC_F_MIN_PENTIUM
559#define IEM_MC_F_MIN_CORE IEM_MC_F_MIN_PENTIUM
560#define IEM_MC_F_64BIT RT_BIT_32(6)
561#define IEM_MC_F_NOT_64BIT RT_BIT_32(7)
562/** This is set by IEMAllN8vePython.py to indicate a variation without the
563 * flags-clearing-and-checking, when there is also a variation with that.
564 * @note Do not use this manully, it's only for python and for testing in
565 * the native recompiler! */
566#define IEM_MC_F_WITHOUT_FLAGS RT_BIT_32(8)
567/** @} */
568
569/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
570 *
571 * These clues are mainly for the recompiler, so that it can emit correct code.
572 *
573 * They are processed by the python script and which also automatically
574 * calculates flags for MC blocks based on the statements, extending the use of
575 * these flags to describe MC block behavior to the recompiler core. The python
576 * script pass the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
577 * error checking purposes. The script emits the necessary fEndTb = true and
578 * similar statements as this reduces compile time a tiny bit.
579 *
580 * @{ */
581/** Flag set if direct branch, clear if absolute or indirect. */
582#define IEM_CIMPL_F_BRANCH_DIRECT RT_BIT_32(0)
583/** Flag set if indirect branch, clear if direct or relative.
584 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
585 * as well as for return instructions (RET, IRET, RETF). */
586#define IEM_CIMPL_F_BRANCH_INDIRECT RT_BIT_32(1)
587/** Flag set if relative branch, clear if absolute or indirect. */
588#define IEM_CIMPL_F_BRANCH_RELATIVE RT_BIT_32(2)
589/** Flag set if conditional branch, clear if unconditional. */
590#define IEM_CIMPL_F_BRANCH_CONDITIONAL RT_BIT_32(3)
591/** Flag set if it's a far branch (changes CS). */
592#define IEM_CIMPL_F_BRANCH_FAR RT_BIT_32(4)
593/** Convenience: Testing any kind of branch. */
594#define IEM_CIMPL_F_BRANCH_ANY (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
595
596/** Execution flags may change (IEMCPU::fExec). */
597#define IEM_CIMPL_F_MODE RT_BIT_32(5)
598/** May change significant portions of RFLAGS. */
599#define IEM_CIMPL_F_RFLAGS RT_BIT_32(6)
600/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
601#define IEM_CIMPL_F_STATUS_FLAGS RT_BIT_32(7)
602/** May trigger interrupt shadowing. */
603#define IEM_CIMPL_F_INHIBIT_SHADOW RT_BIT_32(8)
604/** May enable interrupts, so recheck IRQ immediately afterwards executing
605 * the instruction. */
606#define IEM_CIMPL_F_CHECK_IRQ_AFTER RT_BIT_32(9)
607/** May disable interrupts, so recheck IRQ immediately before executing the
608 * instruction. */
609#define IEM_CIMPL_F_CHECK_IRQ_BEFORE RT_BIT_32(10)
610/** Convenience: Check for IRQ both before and after an instruction. */
611#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
612/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
613#define IEM_CIMPL_F_VMEXIT RT_BIT_32(11)
614/** May modify FPU state.
615 * @todo Not sure if this is useful yet. */
616#define IEM_CIMPL_F_FPU RT_BIT_32(12)
617/** REP prefixed instruction which may yield before updating PC.
618 * @todo Not sure if this is useful, REP functions now return non-zero
619 * status if they don't update the PC. */
620#define IEM_CIMPL_F_REP RT_BIT_32(13)
621/** I/O instruction.
622 * @todo Not sure if this is useful yet. */
623#define IEM_CIMPL_F_IO RT_BIT_32(14)
624/** Force end of TB after the instruction. */
625#define IEM_CIMPL_F_END_TB RT_BIT_32(15)
626/** Flag set if a branch may also modify the stack (push/pop return address). */
627#define IEM_CIMPL_F_BRANCH_STACK RT_BIT_32(16)
628/** Flag set if a branch may also modify the stack (push/pop return address)
629 * and switch it (load/restore SS:RSP). */
630#define IEM_CIMPL_F_BRANCH_STACK_FAR RT_BIT_32(17)
631/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
632#define IEM_CIMPL_F_XCPT \
633 (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
634 | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
635
636/** The block calls a C-implementation instruction function with two implicit arguments.
637 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
638 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
639 * @note The python scripts will add this is missing. */
640#define IEM_CIMPL_F_CALLS_CIMPL RT_BIT_32(18)
641/** The block calls an ASM-implementation instruction function.
642 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
643 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
644 * @note The python scripts will add this is missing. */
645#define IEM_CIMPL_F_CALLS_AIMPL RT_BIT_32(19)
646/** The block calls an ASM-implementation instruction function with an implicit
647 * X86FXSTATE pointer argument.
648 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and IEM_CIMPL_F_CALLS_AIMPL.
649 * @note The python scripts will add this is missing. */
650#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE RT_BIT_32(20)
651/** @} */
652
653
654/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
655 *
656 * These flags are set when entering IEM and adjusted as code is executed, such
657 * that they will always contain the current values as instructions are
658 * finished.
659 *
660 * In recompiled execution mode, (most of) these flags are included in the
661 * translation block selection key and stored in IEMTB::fFlags alongside the
662 * IEMTB_F_XXX flags. The latter flags uses bits 31 thru 24, which are all zero
663 * in IEMCPU::fExec.
664 *
665 * @{ */
666/** Mode: The block target mode mask. */
667#define IEM_F_MODE_MASK UINT32_C(0x0000001f)
668/** Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
669#define IEM_F_MODE_CPUMODE_MASK UINT32_C(0x00000003)
670/** X86 Mode: Bit used to indicating pre-386 CPU in 16-bit mode (for eliminating
671 * conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
672 * 32-bit mode (for simplifying most memory accesses). */
673#define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
674/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
675#define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
676/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
677#define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
678
679/** X86 Mode: 16-bit on 386 or later. */
680#define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
681/** X86 Mode: 80286, 80186 and 8086/88 targetting blocks (EIP update opt). */
682#define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
683/** X86 Mode: 16-bit protected mode on 386 or later. */
684#define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
685/** X86 Mode: 16-bit protected mode on 386 or later. */
686#define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
687/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
688#define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)
689
690/** X86 Mode: 32-bit on 386 or later. */
691#define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
692/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
693#define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
694/** X86 Mode: 32-bit protected mode. */
695#define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
696/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
697#define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)
698
699/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
700#define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)
701
702/** X86 Mode: Checks if @a a_fExec represent a FLAT mode. */
703#define IEM_F_MODE_X86_IS_FLAT(a_fExec) ( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
704 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
705 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)
706
707/** Bypass access handlers when set. */
708#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
709/** Have pending hardware instruction breakpoints. */
710#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
711/** Have pending hardware data breakpoints. */
712#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)
713
714/** X86: Have pending hardware I/O breakpoints. */
715#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
716/** X86: Disregard the lock prefix (implied or not) when set. */
717#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)
718
719/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
720#define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
721
722/** Caller configurable options. */
723#define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
724
725/** X86: The current protection level (CPL) shift factor. */
726#define IEM_F_X86_CPL_SHIFT 8
727/** X86: The current protection level (CPL) mask. */
728#define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
729/** X86: The current protection level (CPL) shifted mask. */
730#define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
731
732/** X86 execution context.
733 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
734 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
735 * mode. */
736#define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
737/** X86 context: Plain regular execution context. */
738#define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
739/** X86 context: VT-x enabled. */
740#define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
741/** X86 context: AMD-V enabled. */
742#define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
743/** X86 context: In AMD-V or VT-x guest mode. */
744#define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
745/** X86 context: System management mode (SMM). */
746#define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)
747
748/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
749 * iemRegFinishClearingRF() most for most situations (CPUMCTX_DBG_HIT_DRX_MASK
750 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
751 * alread). */
752
753/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
754 * iemRegFinishClearingRF() most for most situations
755 * (CPUMCTX_DBG_HIT_DRX_MASK and CPUMCTX_DBG_DBGF_MASK are covered by
756 * the IEM_F_PENDING_BRK_XXX bits alread). */
757
758/** @} */
759
760
761/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
762 *
763 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
764 * translation block flags. The combined flag mask (subject to
765 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
766 *
767 * @{ */
768/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
769#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)
770
771/** Type: The block type mask. */
772#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
773/** Type: Purly threaded recompiler (via tables). */
774#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
775/** Type: Native recompilation. */
776#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)
777
778/** Set when we're starting the block in an "interrupt shadow".
779 * We don't need to distingish between the two types of this mask, thus the one.
780 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
781#define IEMTB_F_INHIBIT_SHADOW UINT32_C(0x04000000)
782/** Set when we're currently inhibiting NMIs
783 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
784#define IEMTB_F_INHIBIT_NMI UINT32_C(0x08000000)
785
786/** Checks that EIP/IP is wihin CS.LIM before each instruction. Used when
787 * we're close the limit before starting a TB, as determined by
788 * iemGetTbFlagsForCurrentPc(). */
789#define IEMTB_F_CS_LIM_CHECKS UINT32_C(0x10000000)
790
791/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
792 * @note We skip the CPL as we don't currently generate ring-specific code,
793 * that's all handled in CIMPL functions.
794 *
795 * For the same reasons, we skip all of IEM_F_X86_CTX_MASK, with the
796 * exception of SMM (which we don't implement). */
797#define IEMTB_F_KEY_MASK ( (UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEM_F_X86_CPL_MASK | IEMTB_F_TYPE_MASK)) \
798 | IEM_F_X86_CTX_SMM)
799/** @} */
800
801AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
802AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
803AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
804AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
805AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
806AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
807AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
808AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
809AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
810AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
811AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
812AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
813AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_16BIT);
814AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
815AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
816AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
817AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
818AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
819AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);
820
821AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
822AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
823AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
824AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
825AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
826AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
827AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
828AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
829AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
830AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT);
831AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
832AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);
833
834AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_64BIT);
835AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
836AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
837
838/** Native instruction type for use with the native code generator.
839 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
840#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
841typedef uint8_t IEMNATIVEINSTR;
842#else
843typedef uint32_t IEMNATIVEINSTR;
844#endif
845/** Pointer to a native instruction unit. */
846typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
847/** Pointer to a const native instruction unit. */
848typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
849
850/**
851 * A call for the threaded call table.
852 */
853typedef struct IEMTHRDEDCALLENTRY
854{
855 /** The function to call (IEMTHREADEDFUNCS). */
856 uint16_t enmFunction;
857 /** Instruction number in the TB (for statistics). */
858 uint8_t idxInstr;
859 uint8_t uUnused0;
860
861 /** Offset into IEMTB::pabOpcodes. */
862 uint16_t offOpcode;
863 /** The opcode length. */
864 uint8_t cbOpcode;
865 /** Index in to IEMTB::aRanges. */
866 uint8_t idxRange;
867
868 /** Generic parameters. */
869 uint64_t auParams[3];
870} IEMTHRDEDCALLENTRY;
871AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
872/** Pointer to a threaded call entry. */
873typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
874/** Pointer to a const threaded call entry. */
875typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
876
877/**
878 * Native IEM TB 'function' typedef.
879 *
880 * This will throw/longjmp on occation.
881 *
882 * @note AMD64 doesn't have that many non-volatile registers and does sport
883 * 32-bit address displacments, so we don't need pCtx.
884 *
885 * On ARM64 pCtx allows us to directly address the whole register
886 * context without requiring a separate indexing register holding the
887 * offset. This saves an instruction loading the offset for each guest
888 * CPU context access, at the cost of a non-volatile register.
889 * Fortunately, ARM64 has quite a lot more registers.
890 */
891typedef
892#ifdef RT_ARCH_AMD64
893int FNIEMTBNATIVE(PVMCPUCC pVCpu)
894#else
895int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
896#endif
897#if RT_CPLUSPLUS_PREREQ(201700)
898 IEM_NOEXCEPT_MAY_LONGJMP
899#endif
900 ;
901/** Pointer to a native IEM TB entry point function.
902 * This will throw/longjmp on occation. */
903typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
904
905
906/**
907 * Translation block debug info entry type.
908 */
909typedef enum IEMTBDBGENTRYTYPE
910{
911 kIemTbDbgEntryType_Invalid = 0,
912 /** The entry is for marking a native code position.
913 * Entries following this all apply to this position. */
914 kIemTbDbgEntryType_NativeOffset,
915 /** The entry is for a new guest instruction. */
916 kIemTbDbgEntryType_GuestInstruction,
917 /** Marks the start of a threaded call. */
918 kIemTbDbgEntryType_ThreadedCall,
919 /** Marks the location of a label. */
920 kIemTbDbgEntryType_Label,
921 /** Info about a host register shadowing a guest register. */
922 kIemTbDbgEntryType_GuestRegShadowing,
923 kIemTbDbgEntryType_End
924} IEMTBDBGENTRYTYPE;
925
926/**
927 * Translation block debug info entry.
928 */
929typedef union IEMTBDBGENTRY
930{
931 /** Plain 32-bit view. */
932 uint32_t u;
933
934 /** Generic view for getting at the type field. */
935 struct
936 {
937 /** IEMTBDBGENTRYTYPE */
938 uint32_t uType : 4;
939 uint32_t uTypeSpecific : 28;
940 } Gen;
941
942 struct
943 {
944 /** kIemTbDbgEntryType_ThreadedCall1. */
945 uint32_t uType : 4;
946 /** Native code offset. */
947 uint32_t offNative : 28;
948 } NativeOffset;
949
950 struct
951 {
952 /** kIemTbDbgEntryType_GuestInstruction. */
953 uint32_t uType : 4;
954 uint32_t uUnused : 4;
955 /** The IEM_F_XXX flags. */
956 uint32_t fExec : 24;
957 } GuestInstruction;
958
959 struct
960 {
961 /* kIemTbDbgEntryType_ThreadedCall. */
962 uint32_t uType : 4;
963 /** Set if the call was recompiled to native code, clear if just calling
964 * threaded function. */
965 uint32_t fRecompiled : 1;
966 uint32_t uUnused : 11;
967 /** The threaded call number (IEMTHREADEDFUNCS). */
968 uint32_t enmCall : 16;
969 } ThreadedCall;
970
971 struct
972 {
973 /* kIemTbDbgEntryType_Label. */
974 uint32_t uType : 4;
975 uint32_t uUnused : 4;
976 /** The label type (IEMNATIVELABELTYPE). */
977 uint32_t enmLabel : 8;
978 /** The label data. */
979 uint32_t uData : 16;
980 } Label;
981
982 struct
983 {
984 /* kIemTbDbgEntryType_GuestRegShadowing. */
985 uint32_t uType : 4;
986 uint32_t uUnused : 4;
987 /** The guest register being shadowed (IEMNATIVEGSTREG). */
988 uint32_t idxGstReg : 8;
989 /** The host new register number, UINT8_MAX if dropped. */
990 uint32_t idxHstReg : 8;
991 /** The previous host register number, UINT8_MAX if new. */
992 uint32_t idxHstRegPrev : 8;
993 } GuestRegShadowing;
994} IEMTBDBGENTRY;
995AssertCompileSize(IEMTBDBGENTRY, sizeof(uint32_t));
996/** Pointer to a debug info entry. */
997typedef IEMTBDBGENTRY *PIEMTBDBGENTRY;
998/** Pointer to a const debug info entry. */
999typedef IEMTBDBGENTRY const *PCIEMTBDBGENTRY;
1000
1001/**
1002 * Translation block debug info.
1003 */
1004typedef struct IEMTBDBG
1005{
1006 /** Number of entries in aEntries. */
1007 uint32_t cEntries;
1008 /** Debug info entries. */
1009 RT_FLEXIBLE_ARRAY_EXTENSION
1010 IEMTBDBGENTRY aEntries[RT_FLEXIBLE_ARRAY];
1011} IEMTBDBG;
1012/** Pointer to TB debug info. */
1013typedef IEMTBDBG *PIEMTBDBG;
1014/** Pointer to const TB debug info. */
1015typedef IEMTBDBG const *PCIEMTBDBG;
1016
1017
1018/**
1019 * Translation block.
1020 *
1021 * The current plan is to just keep TBs and associated lookup hash table private
1022 * to each VCpu as that simplifies TB removal greatly (no races) and generally
1023 * avoids using expensive atomic primitives for updating lists and stuff.
1024 */
1025#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
1026typedef struct IEMTB
1027{
1028 /** Next block with the same hash table entry. */
1029 struct IEMTB *pNext;
1030 /** Usage counter. */
1031 uint32_t cUsed;
1032 /** The IEMCPU::msRecompilerPollNow last time it was used. */
1033 uint32_t msLastUsed;
1034
1035 /** @name What uniquely identifies the block.
1036 * @{ */
1037 RTGCPHYS GCPhysPc;
1038 /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
1039 uint32_t fFlags;
1040 union
1041 {
1042 struct
1043 {
1044 /**< Relevant CS X86DESCATTR_XXX bits. */
1045 uint16_t fAttr;
1046 } x86;
1047 };
1048 /** @} */
1049
1050 /** Number of opcode ranges. */
1051 uint8_t cRanges;
1052 /** Statistics: Number of instructions in the block. */
1053 uint8_t cInstructions;
1054
1055 /** Type specific info. */
1056 union
1057 {
1058 struct
1059 {
1060 /** The call sequence table. */
1061 PIEMTHRDEDCALLENTRY paCalls;
1062 /** Number of calls in paCalls. */
1063 uint16_t cCalls;
1064 /** Number of calls allocated. */
1065 uint16_t cAllocated;
1066 } Thrd;
1067 struct
1068 {
1069 /** The native instructions (PFNIEMTBNATIVE). */
1070 PIEMNATIVEINSTR paInstructions;
1071 /** Number of instructions pointed to by paInstructions. */
1072 uint32_t cInstructions;
1073 } Native;
1074 /** Generic view for zeroing when freeing. */
1075 struct
1076 {
1077 uintptr_t uPtr;
1078 uint32_t uData;
1079 } Gen;
1080 };
1081
1082 /** The allocation chunk this TB belongs to. */
1083 uint8_t idxAllocChunk;
1084 uint8_t bUnused;
1085
1086 /** Number of bytes of opcodes stored in pabOpcodes.
1087 * @todo this field isn't really needed, aRanges keeps the actual info. */
1088 uint16_t cbOpcodes;
1089 /** Pointer to the opcode bytes this block was recompiled from. */
1090 uint8_t *pabOpcodes;
1091
1092 /** Debug info if enabled.
1093 * This is only generated by the native recompiler. */
1094 PIEMTBDBG pDbgInfo;
1095
1096 /* --- 64 byte cache line end --- */
1097
1098 /** Opcode ranges.
1099 *
1100 * The opcode checkers and maybe TLB loading functions will use this to figure
1101 * out what to do. The parameter will specify an entry and the opcode offset to
1102 * start at and the minimum number of bytes to verify (instruction length).
1103 *
1104 * When VT-x and AMD-V looks up the opcode bytes for an exitting instruction,
1105 * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
1106 * code TLB (must have a valid entry for that address) and scan the ranges to
1107 * locate the corresponding opcodes. Probably.
1108 */
1109 struct IEMTBOPCODERANGE
1110 {
1111 /** Offset within pabOpcodes. */
1112 uint16_t offOpcodes;
1113 /** Number of bytes. */
1114 uint16_t cbOpcodes;
1115 /** The page offset. */
1116 RT_GCC_EXTENSION
1117 uint16_t offPhysPage : 12;
1118 /** Unused bits. */
1119 RT_GCC_EXTENSION
1120 uint16_t u2Unused : 2;
1121 /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
1122 RT_GCC_EXTENSION
1123 uint16_t idxPhysPage : 2;
1124 } aRanges[8];
1125
1126 /** Physical pages that this TB covers.
1127 * The GCPhysPc w/o page offset is element zero, so starting here with 1. */
1128 RTGCPHYS aGCPhysPages[2];
1129} IEMTB;
1130#pragma pack()
1131AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
1132AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
1133AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
1134AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
1135AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
1136AssertCompileMemberOffset(IEMTB, aRanges, 64);
1137AssertCompileMemberSize(IEMTB, aRanges[0], 6);
1138#if 1
1139AssertCompileSize(IEMTB, 128);
1140# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
1141#else
1142AssertCompileSize(IEMTB, 168);
1143# undef IEMTB_SIZE_IS_POWER_OF_TWO
1144#endif
1145
1146/** Pointer to a translation block. */
1147typedef IEMTB *PIEMTB;
1148/** Pointer to a const translation block. */
1149typedef IEMTB const *PCIEMTB;
1150
1151/**
1152 * A chunk of memory in the TB allocator.
1153 */
1154typedef struct IEMTBCHUNK
1155{
1156 /** Pointer to the translation blocks in this chunk. */
1157 PIEMTB paTbs;
1158#ifdef IN_RING0
1159 /** Allocation handle. */
1160 RTR0MEMOBJ hMemObj;
1161#endif
1162} IEMTBCHUNK;
1163
1164/**
1165 * A per-CPU translation block allocator.
1166 *
1167 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1168 * the length of the collision list, and of course also for cache line alignment
1169 * reasons, the TBs must be allocated with at least 64-byte alignment.
1170 * Memory is there therefore allocated using one of the page aligned allocators.
1171 *
1172 *
1173 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1174 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1175 * that enables us to quickly calculate the allocation bitmap position when
1176 * freeing the translation block.
1177 */
1178typedef struct IEMTBALLOCATOR
1179{
1180 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1181 uint32_t uMagic;
1182
1183#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1184 /** Mask corresponding to cTbsPerChunk - 1. */
1185 uint32_t fChunkMask;
1186 /** Shift count corresponding to cTbsPerChunk. */
1187 uint8_t cChunkShift;
1188#else
1189 uint32_t uUnused;
1190 uint8_t bUnused;
1191#endif
1192 /** Number of chunks we're allowed to allocate. */
1193 uint8_t cMaxChunks;
1194 /** Number of chunks currently populated. */
1195 uint16_t cAllocatedChunks;
1196 /** Number of translation blocks per chunk. */
1197 uint32_t cTbsPerChunk;
1198 /** Chunk size. */
1199 uint32_t cbPerChunk;
1200
1201 /** The maximum number of TBs. */
1202 uint32_t cMaxTbs;
1203 /** Total number of TBs in the populated chunks.
1204 * (cAllocatedChunks * cTbsPerChunk) */
1205 uint32_t cTotalTbs;
1206 /** The current number of TBs in use.
1207 * The number of free TBs: cAllocatedTbs - cInUseTbs; */
1208 uint32_t cInUseTbs;
1209 /** Statistics: Number of the cInUseTbs that are native ones. */
1210 uint32_t cNativeTbs;
1211 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1212 uint32_t cThreadedTbs;
1213
1214 /** Where to start pruning TBs from when we're out.
1215 * See iemTbAllocatorAllocSlow for details. */
1216 uint32_t iPruneFrom;
1217 /** Hint about which bit to start scanning the bitmap from. */
1218 uint32_t iStartHint;
1219 /** Where to start pruning native TBs from when we're out of executable memory.
1220 * See iemTbAllocatorFreeupNativeSpace for details. */
1221 uint32_t iPruneNativeFrom;
1222 uint32_t uPadding;
1223
1224 /** Statistics: Number of TB allocation calls. */
1225 STAMCOUNTER StatAllocs;
1226 /** Statistics: Number of TB free calls. */
1227 STAMCOUNTER StatFrees;
1228 /** Statistics: Time spend pruning. */
1229 STAMPROFILE StatPrune;
1230 /** Statistics: Time spend pruning native TBs. */
1231 STAMPROFILE StatPruneNative;
1232
1233 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1234 PIEMTB pDelayedFreeHead;
1235
1236 /** Allocation chunks. */
1237 IEMTBCHUNK aChunks[256];
1238
1239 /** Allocation bitmap for all possible chunk chunks. */
1240 RT_FLEXIBLE_ARRAY_EXTENSION
1241 uint64_t bmAllocated[RT_FLEXIBLE_ARRAY];
1242} IEMTBALLOCATOR;
1243/** Pointer to a TB allocator. */
1244typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1245
1246/** Magic value for the TB allocator (Emmet Harley Cohen). */
1247#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
1248
1249
1250/**
1251 * A per-CPU translation block cache (hash table).
1252 *
1253 * The hash table is allocated once during IEM initialization and size double
1254 * the max TB count, rounded up to the nearest power of two (so we can use and
1255 * AND mask rather than a rest division when hashing).
1256 */
1257typedef struct IEMTBCACHE
1258{
1259 /** Magic value (IEMTBCACHE_MAGIC). */
1260 uint32_t uMagic;
1261 /** Size of the hash table. This is a power of two. */
1262 uint32_t cHash;
1263 /** The mask corresponding to cHash. */
1264 uint32_t uHashMask;
1265 uint32_t uPadding;
1266
1267 /** @name Statistics
1268 * @{ */
1269 /** Number of collisions ever. */
1270 STAMCOUNTER cCollisions;
1271
1272 /** Statistics: Number of TB lookup misses. */
1273 STAMCOUNTER cLookupMisses;
1274 /** Statistics: Number of TB lookup hits (debug only). */
1275 STAMCOUNTER cLookupHits;
1276 STAMCOUNTER auPadding2[3];
1277 /** Statistics: Collision list length pruning. */
1278 STAMPROFILE StatPrune;
1279 /** @} */
1280
1281 /** The hash table itself.
1282 * @note The lower 6 bits of the pointer is used for keeping the collision
1283 * list length, so we can take action when it grows too long.
1284 * This works because TBs are allocated using a 64 byte (or
1285 * higher) alignment from page aligned chunks of memory, so the lower
1286 * 6 bits of the address will always be zero.
1287 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1288 */
1289 RT_FLEXIBLE_ARRAY_EXTENSION
1290 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1291} IEMTBCACHE;
1292/** Pointer to a per-CPU translation block cahce. */
1293typedef IEMTBCACHE *PIEMTBCACHE;
1294
1295/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1296#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1297
1298/** The collision count mask for IEMTBCACHE::apHash entries. */
1299#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1300/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1301#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1302/** Combine a TB pointer and a collision list length into a value for an
1303 * IEMTBCACHE::apHash entry. */
1304#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1305/** Combine a TB pointer and a collision list length into a value for an
1306 * IEMTBCACHE::apHash entry. */
1307#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1308/** Combine a TB pointer and a collision list length into a value for an
1309 * IEMTBCACHE::apHash entry. */
1310#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
1311
1312/**
1313 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1314 */
1315#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1316 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1317
1318/**
1319 * Calculates the hash table slot for a TB from physical PC address and TB
1320 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1321 */
1322#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1323 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
1324
1325
1326/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1327 *
1328 * These flags parallels the main IEM_CIMPL_F_BRANCH_XXX flags.
1329 *
1330 * @{ */
1331/** Value if no branching happened recently. */
1332#define IEMBRANCHED_F_NO UINT8_C(0x00)
1333/** Flag set if direct branch, clear if absolute or indirect. */
1334#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1335/** Flag set if indirect branch, clear if direct or relative. */
1336#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1337/** Flag set if relative branch, clear if absolute or indirect. */
1338#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1339/** Flag set if conditional branch, clear if unconditional. */
1340#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1341/** Flag set if it's a far branch. */
1342#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1343/** Flag set if the stack pointer is modified. */
1344#define IEMBRANCHED_F_STACK UINT8_C(0x20)
1345/** Flag set if the stack pointer and (maybe) the stack segment are modified. */
1346#define IEMBRANCHED_F_STACK_FAR UINT8_C(0x40)
1347/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero bytes relative jump. */
1348#define IEMBRANCHED_F_ZERO UINT8_C(0x80)
1349/** @} */
1350
1351
1352/**
1353 * The per-CPU IEM state.
1354 */
1355typedef struct IEMCPU
1356{
1357 /** Info status code that needs to be propagated to the IEM caller.
1358 * This cannot be passed internally, as it would complicate all success
1359 * checks within the interpreter making the code larger and almost impossible
1360 * to get right. Instead, we'll store status codes to pass on here. Each
1361 * source of these codes will perform appropriate sanity checks. */
1362 int32_t rcPassUp; /* 0x00 */
1363 /** Execution flag, IEM_F_XXX. */
1364 uint32_t fExec; /* 0x04 */
1365
1366 /** @name Decoder state.
1367 * @{ */
1368#ifdef IEM_WITH_CODE_TLB
1369 /** The offset of the next instruction byte. */
1370 uint32_t offInstrNextByte; /* 0x08 */
1371 /** The number of bytes available at pbInstrBuf for the current instruction.
1372 * This takes the max opcode length into account so that doesn't need to be
1373 * checked separately. */
1374 uint32_t cbInstrBuf; /* 0x0c */
1375 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
1376 * This can be NULL if the page isn't mappable for some reason, in which
1377 * case we'll do fallback stuff.
1378 *
1379 * If we're executing an instruction from a user specified buffer,
1380 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1381 * aligned pointer but pointer to the user data.
1382 *
1383 * For instructions crossing pages, this will start on the first page and be
1384 * advanced to the next page by the time we've decoded the instruction. This
1385 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1386 */
1387 uint8_t const *pbInstrBuf; /* 0x10 */
1388# if ARCH_BITS == 32
1389 uint32_t uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */
1390# endif
1391 /** The program counter corresponding to pbInstrBuf.
1392 * This is set to a non-canonical address when we need to invalidate it. */
1393 uint64_t uInstrBufPc; /* 0x18 */
1394 /** The guest physical address corresponding to pbInstrBuf. */
1395 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1396 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1397 * This takes the CS segment limit into account.
1398 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1399 uint16_t cbInstrBufTotal; /* 0x28 */
1400# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1401 /** Offset into pbInstrBuf of the first byte of the current instruction.
1402 * Can be negative to efficiently handle cross page instructions. */
1403 int16_t offCurInstrStart; /* 0x2a */
1404
1405 /** The prefix mask (IEM_OP_PRF_XXX). */
1406 uint32_t fPrefixes; /* 0x2c */
1407 /** The extra REX ModR/M register field bit (REX.R << 3). */
1408 uint8_t uRexReg; /* 0x30 */
1409 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1410 * (REX.B << 3). */
1411 uint8_t uRexB; /* 0x31 */
1412 /** The extra REX SIB index field bit (REX.X << 3). */
1413 uint8_t uRexIndex; /* 0x32 */
1414
1415 /** The effective segment register (X86_SREG_XXX). */
1416 uint8_t iEffSeg; /* 0x33 */
1417
1418 /** The offset of the ModR/M byte relative to the start of the instruction. */
1419 uint8_t offModRm; /* 0x34 */
1420
1421# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
1422 /** The current offset into abOpcode. */
1423 uint8_t offOpcode; /* 0x35 */
1424# else
1425 uint8_t bUnused; /* 0x35 */
1426# endif
1427# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1428 uint8_t abOpaqueDecoderPart1[0x36 - 0x2a];
1429# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1430
1431#else /* !IEM_WITH_CODE_TLB */
1432# ifndef IEM_WITH_OPAQUE_DECODER_STATE
1433 /** The size of what has currently been fetched into abOpcode. */
1434 uint8_t cbOpcode; /* 0x08 */
1435 /** The current offset into abOpcode. */
1436 uint8_t offOpcode; /* 0x09 */
1437 /** The offset of the ModR/M byte relative to the start of the instruction. */
1438 uint8_t offModRm; /* 0x0a */
1439
1440 /** The effective segment register (X86_SREG_XXX). */
1441 uint8_t iEffSeg; /* 0x0b */
1442
1443 /** The prefix mask (IEM_OP_PRF_XXX). */
1444 uint32_t fPrefixes; /* 0x0c */
1445 /** The extra REX ModR/M register field bit (REX.R << 3). */
1446 uint8_t uRexReg; /* 0x10 */
1447 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
1448 * (REX.B << 3). */
1449 uint8_t uRexB; /* 0x11 */
1450 /** The extra REX SIB index field bit (REX.X << 3). */
1451 uint8_t uRexIndex; /* 0x12 */
1452
1453# else /* IEM_WITH_OPAQUE_DECODER_STATE */
1454 uint8_t abOpaqueDecoderPart1[0x13 - 0x08];
1455# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1456#endif /* !IEM_WITH_CODE_TLB */
1457
1458#ifndef IEM_WITH_OPAQUE_DECODER_STATE
1459 /** The effective operand mode. */
1460 IEMMODE enmEffOpSize; /* 0x36, 0x13 */
1461 /** The default addressing mode. */
1462 IEMMODE enmDefAddrMode; /* 0x37, 0x14 */
1463 /** The effective addressing mode. */
1464 IEMMODE enmEffAddrMode; /* 0x38, 0x15 */
1465 /** The default operand mode. */
1466 IEMMODE enmDefOpSize; /* 0x39, 0x16 */
1467
1468 /** Prefix index (VEX.pp) for two byte and three byte tables. */
1469 uint8_t idxPrefix; /* 0x3a, 0x17 */
1470 /** 3rd VEX/EVEX/XOP register.
1471 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
1472 uint8_t uVex3rdReg; /* 0x3b, 0x18 */
1473 /** The VEX/EVEX/XOP length field. */
1474 uint8_t uVexLength; /* 0x3c, 0x19 */
1475 /** Additional EVEX stuff. */
1476 uint8_t fEvexStuff; /* 0x3d, 0x1a */
1477
1478# ifndef IEM_WITH_CODE_TLB
1479 /** Explicit alignment padding. */
1480 uint8_t abAlignment2a[1]; /* 0x1b */
1481# endif
1482 /** The FPU opcode (FOP). */
1483 uint16_t uFpuOpcode; /* 0x3e, 0x1c */
1484# ifndef IEM_WITH_CODE_TLB
1485 /** Explicit alignment padding. */
1486 uint8_t abAlignment2b[2]; /* 0x1e */
1487# endif
1488
1489 /** The opcode bytes. */
1490 uint8_t abOpcode[15]; /* 0x40, 0x20 */
1491 /** Explicit alignment padding. */
1492# ifdef IEM_WITH_CODE_TLB
1493 //uint8_t abAlignment2c[0x4f - 0x4f]; /* 0x4f */
1494# else
1495 uint8_t abAlignment2c[0x4f - 0x2f]; /* 0x2f */
1496# endif
1497
1498#else /* IEM_WITH_OPAQUE_DECODER_STATE */
1499# ifdef IEM_WITH_CODE_TLB
1500 uint8_t abOpaqueDecoderPart2[0x4f - 0x36];
1501# else
1502 uint8_t abOpaqueDecoderPart2[0x4f - 0x13];
1503# endif
1504#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
1505 /** @} */
1506
1507
1508 /** The number of active guest memory mappings. */
1509 uint8_t cActiveMappings; /* 0x4f, 0x4f */
1510
1511 /** Records for tracking guest memory mappings. */
1512 struct
1513 {
1514 /** The address of the mapped bytes. */
1515 R3R0PTRTYPE(void *) pv;
1516 /** The access flags (IEM_ACCESS_XXX).
1517 * IEM_ACCESS_INVALID if the entry is unused. */
1518 uint32_t fAccess;
1519#if HC_ARCH_BITS == 64
1520 uint32_t u32Alignment4; /**< Alignment padding. */
1521#endif
1522 } aMemMappings[3]; /* 0x50 LB 0x30 */
1523
1524 /** Locking records for the mapped memory. */
1525 union
1526 {
1527 PGMPAGEMAPLOCK Lock;
1528 uint64_t au64Padding[2];
1529 } aMemMappingLocks[3]; /* 0x80 LB 0x30 */
1530
1531 /** Bounce buffer info.
1532 * This runs in parallel to aMemMappings. */
1533 struct
1534 {
1535 /** The physical address of the first byte. */
1536 RTGCPHYS GCPhysFirst;
1537 /** The physical address of the second page. */
1538 RTGCPHYS GCPhysSecond;
1539 /** The number of bytes in the first page. */
1540 uint16_t cbFirst;
1541 /** The number of bytes in the second page. */
1542 uint16_t cbSecond;
1543 /** Whether it's unassigned memory. */
1544 bool fUnassigned;
1545 /** Explicit alignment padding. */
1546 bool afAlignment5[3];
1547 } aMemBbMappings[3]; /* 0xb0 LB 0x48 */
1548
1549 /** The flags of the current exception / interrupt. */
1550 uint32_t fCurXcpt; /* 0xf8 */
1551 /** The current exception / interrupt. */
1552 uint8_t uCurXcpt; /* 0xfc */
1553 /** Exception / interrupt recursion depth. */
1554 int8_t cXcptRecursions; /* 0xfb */
1555
1556 /** The next unused mapping index.
1557 * @todo try find room for this up with cActiveMappings. */
1558 uint8_t iNextMapping; /* 0xfd */
1559 uint8_t abAlignment7[1];
1560
1561 /** Bounce buffer storage.
1562 * This runs in parallel to aMemMappings and aMemBbMappings. */
1563 struct
1564 {
1565 uint8_t ab[512];
1566 } aBounceBuffers[3]; /* 0x100 LB 0x600 */
1567
1568
1569 /** Pointer set jump buffer - ring-3 context. */
1570 R3PTRTYPE(jmp_buf *) pJmpBufR3;
1571 /** Pointer set jump buffer - ring-0 context. */
1572 R0PTRTYPE(jmp_buf *) pJmpBufR0;
1573
1574 /** @todo Should move this near @a fCurXcpt later. */
1575 /** The CR2 for the current exception / interrupt. */
1576 uint64_t uCurXcptCr2;
1577 /** The error code for the current exception / interrupt. */
1578 uint32_t uCurXcptErr;
1579
1580 /** @name Statistics
1581 * @{ */
1582 /** The number of instructions we've executed. */
1583 uint32_t cInstructions;
1584 /** The number of potential exits. */
1585 uint32_t cPotentialExits;
1586 /** The number of bytes data or stack written (mostly for IEMExecOneEx).
1587 * This may contain uncommitted writes. */
1588 uint32_t cbWritten;
1589 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
1590 uint32_t cRetInstrNotImplemented;
1591 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
1592 uint32_t cRetAspectNotImplemented;
1593 /** Counts informational statuses returned (other than VINF_SUCCESS). */
1594 uint32_t cRetInfStatuses;
1595 /** Counts other error statuses returned. */
1596 uint32_t cRetErrStatuses;
1597 /** Number of times rcPassUp has been used. */
1598 uint32_t cRetPassUpStatus;
1599 /** Number of times RZ left with instruction commit pending for ring-3. */
1600 uint32_t cPendingCommit;
1601 /** Number of long jumps. */
1602 uint32_t cLongJumps;
1603 /** @} */
1604
1605 /** @name Target CPU information.
1606 * @{ */
1607#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1608 /** The target CPU. */
1609 uint8_t uTargetCpu;
1610#else
1611 uint8_t bTargetCpuPadding;
1612#endif
1613 /** For selecting assembly works matching the target CPU EFLAGS behaviour, see
1614 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
1615 * native host support and the 2nd for when there is.
1616 *
1617 * The two values are typically indexed by a g_CpumHostFeatures bit.
1618 *
1619 * This is for instance used for the BSF & BSR instructions where AMD and
1620 * Intel CPUs produce different EFLAGS. */
1621 uint8_t aidxTargetCpuEflFlavour[2];
1622
1623 /** The CPU vendor. */
1624 CPUMCPUVENDOR enmCpuVendor;
1625 /** @} */
1626
1627 /** @name Host CPU information.
1628 * @{ */
1629 /** The CPU vendor. */
1630 CPUMCPUVENDOR enmHostCpuVendor;
1631 /** @} */
1632
1633 /** Counts RDMSR \#GP(0) LogRel(). */
1634 uint8_t cLogRelRdMsr;
1635 /** Counts WRMSR \#GP(0) LogRel(). */
1636 uint8_t cLogRelWrMsr;
1637 /** Alignment padding. */
1638 uint8_t abAlignment9[46];
1639
1640 /** @name Recompilation
1641 * @{ */
1642 /** Pointer to the current translation block.
1643 * This can either be one being executed or one being compiled. */
1644 R3PTRTYPE(PIEMTB) pCurTbR3;
1645 /** Fixed TB used for threaded recompilation.
1646 * This is allocated once with maxed-out sizes and re-used afterwards. */
1647 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
1648 /** Pointer to the ring-3 TB cache for this EMT. */
1649 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
1650 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
1651 * The TBs are based on physical addresses, so this is needed to correleated
1652 * RIP to opcode bytes stored in the TB (AMD-V / VT-x). */
1653 uint64_t uCurTbStartPc;
1654 /** Number of threaded TBs executed. */
1655 uint64_t cTbExecThreaded;
1656 /** Number of native TBs executed. */
1657 uint64_t cTbExecNative;
1658 /** Whether we need to check the opcode bytes for the current instruction.
1659 * This is set by a previous instruction if it modified memory or similar. */
1660 bool fTbCheckOpcodes;
1661 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
1662 uint8_t fTbBranched;
1663 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
1664 bool fTbCrossedPage;
1665 /** Whether to end the current TB. */
1666 bool fEndTb;
1667 /** Number of instructions before we need emit an IRQ check call again.
1668 * This helps making sure we don't execute too long w/o checking for
1669 * interrupts and immediately following instructions that may enable
1670 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
1671 * required to make sure we check following the next instruction as well, see
1672 * fTbCurInstrIsSti. */
1673 uint8_t cInstrTillIrqCheck;
1674 /** Indicates that the current instruction is an STI. This is set by the
1675 * iemCImpl_sti code and subsequently cleared by the recompiler. */
1676 bool fTbCurInstrIsSti;
1677 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
1678 uint16_t cbOpcodesAllocated;
1679 /** The current instruction number in a native TB.
1680 * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
1681 * and will be picked up by the TB execution loop. Only used when
1682 * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
1683 uint8_t idxTbCurInstr;
1684 /** Spaced reserved for recompiler data / alignment. */
1685 bool afRecompilerStuff1[3];
1686 /** The virtual sync time at the last timer poll call. */
1687 uint32_t msRecompilerPollNow;
1688 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
1689 uint32_t fTbCurInstr;
1690 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
1691 uint32_t fTbPrevInstr;
1692 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
1693 RTGCPHYS GCPhysInstrBufPrev;
1694 /** Copy of IEMCPU::GCPhysInstrBuf after decoding a branch instruction.
1695 * This is used together with fTbBranched and GCVirtTbBranchSrcBuf to determin
1696 * whether a branch instruction jumps to a new page or stays within the
1697 * current one. */
1698 RTGCPHYS GCPhysTbBranchSrcBuf;
1699 /** Copy of IEMCPU::uInstrBufPc after decoding a branch instruction. */
1700 uint64_t GCVirtTbBranchSrcBuf;
1701 /** Pointer to the ring-3 TB allocator for this EMT. */
1702 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
1703 /** Pointer to the ring-3 executable memory allocator for this EMT. */
1704 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
1705 /** Pointer to the native recompiler state for ring-3. */
1706 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
1707 /** Alignment padding. */
1708 uint64_t auAlignment10[3];
1709 /** Statistics: Times TB execution was broken off before reaching the end. */
1710 STAMCOUNTER StatTbExecBreaks;
1711 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
1712 STAMCOUNTER StatCheckIrqBreaks;
1713 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
1714 STAMCOUNTER StatCheckModeBreaks;
1715 /** Statistics: Times a post jump target check missed and had to find new TB. */
1716 STAMCOUNTER StatCheckBranchMisses;
1717 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
1718 STAMCOUNTER StatCheckNeedCsLimChecking;
1719 /** Native TB statistics: Number of fully recompiled TBs. */
1720 STAMCOUNTER StatNativeFullyRecompiledTbs;
1721 /** Threaded TB statistics: Number of instructions per TB. */
1722 STAMPROFILE StatTbThreadedInstr;
1723 /** Threaded TB statistics: Number of calls per TB. */
1724 STAMPROFILE StatTbThreadedCalls;
1725 /** Native TB statistics: Native code size per TB. */
1726 STAMPROFILE StatTbNativeCode;
1727 /** Native TB statistics: Profiling native recompilation. */
1728 STAMPROFILE StatNativeRecompilation;
1729 /** Native TB statistics: Number of calls per TB that were recompiled properly. */
1730 STAMPROFILE StatNativeCallsRecompiled;
1731 /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
1732 STAMPROFILE StatNativeCallsThreaded;
1733 /** @} */
1734
1735 /** Data TLB.
1736 * @remarks Must be 64-byte aligned. */
1737 IEMTLB DataTlb;
1738 /** Instruction TLB.
1739 * @remarks Must be 64-byte aligned. */
1740 IEMTLB CodeTlb;
1741
1742 /** Exception statistics. */
1743 STAMCOUNTER aStatXcpts[32];
1744 /** Interrupt statistics. */
1745 uint32_t aStatInts[256];
1746
1747#if defined(VBOX_WITH_STATISTICS) && !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
1748 /** Instruction statistics for ring-0/raw-mode. */
1749 IEMINSTRSTATS StatsRZ;
1750 /** Instruction statistics for ring-3. */
1751 IEMINSTRSTATS StatsR3;
1752#endif
1753} IEMCPU;
1754AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
1755AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
1756AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
1757AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
1758AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
1759AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
1760
1761/** Pointer to the per-CPU IEM state. */
1762typedef IEMCPU *PIEMCPU;
1763/** Pointer to the const per-CPU IEM state. */
1764typedef IEMCPU const *PCIEMCPU;
1765
1766
1767/** @def IEM_GET_CTX
1768 * Gets the guest CPU context for the calling EMT.
1769 * @returns PCPUMCTX
1770 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1771 */
1772#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
1773
1774/** @def IEM_CTX_ASSERT
1775 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
1776 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1777 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
1778 */
1779#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
1780 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
1781 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
1782 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
1783
1784/** @def IEM_CTX_IMPORT_RET
1785 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1786 *
1787 * Will call the keep to import the bits as needed.
1788 *
1789 * Returns on import failure.
1790 *
1791 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1792 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1793 */
1794#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
1795 do { \
1796 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1797 { /* likely */ } \
1798 else \
1799 { \
1800 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1801 AssertRCReturn(rcCtxImport, rcCtxImport); \
1802 } \
1803 } while (0)
1804
1805/** @def IEM_CTX_IMPORT_NORET
1806 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1807 *
1808 * Will call the keep to import the bits as needed.
1809 *
1810 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1811 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1812 */
1813#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
1814 do { \
1815 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1816 { /* likely */ } \
1817 else \
1818 { \
1819 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1820 AssertLogRelRC(rcCtxImport); \
1821 } \
1822 } while (0)
1823
1824/** @def IEM_CTX_IMPORT_JMP
1825 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
1826 *
1827 * Will call the keep to import the bits as needed.
1828 *
1829 * Jumps on import failure.
1830 *
1831 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1832 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
1833 */
1834#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
1835 do { \
1836 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
1837 { /* likely */ } \
1838 else \
1839 { \
1840 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
1841 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
1842 } \
1843 } while (0)
1844
1845
1846
1847/** @def IEM_GET_TARGET_CPU
1848 * Gets the current IEMTARGETCPU value.
1849 * @returns IEMTARGETCPU value.
1850 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
1851 */
1852#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
1853# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
1854#else
1855# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
1856#endif
1857
1858/** @def IEM_GET_INSTR_LEN
1859 * Gets the instruction length. */
1860#ifdef IEM_WITH_CODE_TLB
1861# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
1862#else
1863# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
1864#endif
1865
1866/** @def IEM_TRY_SETJMP
1867 * Wrapper around setjmp / try, hiding all the ugly differences.
1868 *
1869 * @note Use with extreme care as this is a fragile macro.
1870 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1871 * @param a_rcTarget The variable that should receive the status code in case
1872 * of a longjmp/throw.
1873 */
1874/** @def IEM_TRY_SETJMP_AGAIN
1875 * For when setjmp / try is used again in the same variable scope as a previous
1876 * IEM_TRY_SETJMP invocation.
1877 */
1878/** @def IEM_CATCH_LONGJMP_BEGIN
1879 * Start wrapper for catch / setjmp-else.
1880 *
1881 * This will set up a scope.
1882 *
1883 * @note Use with extreme care as this is a fragile macro.
1884 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1885 * @param a_rcTarget The variable that should receive the status code in case
1886 * of a longjmp/throw.
1887 */
1888/** @def IEM_CATCH_LONGJMP_END
1889 * End wrapper for catch / setjmp-else.
1890 *
1891 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
1892 * state.
1893 *
1894 * @note Use with extreme care as this is a fragile macro.
1895 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
1896 */
1897#if defined(IEM_WITH_SETJMP) || defined(DOXYGEN_RUNNING)
1898# ifdef IEM_WITH_THROW_CATCH
1899# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
1900 a_rcTarget = VINF_SUCCESS; \
1901 try
1902# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
1903 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
1904# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
1905 catch (int rcThrown) \
1906 { \
1907 a_rcTarget = rcThrown
1908# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
1909 } \
1910 ((void)0)
1911# else /* !IEM_WITH_THROW_CATCH */
1912# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
1913 jmp_buf JmpBuf; \
1914 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
1915 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
1916 if ((rcStrict = setjmp(JmpBuf)) == 0)
1917# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
1918 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
1919 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
1920 if ((rcStrict = setjmp(JmpBuf)) == 0)
1921# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
1922 else \
1923 { \
1924 ((void)0)
1925# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
1926 } \
1927 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
1928# endif /* !IEM_WITH_THROW_CATCH */
1929#endif /* IEM_WITH_SETJMP */
1930
1931
1932/**
1933 * Shared per-VM IEM data.
1934 */
1935typedef struct IEM
1936{
1937 /** The VMX APIC-access page handler type. */
1938 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
1939#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
1940 /** Set if the CPUID host call functionality is enabled. */
1941 bool fCpuIdHostCall;
1942#endif
1943} IEM;
1944
1945
1946
1947/** @name IEM_ACCESS_XXX - Access details.
1948 * @{ */
1949#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
1950#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
1951#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
1952#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
1953#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
1954#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
1955#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
1956#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
1957#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
1958#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
1959/** The writes are partial, so if initialize the bounce buffer with the
1960 * orignal RAM content. */
1961#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
1962/** Used in aMemMappings to indicate that the entry is bounce buffered. */
1963#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
1964/** Bounce buffer with ring-3 write pending, first page. */
1965#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
1966/** Bounce buffer with ring-3 write pending, second page. */
1967#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
1968/** Not locked, accessed via the TLB. */
1969#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
1970/** Valid bit mask. */
1971#define IEM_ACCESS_VALID_MASK UINT32_C(0x00001fff)
1972/** Shift count for the TLB flags (upper word). */
1973#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
1974
1975/** Read+write data alias. */
1976#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
1977/** Write data alias. */
1978#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
1979/** Read data alias. */
1980#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
1981/** Instruction fetch alias. */
1982#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
1983/** Stack write alias. */
1984#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
1985/** Stack read alias. */
1986#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
1987/** Stack read+write alias. */
1988#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
1989/** Read system table alias. */
1990#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
1991/** Read+write system table alias. */
1992#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
1993/** @} */
1994
1995/** @name Prefix constants (IEMCPU::fPrefixes)
1996 * @{ */
1997#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
1998#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
1999#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
2000#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
2001#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
2002#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
2003#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
2004
2005#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
2006#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
2007#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
2008
2009#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
2010#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
2011#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
2012
2013#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
2014#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
2015#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
2016#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
2017/** Mask with all the REX prefix flags.
2018 * This is generally for use when needing to undo the REX prefixes when they
2019 * are followed legacy prefixes and therefore does not immediately preceed
2020 * the first opcode byte.
2021 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
2022#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
2023
2024#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indiciates VEX prefix. */
2025#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indiciates EVEX prefix. */
2026#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indiciates XOP prefix. */
2027/** @} */
2028
2029/** @name IEMOPFORM_XXX - Opcode forms
2030 * @note These are ORed together with IEMOPHINT_XXX.
2031 * @{ */
2032/** ModR/M: reg, r/m */
2033#define IEMOPFORM_RM 0
2034/** ModR/M: reg, r/m (register) */
2035#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2036/** ModR/M: reg, r/m (memory) */
2037#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2038/** ModR/M: reg, r/m */
2039#define IEMOPFORM_RMI 1
2040/** ModR/M: reg, r/m (register) */
2041#define IEMOPFORM_RMI_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
2042/** ModR/M: reg, r/m (memory) */
2043#define IEMOPFORM_RMI_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
2044/** ModR/M: r/m, reg */
2045#define IEMOPFORM_MR 2
2046/** ModR/M: r/m (register), reg */
2047#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2048/** ModR/M: r/m (memory), reg */
2049#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2050/** ModR/M: r/m, reg */
2051#define IEMOPFORM_MRI 3
2052/** ModR/M: r/m (register), reg */
2053#define IEMOPFORM_MRI_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
2054/** ModR/M: r/m (memory), reg */
2055#define IEMOPFORM_MRI_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
2056/** ModR/M: r/m only */
2057#define IEMOPFORM_M 4
2058/** ModR/M: r/m only (register). */
2059#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
2060/** ModR/M: r/m only (memory). */
2061#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
2062/** ModR/M: reg only */
2063#define IEMOPFORM_R 5
2064
2065/** VEX+ModR/M: reg, r/m */
2066#define IEMOPFORM_VEX_RM 8
2067/** VEX+ModR/M: reg, r/m (register) */
2068#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
2069/** VEX+ModR/M: reg, r/m (memory) */
2070#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
2071/** VEX+ModR/M: r/m, reg */
2072#define IEMOPFORM_VEX_MR 9
2073/** VEX+ModR/M: r/m (register), reg */
2074#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
2075/** VEX+ModR/M: r/m (memory), reg */
2076#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
2077/** VEX+ModR/M: r/m only */
2078#define IEMOPFORM_VEX_M 10
2079/** VEX+ModR/M: r/m only (register). */
2080#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
2081/** VEX+ModR/M: r/m only (memory). */
2082#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
2083/** VEX+ModR/M: reg only */
2084#define IEMOPFORM_VEX_R 11
2085/** VEX+ModR/M: reg, vvvv, r/m */
2086#define IEMOPFORM_VEX_RVM 12
2087/** VEX+ModR/M: reg, vvvv, r/m (register). */
2088#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
2089/** VEX+ModR/M: reg, vvvv, r/m (memory). */
2090#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
2091/** VEX+ModR/M: reg, r/m, vvvv */
2092#define IEMOPFORM_VEX_RMV 13
2093/** VEX+ModR/M: reg, r/m, vvvv (register). */
2094#define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3)
2095/** VEX+ModR/M: reg, r/m, vvvv (memory). */
2096#define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3)
2097/** VEX+ModR/M: reg, r/m, imm8 */
2098#define IEMOPFORM_VEX_RMI 14
2099/** VEX+ModR/M: reg, r/m, imm8 (register). */
2100#define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3)
2101/** VEX+ModR/M: reg, r/m, imm8 (memory). */
2102#define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3)
2103/** VEX+ModR/M: r/m, vvvv, reg */
2104#define IEMOPFORM_VEX_MVR 15
2105/** VEX+ModR/M: r/m, vvvv, reg (register) */
2106#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
2107/** VEX+ModR/M: r/m, vvvv, reg (memory) */
2108#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
2109/** VEX+ModR/M+/n: vvvv, r/m */
2110#define IEMOPFORM_VEX_VM 16
2111/** VEX+ModR/M+/n: vvvv, r/m (register) */
2112#define IEMOPFORM_VEX_VM_REG (IEMOPFORM_VEX_VM | IEMOPFORM_MOD3)
2113/** VEX+ModR/M+/n: vvvv, r/m (memory) */
2114#define IEMOPFORM_VEX_VM_MEM (IEMOPFORM_VEX_VM | IEMOPFORM_NOT_MOD3)
2115
2116/** Fixed register instruction, no R/M. */
2117#define IEMOPFORM_FIXED 32
2118
2119/** The r/m is a register. */
2120#define IEMOPFORM_MOD3 RT_BIT_32(8)
2121/** The r/m is a memory access. */
2122#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
2123/** @} */
2124
2125/** @name IEMOPHINT_XXX - Additional Opcode Hints
2126 * @note These are ORed together with IEMOPFORM_XXX.
2127 * @{ */
2128/** Ignores the operand size prefix (66h). */
2129#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
2130/** Ignores REX.W (aka WIG). */
2131#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
2132/** Both the operand size prefixes (66h + REX.W) are ignored. */
2133#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
2134/** Allowed with the lock prefix. */
2135#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
2136/** The VEX.L value is ignored (aka LIG). */
2137#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
2138/** The VEX.L value must be zero (i.e. 128-bit width only). */
2139#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
2140/** The VEX.V value must be zero. */
2141#define IEMOPHINT_VEX_V_ZERO RT_BIT_32(14)
2142
2143/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
2144#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
2145/** @} */
2146
2147/**
2148 * Possible hardware task switch sources.
2149 */
2150typedef enum IEMTASKSWITCH
2151{
2152 /** Task switch caused by an interrupt/exception. */
2153 IEMTASKSWITCH_INT_XCPT = 1,
2154 /** Task switch caused by a far CALL. */
2155 IEMTASKSWITCH_CALL,
2156 /** Task switch caused by a far JMP. */
2157 IEMTASKSWITCH_JUMP,
2158 /** Task switch caused by an IRET. */
2159 IEMTASKSWITCH_IRET
2160} IEMTASKSWITCH;
2161AssertCompileSize(IEMTASKSWITCH, 4);
2162
2163/**
2164 * Possible CrX load (write) sources.
2165 */
2166typedef enum IEMACCESSCRX
2167{
2168 /** CrX access caused by 'mov crX' instruction. */
2169 IEMACCESSCRX_MOV_CRX,
2170 /** CrX (CR0) write caused by 'lmsw' instruction. */
2171 IEMACCESSCRX_LMSW,
2172 /** CrX (CR0) write caused by 'clts' instruction. */
2173 IEMACCESSCRX_CLTS,
2174 /** CrX (CR0) read caused by 'smsw' instruction. */
2175 IEMACCESSCRX_SMSW
2176} IEMACCESSCRX;
2177
2178#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2179/** @name IEM_SLAT_FAIL_XXX - Second-level address translation failure information.
2180 *
2181 * These flags provide further context to SLAT page-walk failures that could not be
2182 * determined by PGM (e.g, PGM is not privy to memory access permissions).
2183 *
2184 * @{
2185 */
2186/** Translating a nested-guest linear address failed accessing a nested-guest
2187 * physical address. */
2188# define IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR RT_BIT_32(0)
2189/** Translating a nested-guest linear address failed accessing a
2190 * paging-structure entry or updating accessed/dirty bits. */
2191# define IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE RT_BIT_32(1)
2192/** @} */
2193
2194DECLCALLBACK(FNPGMPHYSHANDLER) iemVmxApicAccessPageHandler;
2195# ifndef IN_RING3
2196DECLCALLBACK(FNPGMRZPHYSPFHANDLER) iemVmxApicAccessPagePfHandler;
2197# endif
2198#endif
2199
2200/**
2201 * Indicates to the verifier that the given flag set is undefined.
2202 *
2203 * Can be invoked again to add more flags.
2204 *
2205 * This is a NOOP if the verifier isn't compiled in.
2206 *
2207 * @note We're temporarily keeping this until code is converted to new
2208 * disassembler style opcode handling.
2209 */
2210#define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
2211
2212
2213/** @def IEM_DECL_IMPL_TYPE
2214 * For typedef'ing an instruction implementation function.
2215 *
2216 * @param a_RetType The return type.
2217 * @param a_Name The name of the type.
2218 * @param a_ArgList The argument list enclosed in parentheses.
2219 */
2220
2221/** @def IEM_DECL_IMPL_DEF
2222 * For defining an instruction implementation function.
2223 *
2224 * @param a_RetType The return type.
2225 * @param a_Name The name of the type.
2226 * @param a_ArgList The argument list enclosed in parentheses.
2227 */
2228
2229#if defined(__GNUC__) && defined(RT_ARCH_X86)
2230# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2231 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
2232# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2233 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2234# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2235 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
2236
2237#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
2238# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2239 a_RetType (__fastcall a_Name) a_ArgList
2240# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2241 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2242# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2243 a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
2244
2245#elif __cplusplus >= 201700 /* P0012R1 support */
2246# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2247 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
2248# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2249 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2250# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2251 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
2252
2253#else
2254# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
2255 a_RetType (VBOXCALL a_Name) a_ArgList
2256# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
2257 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2258# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
2259 DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
2260
2261#endif
2262
2263/** Defined in IEMAllAImplC.cpp but also used by IEMAllAImplA.asm. */
2264RT_C_DECLS_BEGIN
2265extern uint8_t const g_afParity[256];
2266RT_C_DECLS_END
2267
2268
2269/** @name Arithmetic assignment operations on bytes (binary).
2270 * @{ */
2271typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2272typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
2273FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
2274FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
2275FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
2276FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
2277FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
2278FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
2279FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
2280/** @} */
2281
2282/** @name Arithmetic assignment operations on words (binary).
2283 * @{ */
2284typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2285typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
2286FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
2287FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
2288FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
2289FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
2290FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
2291FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
2292FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
2293/** @} */
2294
2295/** @name Arithmetic assignment operations on double words (binary).
2296 * @{ */
2297typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2298typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
2299FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
2300FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
2301FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
2302FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
2303FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
2304FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
2305FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
2306FNIEMAIMPLBINU32 iemAImpl_blsi_u32, iemAImpl_blsi_u32_fallback;
2307FNIEMAIMPLBINU32 iemAImpl_blsr_u32, iemAImpl_blsr_u32_fallback;
2308FNIEMAIMPLBINU32 iemAImpl_blsmsk_u32, iemAImpl_blsmsk_u32_fallback;
2309/** @} */
2310
2311/** @name Arithmetic assignment operations on quad words (binary).
2312 * @{ */
2313typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2314typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
2315FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
2316FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
2317FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
2318FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
2319FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
2320FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
2321FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
2322FNIEMAIMPLBINU64 iemAImpl_blsi_u64, iemAImpl_blsi_u64_fallback;
2323FNIEMAIMPLBINU64 iemAImpl_blsr_u64, iemAImpl_blsr_u64_fallback;
2324FNIEMAIMPLBINU64 iemAImpl_blsmsk_u64, iemAImpl_blsmsk_u64_fallback;
2325/** @} */
2326
2327typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU8,(uint8_t const *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
2328typedef FNIEMAIMPLBINROU8 *PFNIEMAIMPLBINROU8;
2329typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU16,(uint16_t const *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
2330typedef FNIEMAIMPLBINROU16 *PFNIEMAIMPLBINROU16;
2331typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU32,(uint32_t const *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
2332typedef FNIEMAIMPLBINROU32 *PFNIEMAIMPLBINROU32;
2333typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINROU64,(uint64_t const *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
2334typedef FNIEMAIMPLBINROU64 *PFNIEMAIMPLBINROU64;
2335
2336/** @name Compare operations (thrown in with the binary ops).
2337 * @{ */
2338FNIEMAIMPLBINROU8 iemAImpl_cmp_u8;
2339FNIEMAIMPLBINROU16 iemAImpl_cmp_u16;
2340FNIEMAIMPLBINROU32 iemAImpl_cmp_u32;
2341FNIEMAIMPLBINROU64 iemAImpl_cmp_u64;
2342/** @} */
2343
2344/** @name Test operations (thrown in with the binary ops).
2345 * @{ */
2346FNIEMAIMPLBINROU8 iemAImpl_test_u8;
2347FNIEMAIMPLBINROU16 iemAImpl_test_u16;
2348FNIEMAIMPLBINROU32 iemAImpl_test_u32;
2349FNIEMAIMPLBINROU64 iemAImpl_test_u64;
2350/** @} */
2351
2352/** @name Bit operations operations (thrown in with the binary ops).
2353 * @{ */
2354FNIEMAIMPLBINROU16 iemAImpl_bt_u16;
2355FNIEMAIMPLBINROU32 iemAImpl_bt_u32;
2356FNIEMAIMPLBINROU64 iemAImpl_bt_u64;
2357FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
2358FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
2359FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
2360FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
2361FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
2362FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
2363FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
2364FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
2365FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
2366/** @} */
2367
2368/** @name Arithmetic three operand operations on double words (binary).
2369 * @{ */
2370typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags));
2371typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32;
2372FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback;
2373FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback;
2374FNIEMAIMPLBINVEXU32 iemAImpl_bzhi_u32, iemAImpl_bzhi_u32_fallback;
2375/** @} */
2376
2377/** @name Arithmetic three operand operations on quad words (binary).
2378 * @{ */
2379typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags));
2380typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64;
2381FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback;
2382FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback;
2383FNIEMAIMPLBINVEXU64 iemAImpl_bzhi_u64, iemAImpl_bzhi_u64_fallback;
2384/** @} */
2385
2386/** @name Arithmetic three operand operations on double words w/o EFLAGS (binary).
2387 * @{ */
2388typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2));
2389typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL;
2390FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pdep_u32, iemAImpl_pdep_u32_fallback;
2391FNIEMAIMPLBINVEXU32NOEFL iemAImpl_pext_u32, iemAImpl_pext_u32_fallback;
2392FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback;
2393FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback;
2394FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback;
2395FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32;
2396/** @} */
2397
2398/** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary).
2399 * @{ */
2400typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2));
2401typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL;
2402FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pdep_u64, iemAImpl_pdep_u64_fallback;
2403FNIEMAIMPLBINVEXU64NOEFL iemAImpl_pext_u64, iemAImpl_pext_u64_fallback;
2404FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback;
2405FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback;
2406FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback;
2407FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64;
2408/** @} */
2409
2410/** @name MULX 32-bit and 64-bit.
2411 * @{ */
2412typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU32, (uint32_t *puDst1, uint32_t *puDst2, uint32_t uSrc1, uint32_t uSrc2));
2413typedef FNIEMAIMPLMULXVEXU32 *PFNIEMAIMPLMULXVEXU32;
2414FNIEMAIMPLMULXVEXU32 iemAImpl_mulx_u32, iemAImpl_mulx_u32_fallback;
2415
2416typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMULXVEXU64, (uint64_t *puDst1, uint64_t *puDst2, uint64_t uSrc1, uint64_t uSrc2));
2417typedef FNIEMAIMPLMULXVEXU64 *PFNIEMAIMPLMULXVEXU64;
2418FNIEMAIMPLMULXVEXU64 iemAImpl_mulx_u64, iemAImpl_mulx_u64_fallback;
2419/** @} */
2420
2421
2422/** @name Exchange memory with register operations.
2423 * @{ */
2424IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_locked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2425IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_locked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2426IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_locked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2427IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_locked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2428IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8_unlocked, (uint8_t *pu8Mem, uint8_t *pu8Reg));
2429IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16_unlocked,(uint16_t *pu16Mem, uint16_t *pu16Reg));
2430IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32_unlocked,(uint32_t *pu32Mem, uint32_t *pu32Reg));
2431IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64_unlocked,(uint64_t *pu64Mem, uint64_t *pu64Reg));
2432/** @} */
2433
2434/** @name Exchange and add operations.
2435 * @{ */
2436IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2437IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2438IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2439IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2440IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
2441IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
2442IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
2443IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
2444/** @} */
2445
2446/** @name Compare and exchange.
2447 * @{ */
2448IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2449IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
2450IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2451IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
2452IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2453IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
2454#if ARCH_BITS == 32
2455IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2456IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
2457#else
2458IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2459IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
2460#endif
2461IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2462 uint32_t *pEFlags));
2463IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
2464 uint32_t *pEFlags));
2465IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2466 uint32_t *pEFlags));
2467IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
2468 uint32_t *pEFlags));
2469#ifndef RT_ARCH_ARM64
2470IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
2471 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
2472#endif
2473/** @} */
2474
2475/** @name Memory ordering
2476 * @{ */
2477typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
2478typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
2479IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
2480IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
2481IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
2482#ifndef RT_ARCH_ARM64
2483IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
2484#endif
2485/** @} */
2486
2487/** @name Double precision shifts
2488 * @{ */
2489typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
2490typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
2491typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
2492typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
2493typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
2494typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
2495FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16, iemAImpl_shld_u16_amd, iemAImpl_shld_u16_intel;
2496FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32, iemAImpl_shld_u32_amd, iemAImpl_shld_u32_intel;
2497FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64, iemAImpl_shld_u64_amd, iemAImpl_shld_u64_intel;
2498FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16, iemAImpl_shrd_u16_amd, iemAImpl_shrd_u16_intel;
2499FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32, iemAImpl_shrd_u32_amd, iemAImpl_shrd_u32_intel;
2500FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64, iemAImpl_shrd_u64_amd, iemAImpl_shrd_u64_intel;
2501/** @} */
2502
2503
2504/** @name Bit search operations (thrown in with the binary ops).
2505 * @{ */
2506FNIEMAIMPLBINU16 iemAImpl_bsf_u16, iemAImpl_bsf_u16_amd, iemAImpl_bsf_u16_intel;
2507FNIEMAIMPLBINU32 iemAImpl_bsf_u32, iemAImpl_bsf_u32_amd, iemAImpl_bsf_u32_intel;
2508FNIEMAIMPLBINU64 iemAImpl_bsf_u64, iemAImpl_bsf_u64_amd, iemAImpl_bsf_u64_intel;
2509FNIEMAIMPLBINU16 iemAImpl_bsr_u16, iemAImpl_bsr_u16_amd, iemAImpl_bsr_u16_intel;
2510FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel;
2511FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel;
2512FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel;
2513FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel;
2514FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel;
2515FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel;
2516FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel;
2517FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel;
2518FNIEMAIMPLBINU16 iemAImpl_popcnt_u16, iemAImpl_popcnt_u16_fallback;
2519FNIEMAIMPLBINU32 iemAImpl_popcnt_u32, iemAImpl_popcnt_u32_fallback;
2520FNIEMAIMPLBINU64 iemAImpl_popcnt_u64, iemAImpl_popcnt_u64_fallback;
2521/** @} */
2522
2523/** @name Signed multiplication operations (thrown in with the binary ops).
2524 * @{ */
2525FNIEMAIMPLBINU16 iemAImpl_imul_two_u16, iemAImpl_imul_two_u16_amd, iemAImpl_imul_two_u16_intel;
2526FNIEMAIMPLBINU32 iemAImpl_imul_two_u32, iemAImpl_imul_two_u32_amd, iemAImpl_imul_two_u32_intel;
2527FNIEMAIMPLBINU64 iemAImpl_imul_two_u64, iemAImpl_imul_two_u64_amd, iemAImpl_imul_two_u64_intel;
2528/** @} */
2529
2530/** @name Arithmetic assignment operations on bytes (unary).
2531 * @{ */
2532typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
2533typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
2534FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
2535FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
2536FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
2537FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
2538/** @} */
2539
2540/** @name Arithmetic assignment operations on words (unary).
2541 * @{ */
2542typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
2543typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
2544FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
2545FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
2546FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
2547FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
2548/** @} */
2549
2550/** @name Arithmetic assignment operations on double words (unary).
2551 * @{ */
2552typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
2553typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
2554FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
2555FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
2556FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
2557FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
2558/** @} */
2559
2560/** @name Arithmetic assignment operations on quad words (unary).
2561 * @{ */
2562typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
2563typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
2564FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
2565FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
2566FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
2567FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
2568/** @} */
2569
2570
2571/** @name Shift operations on bytes (Group 2).
2572 * @{ */
2573typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
2574typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
2575FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8, iemAImpl_rol_u8_amd, iemAImpl_rol_u8_intel;
2576FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8, iemAImpl_ror_u8_amd, iemAImpl_ror_u8_intel;
2577FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8, iemAImpl_rcl_u8_amd, iemAImpl_rcl_u8_intel;
2578FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8, iemAImpl_rcr_u8_amd, iemAImpl_rcr_u8_intel;
2579FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8, iemAImpl_shl_u8_amd, iemAImpl_shl_u8_intel;
2580FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8, iemAImpl_shr_u8_amd, iemAImpl_shr_u8_intel;
2581FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8, iemAImpl_sar_u8_amd, iemAImpl_sar_u8_intel;
2582/** @} */
2583
2584/** @name Shift operations on words (Group 2).
2585 * @{ */
2586typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
2587typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
2588FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16, iemAImpl_rol_u16_amd, iemAImpl_rol_u16_intel;
2589FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16, iemAImpl_ror_u16_amd, iemAImpl_ror_u16_intel;
2590FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16, iemAImpl_rcl_u16_amd, iemAImpl_rcl_u16_intel;
2591FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16, iemAImpl_rcr_u16_amd, iemAImpl_rcr_u16_intel;
2592FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16, iemAImpl_shl_u16_amd, iemAImpl_shl_u16_intel;
2593FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16, iemAImpl_shr_u16_amd, iemAImpl_shr_u16_intel;
2594FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16, iemAImpl_sar_u16_amd, iemAImpl_sar_u16_intel;
2595/** @} */
2596
2597/** @name Shift operations on double words (Group 2).
2598 * @{ */
2599typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
2600typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
2601FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32, iemAImpl_rol_u32_amd, iemAImpl_rol_u32_intel;
2602FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32, iemAImpl_ror_u32_amd, iemAImpl_ror_u32_intel;
2603FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32, iemAImpl_rcl_u32_amd, iemAImpl_rcl_u32_intel;
2604FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32, iemAImpl_rcr_u32_amd, iemAImpl_rcr_u32_intel;
2605FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32, iemAImpl_shl_u32_amd, iemAImpl_shl_u32_intel;
2606FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32, iemAImpl_shr_u32_amd, iemAImpl_shr_u32_intel;
2607FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32, iemAImpl_sar_u32_amd, iemAImpl_sar_u32_intel;
2608/** @} */
2609
2610/** @name Shift operations on words (Group 2).
2611 * @{ */
2612typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
2613typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
2614FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64, iemAImpl_rol_u64_amd, iemAImpl_rol_u64_intel;
2615FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64, iemAImpl_ror_u64_amd, iemAImpl_ror_u64_intel;
2616FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64, iemAImpl_rcl_u64_amd, iemAImpl_rcl_u64_intel;
2617FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64, iemAImpl_rcr_u64_amd, iemAImpl_rcr_u64_intel;
2618FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64, iemAImpl_shl_u64_amd, iemAImpl_shl_u64_intel;
2619FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64, iemAImpl_shr_u64_amd, iemAImpl_shr_u64_intel;
2620FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64, iemAImpl_sar_u64_amd, iemAImpl_sar_u64_intel;
2621/** @} */
2622
2623/** @name Multiplication and division operations.
2624 * @{ */
2625typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
2626typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
2627FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_mul_u8_amd, iemAImpl_mul_u8_intel;
2628FNIEMAIMPLMULDIVU8 iemAImpl_imul_u8, iemAImpl_imul_u8_amd, iemAImpl_imul_u8_intel;
2629FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_div_u8_amd, iemAImpl_div_u8_intel;
2630FNIEMAIMPLMULDIVU8 iemAImpl_idiv_u8, iemAImpl_idiv_u8_amd, iemAImpl_idiv_u8_intel;
2631
2632typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
2633typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
2634FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_mul_u16_amd, iemAImpl_mul_u16_intel;
2635FNIEMAIMPLMULDIVU16 iemAImpl_imul_u16, iemAImpl_imul_u16_amd, iemAImpl_imul_u16_intel;
2636FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_div_u16_amd, iemAImpl_div_u16_intel;
2637FNIEMAIMPLMULDIVU16 iemAImpl_idiv_u16, iemAImpl_idiv_u16_amd, iemAImpl_idiv_u16_intel;
2638
2639typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
2640typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
2641FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_mul_u32_amd, iemAImpl_mul_u32_intel;
2642FNIEMAIMPLMULDIVU32 iemAImpl_imul_u32, iemAImpl_imul_u32_amd, iemAImpl_imul_u32_intel;
2643FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_div_u32_amd, iemAImpl_div_u32_intel;
2644FNIEMAIMPLMULDIVU32 iemAImpl_idiv_u32, iemAImpl_idiv_u32_amd, iemAImpl_idiv_u32_intel;
2645
2646typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
2647typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
2648FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_mul_u64_amd, iemAImpl_mul_u64_intel;
2649FNIEMAIMPLMULDIVU64 iemAImpl_imul_u64, iemAImpl_imul_u64_amd, iemAImpl_imul_u64_intel;
2650FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_div_u64_amd, iemAImpl_div_u64_intel;
2651FNIEMAIMPLMULDIVU64 iemAImpl_idiv_u64, iemAImpl_idiv_u64_amd, iemAImpl_idiv_u64_intel;
2652/** @} */
2653
2654/** @name Byte Swap.
2655 * @{ */
2656IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
2657IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
2658IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
2659/** @} */
2660
2661/** @name Misc.
2662 * @{ */
2663FNIEMAIMPLBINU16 iemAImpl_arpl;
2664/** @} */
2665
2666/** @name RDRAND and RDSEED
2667 * @{ */
2668typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU16,(uint16_t *puDst, uint32_t *pEFlags));
2669typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU32,(uint32_t *puDst, uint32_t *pEFlags));
2670typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLRDRANDSEEDU64,(uint64_t *puDst, uint32_t *pEFlags));
2671typedef FNIEMAIMPLRDRANDSEEDU16 *PFNIEMAIMPLRDRANDSEEDU16;
2672typedef FNIEMAIMPLRDRANDSEEDU32 *PFNIEMAIMPLRDRANDSEEDU32;
2673typedef FNIEMAIMPLRDRANDSEEDU64 *PFNIEMAIMPLRDRANDSEEDU64;
2674
2675FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdrand_u16, iemAImpl_rdrand_u16_fallback;
2676FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdrand_u32, iemAImpl_rdrand_u32_fallback;
2677FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdrand_u64, iemAImpl_rdrand_u64_fallback;
2678FNIEMAIMPLRDRANDSEEDU16 iemAImpl_rdseed_u16, iemAImpl_rdseed_u16_fallback;
2679FNIEMAIMPLRDRANDSEEDU32 iemAImpl_rdseed_u32, iemAImpl_rdseed_u32_fallback;
2680FNIEMAIMPLRDRANDSEEDU64 iemAImpl_rdseed_u64, iemAImpl_rdseed_u64_fallback;
2681/** @} */
2682
2683/** @name ADOX and ADCX
2684 * @{ */
2685typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU32,(uint32_t *puDst, uint32_t *pfEFlags, uint32_t uSrc));
2686typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLADXU64,(uint64_t *puDst, uint32_t *pfEFlags, uint64_t uSrc));
2687typedef FNIEMAIMPLADXU32 *PFNIEMAIMPLADXU32;
2688typedef FNIEMAIMPLADXU64 *PFNIEMAIMPLADXU64;
2689
2690FNIEMAIMPLADXU32 iemAImpl_adcx_u32, iemAImpl_adcx_u32_fallback;
2691FNIEMAIMPLADXU64 iemAImpl_adcx_u64, iemAImpl_adcx_u64_fallback;
2692FNIEMAIMPLADXU32 iemAImpl_adox_u32, iemAImpl_adox_u32_fallback;
2693FNIEMAIMPLADXU64 iemAImpl_adox_u64, iemAImpl_adox_u64_fallback;
2694/** @} */
2695
2696/** @name FPU operations taking a 32-bit float argument
2697 * @{ */
2698typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2699 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
2700typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
2701
2702typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2703 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
2704typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
2705
2706FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
2707FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
2708FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
2709FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
2710FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
2711FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
2712FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
2713
2714IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
2715IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2716 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
2717/** @} */
2718
2719/** @name FPU operations taking a 64-bit float argument
2720 * @{ */
2721typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2722 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2723typedef FNIEMAIMPLFPUR64FSW *PFNIEMAIMPLFPUR64FSW;
2724
2725typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2726 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
2727typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
2728
2729FNIEMAIMPLFPUR64FSW iemAImpl_fcom_r80_by_r64;
2730FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
2731FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
2732FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
2733FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
2734FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
2735FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
2736
2737IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
2738IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2739 PRTFLOAT64U pr32Val, PCRTFLOAT80U pr80Val));
2740/** @} */
2741
2742/** @name FPU operations taking a 80-bit float argument
2743 * @{ */
2744typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2745 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2746typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
2747FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
2748FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
2749FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
2750FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
2751FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
2752FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
2753FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
2754FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
2755FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
2756
2757FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80, iemAImpl_fpatan_r80_by_r80_amd, iemAImpl_fpatan_r80_by_r80_intel;
2758FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80, iemAImpl_fyl2x_r80_by_r80_amd, iemAImpl_fyl2x_r80_by_r80_intel;
2759FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80, iemAImpl_fyl2xp1_r80_by_r80_amd, iemAImpl_fyl2xp1_r80_by_r80_intel;
2760
2761typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2762 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2763typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
2764FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
2765FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
2766
2767typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
2768 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
2769typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
2770FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
2771FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
2772
2773typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
2774typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
2775FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
2776FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
2777FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80, iemAImpl_f2xm1_r80_amd, iemAImpl_f2xm1_r80_intel;
2778FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
2779FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
2780FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80, iemAImpl_fsin_r80_amd, iemAImpl_fsin_r80_intel;
2781FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80, iemAImpl_fcos_r80_amd, iemAImpl_fcos_r80_intel;
2782
2783typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
2784typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
2785FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
2786FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
2787
2788typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
2789typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
2790FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
2791FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
2792FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
2793FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
2794FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
2795FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
2796FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
2797
2798typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
2799 PCRTFLOAT80U pr80Val));
2800typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
2801FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80, iemAImpl_fptan_r80_r80_amd, iemAImpl_fptan_r80_r80_intel;
2802FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
2803FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80, iemAImpl_fsincos_r80_r80_amd, iemAImpl_fsincos_r80_r80_intel;
2804
2805IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
2806IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2807 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
2808
2809IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_d80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTPBCD80U pd80Val));
2810IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_d80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
2811 PRTPBCD80U pd80Dst, PCRTFLOAT80U pr80Src));
2812
2813/** @} */
2814
2815/** @name FPU operations taking a 16-bit signed integer argument
2816 * @{ */
2817typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2818 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
2819typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
2820typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI16,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
2821 int16_t *pi16Dst, PCRTFLOAT80U pr80Src));
2822typedef FNIEMAIMPLFPUSTR80TOI16 *PFNIEMAIMPLFPUSTR80TOI16;
2823
2824FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
2825FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
2826FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
2827FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
2828FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
2829FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
2830
2831typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2832 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
2833typedef FNIEMAIMPLFPUI16FSW *PFNIEMAIMPLFPUI16FSW;
2834FNIEMAIMPLFPUI16FSW iemAImpl_ficom_r80_by_i16;
2835
2836IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
2837FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fist_r80_to_i16;
2838FNIEMAIMPLFPUSTR80TOI16 iemAImpl_fistt_r80_to_i16, iemAImpl_fistt_r80_to_i16_amd, iemAImpl_fistt_r80_to_i16_intel;
2839/** @} */
2840
2841/** @name FPU operations taking a 32-bit signed integer argument
2842 * @{ */
2843typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
2844 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
2845typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
2846typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI32,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
2847 int32_t *pi32Dst, PCRTFLOAT80U pr80Src));
2848typedef FNIEMAIMPLFPUSTR80TOI32 *PFNIEMAIMPLFPUSTR80TOI32;
2849
2850FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
2851FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
2852FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
2853FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
2854FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
2855FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
2856
2857typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
2858 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
2859typedef FNIEMAIMPLFPUI32FSW *PFNIEMAIMPLFPUI32FSW;
2860FNIEMAIMPLFPUI32FSW iemAImpl_ficom_r80_by_i32;
2861
2862IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
2863FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fist_r80_to_i32;
2864FNIEMAIMPLFPUSTR80TOI32 iemAImpl_fistt_r80_to_i32;
2865/** @} */
2866
2867/** @name FPU operations taking a 64-bit signed integer argument
2868 * @{ */
2869typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUSTR80TOI64,(PCX86FXSTATE pFpuState, uint16_t *pFpuRes,
2870 int64_t *pi64Dst, PCRTFLOAT80U pr80Src));
2871typedef FNIEMAIMPLFPUSTR80TOI64 *PFNIEMAIMPLFPUSTR80TOI64;
2872
2873IEM_DECL_IMPL_DEF(void, iemAImpl_fild_r80_from_i64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
2874FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fist_r80_to_i64;
2875FNIEMAIMPLFPUSTR80TOI64 iemAImpl_fistt_r80_to_i64;
2876/** @} */
2877
2878
2879/** Temporary type representing a 256-bit vector register. */
2880typedef struct { uint64_t au64[4]; } IEMVMM256;
2881/** Temporary type pointing to a 256-bit vector register. */
2882typedef IEMVMM256 *PIEMVMM256;
2883/** Temporary type pointing to a const 256-bit vector register. */
2884typedef IEMVMM256 *PCIEMVMM256;
2885
2886
2887/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
2888 * @{ */
2889typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *puDst, uint64_t const *puSrc));
2890typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
2891typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
2892typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
2893typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U128,(PX86XSAVEAREA pExtState, PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
2894typedef FNIEMAIMPLMEDIAF3U128 *PFNIEMAIMPLMEDIAF3U128;
2895typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF3U256,(PX86XSAVEAREA pExtState, PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
2896typedef FNIEMAIMPLMEDIAF3U256 *PFNIEMAIMPLMEDIAF3U256;
2897typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U64,(uint64_t *puDst, uint64_t const *puSrc));
2898typedef FNIEMAIMPLMEDIAOPTF2U64 *PFNIEMAIMPLMEDIAOPTF2U64;
2899typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128,(PRTUINT128U puDst, PCRTUINT128U puSrc));
2900typedef FNIEMAIMPLMEDIAOPTF2U128 *PFNIEMAIMPLMEDIAOPTF2U128;
2901typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2));
2902typedef FNIEMAIMPLMEDIAOPTF3U128 *PFNIEMAIMPLMEDIAOPTF3U128;
2903typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2));
2904typedef FNIEMAIMPLMEDIAOPTF3U256 *PFNIEMAIMPLMEDIAOPTF3U256;
2905typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U256,(PRTUINT256U puDst, PCRTUINT256U puSrc));
2906typedef FNIEMAIMPLMEDIAOPTF2U256 *PFNIEMAIMPLMEDIAOPTF2U256;
2907FNIEMAIMPLMEDIAF2U64 iemAImpl_pshufb_u64, iemAImpl_pshufb_u64_fallback;
2908FNIEMAIMPLMEDIAF2U64 iemAImpl_pand_u64, iemAImpl_pandn_u64, iemAImpl_por_u64, iemAImpl_pxor_u64;
2909FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
2910FNIEMAIMPLMEDIAF2U64 iemAImpl_pcmpgtb_u64, iemAImpl_pcmpgtw_u64, iemAImpl_pcmpgtd_u64;
2911FNIEMAIMPLMEDIAF2U64 iemAImpl_paddb_u64, iemAImpl_paddsb_u64, iemAImpl_paddusb_u64;
2912FNIEMAIMPLMEDIAF2U64 iemAImpl_paddw_u64, iemAImpl_paddsw_u64, iemAImpl_paddusw_u64;
2913FNIEMAIMPLMEDIAF2U64 iemAImpl_paddd_u64;
2914FNIEMAIMPLMEDIAF2U64 iemAImpl_paddq_u64;
2915FNIEMAIMPLMEDIAF2U64 iemAImpl_psubb_u64, iemAImpl_psubsb_u64, iemAImpl_psubusb_u64;
2916FNIEMAIMPLMEDIAF2U64 iemAImpl_psubw_u64, iemAImpl_psubsw_u64, iemAImpl_psubusw_u64;
2917FNIEMAIMPLMEDIAF2U64 iemAImpl_psubd_u64;
2918FNIEMAIMPLMEDIAF2U64 iemAImpl_psubq_u64;
2919FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddwd_u64;
2920FNIEMAIMPLMEDIAF2U64 iemAImpl_pmullw_u64, iemAImpl_pmulhw_u64;
2921FNIEMAIMPLMEDIAF2U64 iemAImpl_pminub_u64, iemAImpl_pmaxub_u64;
2922FNIEMAIMPLMEDIAF2U64 iemAImpl_pminsw_u64, iemAImpl_pmaxsw_u64;
2923FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsb_u64, iemAImpl_pabsb_u64_fallback;
2924FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsw_u64, iemAImpl_pabsw_u64_fallback;
2925FNIEMAIMPLMEDIAF2U64 iemAImpl_pabsd_u64, iemAImpl_pabsd_u64_fallback;
2926FNIEMAIMPLMEDIAF2U64 iemAImpl_psignb_u64, iemAImpl_psignb_u64_fallback;
2927FNIEMAIMPLMEDIAF2U64 iemAImpl_psignw_u64, iemAImpl_psignw_u64_fallback;
2928FNIEMAIMPLMEDIAF2U64 iemAImpl_psignd_u64, iemAImpl_psignd_u64_fallback;
2929FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddw_u64, iemAImpl_phaddw_u64_fallback;
2930FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddd_u64, iemAImpl_phaddd_u64_fallback;
2931FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubw_u64, iemAImpl_phsubw_u64_fallback;
2932FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubd_u64, iemAImpl_phsubd_u64_fallback;
2933FNIEMAIMPLMEDIAF2U64 iemAImpl_phaddsw_u64, iemAImpl_phaddsw_u64_fallback;
2934FNIEMAIMPLMEDIAF2U64 iemAImpl_phsubsw_u64, iemAImpl_phsubsw_u64_fallback;
2935FNIEMAIMPLMEDIAF2U64 iemAImpl_pmaddubsw_u64, iemAImpl_pmaddubsw_u64_fallback;
2936FNIEMAIMPLMEDIAF2U64 iemAImpl_pmulhrsw_u64, iemAImpl_pmulhrsw_u64_fallback;
2937FNIEMAIMPLMEDIAF2U64 iemAImpl_pmuludq_u64;
2938FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllw_u64, iemAImpl_psrlw_u64, iemAImpl_psraw_u64;
2939FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pslld_u64, iemAImpl_psrld_u64, iemAImpl_psrad_u64;
2940FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psllq_u64, iemAImpl_psrlq_u64;
2941FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packsswb_u64, iemAImpl_packuswb_u64;
2942FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_packssdw_u64;
2943FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pmulhuw_u64;
2944FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_pavgb_u64, iemAImpl_pavgw_u64;
2945FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_psadbw_u64;
2946
2947FNIEMAIMPLMEDIAF2U128 iemAImpl_pshufb_u128, iemAImpl_pshufb_u128_fallback;
2948FNIEMAIMPLMEDIAF2U128 iemAImpl_pand_u128, iemAImpl_pandn_u128, iemAImpl_por_u128, iemAImpl_pxor_u128;
2949FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
2950FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpeqq_u128, iemAImpl_pcmpeqq_u128_fallback;
2951FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtb_u128, iemAImpl_pcmpgtw_u128, iemAImpl_pcmpgtd_u128;
2952FNIEMAIMPLMEDIAF2U128 iemAImpl_pcmpgtq_u128, iemAImpl_pcmpgtq_u128_fallback;
2953FNIEMAIMPLMEDIAF2U128 iemAImpl_paddb_u128, iemAImpl_paddsb_u128, iemAImpl_paddusb_u128;
2954FNIEMAIMPLMEDIAF2U128 iemAImpl_paddw_u128, iemAImpl_paddsw_u128, iemAImpl_paddusw_u128;
2955FNIEMAIMPLMEDIAF2U128 iemAImpl_paddd_u128;
2956FNIEMAIMPLMEDIAF2U128 iemAImpl_paddq_u128;
2957FNIEMAIMPLMEDIAF2U128 iemAImpl_psubb_u128, iemAImpl_psubsb_u128, iemAImpl_psubusb_u128;
2958FNIEMAIMPLMEDIAF2U128 iemAImpl_psubw_u128, iemAImpl_psubsw_u128, iemAImpl_psubusw_u128;
2959FNIEMAIMPLMEDIAF2U128 iemAImpl_psubd_u128;
2960FNIEMAIMPLMEDIAF2U128 iemAImpl_psubq_u128;
2961FNIEMAIMPLMEDIAF2U128 iemAImpl_pmullw_u128, iemAImpl_pmullw_u128_fallback;
2962FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhw_u128;
2963FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulld_u128, iemAImpl_pmulld_u128_fallback;
2964FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddwd_u128;
2965FNIEMAIMPLMEDIAF2U128 iemAImpl_pminub_u128;
2966FNIEMAIMPLMEDIAF2U128 iemAImpl_pminud_u128, iemAImpl_pminud_u128_fallback;
2967FNIEMAIMPLMEDIAF2U128 iemAImpl_pminuw_u128, iemAImpl_pminuw_u128_fallback;
2968FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsb_u128, iemAImpl_pminsb_u128_fallback;
2969FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsd_u128, iemAImpl_pminsd_u128_fallback;
2970FNIEMAIMPLMEDIAF2U128 iemAImpl_pminsw_u128, iemAImpl_pminsw_u128_fallback;
2971FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxub_u128;
2972FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxud_u128, iemAImpl_pmaxud_u128_fallback;
2973FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxuw_u128, iemAImpl_pmaxuw_u128_fallback;
2974FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsb_u128, iemAImpl_pmaxsb_u128_fallback;
2975FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsw_u128;
2976FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaxsd_u128, iemAImpl_pmaxsd_u128_fallback;
2977FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsb_u128, iemAImpl_pabsb_u128_fallback;
2978FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsw_u128, iemAImpl_pabsw_u128_fallback;
2979FNIEMAIMPLMEDIAF2U128 iemAImpl_pabsd_u128, iemAImpl_pabsd_u128_fallback;
2980FNIEMAIMPLMEDIAF2U128 iemAImpl_psignb_u128, iemAImpl_psignb_u128_fallback;
2981FNIEMAIMPLMEDIAF2U128 iemAImpl_psignw_u128, iemAImpl_psignw_u128_fallback;
2982FNIEMAIMPLMEDIAF2U128 iemAImpl_psignd_u128, iemAImpl_psignd_u128_fallback;
2983FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddw_u128, iemAImpl_phaddw_u128_fallback;
2984FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddd_u128, iemAImpl_phaddd_u128_fallback;
2985FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubw_u128, iemAImpl_phsubw_u128_fallback;
2986FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubd_u128, iemAImpl_phsubd_u128_fallback;
2987FNIEMAIMPLMEDIAF2U128 iemAImpl_phaddsw_u128, iemAImpl_phaddsw_u128_fallback;
2988FNIEMAIMPLMEDIAF2U128 iemAImpl_phsubsw_u128, iemAImpl_phsubsw_u128_fallback;
2989FNIEMAIMPLMEDIAF2U128 iemAImpl_pmaddubsw_u128, iemAImpl_pmaddubsw_u128_fallback;
2990FNIEMAIMPLMEDIAF2U128 iemAImpl_pmulhrsw_u128, iemAImpl_pmulhrsw_u128_fallback;
2991FNIEMAIMPLMEDIAF2U128 iemAImpl_pmuludq_u128;
2992FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packsswb_u128, iemAImpl_packuswb_u128;
2993FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_packssdw_u128, iemAImpl_packusdw_u128;
2994FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllw_u128, iemAImpl_psrlw_u128, iemAImpl_psraw_u128;
2995FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pslld_u128, iemAImpl_psrld_u128, iemAImpl_psrad_u128;
2996FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psllq_u128, iemAImpl_psrlq_u128;
2997FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmulhuw_u128;
2998FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pavgb_u128, iemAImpl_pavgw_u128;
2999FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_psadbw_u128;
3000FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_pmuldq_u128, iemAImpl_pmuldq_u128_fallback;
3001FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpcklps_u128, iemAImpl_unpcklpd_u128;
3002FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_unpckhps_u128, iemAImpl_unpckhpd_u128;
3003FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_phminposuw_u128, iemAImpl_phminposuw_u128_fallback;
3004
3005FNIEMAIMPLMEDIAF3U128 iemAImpl_vpshufb_u128, iemAImpl_vpshufb_u128_fallback;
3006FNIEMAIMPLMEDIAF3U128 iemAImpl_vpand_u128, iemAImpl_vpand_u128_fallback;
3007FNIEMAIMPLMEDIAF3U128 iemAImpl_vpandn_u128, iemAImpl_vpandn_u128_fallback;
3008FNIEMAIMPLMEDIAF3U128 iemAImpl_vpor_u128, iemAImpl_vpor_u128_fallback;
3009FNIEMAIMPLMEDIAF3U128 iemAImpl_vpxor_u128, iemAImpl_vpxor_u128_fallback;
3010FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqb_u128, iemAImpl_vpcmpeqb_u128_fallback;
3011FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqw_u128, iemAImpl_vpcmpeqw_u128_fallback;
3012FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqd_u128, iemAImpl_vpcmpeqd_u128_fallback;
3013FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpeqq_u128, iemAImpl_vpcmpeqq_u128_fallback;
3014FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtb_u128, iemAImpl_vpcmpgtb_u128_fallback;
3015FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtw_u128, iemAImpl_vpcmpgtw_u128_fallback;
3016FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtd_u128, iemAImpl_vpcmpgtd_u128_fallback;
3017FNIEMAIMPLMEDIAF3U128 iemAImpl_vpcmpgtq_u128, iemAImpl_vpcmpgtq_u128_fallback;
3018FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddb_u128, iemAImpl_vpaddb_u128_fallback;
3019FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddw_u128, iemAImpl_vpaddw_u128_fallback;
3020FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddd_u128, iemAImpl_vpaddd_u128_fallback;
3021FNIEMAIMPLMEDIAF3U128 iemAImpl_vpaddq_u128, iemAImpl_vpaddq_u128_fallback;
3022FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubb_u128, iemAImpl_vpsubb_u128_fallback;
3023FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubw_u128, iemAImpl_vpsubw_u128_fallback;
3024FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubd_u128, iemAImpl_vpsubd_u128_fallback;
3025FNIEMAIMPLMEDIAF3U128 iemAImpl_vpsubq_u128, iemAImpl_vpsubq_u128_fallback;
3026FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminub_u128, iemAImpl_vpminub_u128_fallback;
3027FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminuw_u128, iemAImpl_vpminuw_u128_fallback;
3028FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminud_u128, iemAImpl_vpminud_u128_fallback;
3029FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsb_u128, iemAImpl_vpminsb_u128_fallback;
3030FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsw_u128, iemAImpl_vpminsw_u128_fallback;
3031FNIEMAIMPLMEDIAF3U128 iemAImpl_vpminsd_u128, iemAImpl_vpminsd_u128_fallback;
3032FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxub_u128, iemAImpl_vpmaxub_u128_fallback;
3033FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxuw_u128, iemAImpl_vpmaxuw_u128_fallback;
3034FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxud_u128, iemAImpl_vpmaxud_u128_fallback;
3035FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsb_u128, iemAImpl_vpmaxsb_u128_fallback;
3036FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsw_u128, iemAImpl_vpmaxsw_u128_fallback;
3037FNIEMAIMPLMEDIAF3U128 iemAImpl_vpmaxsd_u128, iemAImpl_vpmaxsd_u128_fallback;
3038FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpacksswb_u128, iemAImpl_vpacksswb_u128_fallback;
3039FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackssdw_u128, iemAImpl_vpackssdw_u128_fallback;
3040FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackuswb_u128, iemAImpl_vpackuswb_u128_fallback;
3041FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpackusdw_u128, iemAImpl_vpackusdw_u128_fallback;
3042FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmullw_u128, iemAImpl_vpmullw_u128_fallback;
3043FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulld_u128, iemAImpl_vpmulld_u128_fallback;
3044FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhw_u128, iemAImpl_vpmulhw_u128_fallback;
3045FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhuw_u128, iemAImpl_vpmulhuw_u128_fallback;
3046FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgb_u128, iemAImpl_vpavgb_u128_fallback;
3047FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpavgw_u128, iemAImpl_vpavgw_u128_fallback;
3048FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignb_u128, iemAImpl_vpsignb_u128_fallback;
3049FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignw_u128, iemAImpl_vpsignw_u128_fallback;
3050FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsignd_u128, iemAImpl_vpsignd_u128_fallback;
3051FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddw_u128, iemAImpl_vphaddw_u128_fallback;
3052FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddd_u128, iemAImpl_vphaddd_u128_fallback;
3053FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubw_u128, iemAImpl_vphsubw_u128_fallback;
3054FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubd_u128, iemAImpl_vphsubd_u128_fallback;
3055FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphaddsw_u128, iemAImpl_vphaddsw_u128_fallback;
3056FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vphsubsw_u128, iemAImpl_vphsubsw_u128_fallback;
3057FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmaddubsw_u128, iemAImpl_vpmaddubsw_u128_fallback;
3058FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmulhrsw_u128, iemAImpl_vpmulhrsw_u128_fallback;
3059FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsadbw_u128, iemAImpl_vpsadbw_u128_fallback;
3060FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuldq_u128, iemAImpl_vpmuldq_u128_fallback;
3061FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpmuludq_u128, iemAImpl_vpmuludq_u128_fallback;
3062FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsb_u128, iemAImpl_vpsubsb_u128_fallback;
3063FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubsw_u128, iemAImpl_vpsubsw_u128_fallback;
3064FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusb_u128, iemAImpl_vpsubusb_u128_fallback;
3065FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpsubusw_u128, iemAImpl_vpsubusw_u128_fallback;
3066FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusb_u128, iemAImpl_vpaddusb_u128_fallback;
3067FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddusw_u128, iemAImpl_vpaddusw_u128_fallback;
3068FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsb_u128, iemAImpl_vpaddsb_u128_fallback;
3069FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpaddsw_u128, iemAImpl_vpaddsw_u128_fallback;
3070
3071FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsb_u128, iemAImpl_vpabsb_u128_fallback;
3072FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsw_u128, iemAImpl_vpabsd_u128_fallback;
3073FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vpabsd_u128, iemAImpl_vpabsw_u128_fallback;
3074FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vphminposuw_u128, iemAImpl_vphminposuw_u128_fallback;
3075
3076FNIEMAIMPLMEDIAF3U256 iemAImpl_vpshufb_u256, iemAImpl_vpshufb_u256_fallback;
3077FNIEMAIMPLMEDIAF3U256 iemAImpl_vpand_u256, iemAImpl_vpand_u256_fallback;
3078FNIEMAIMPLMEDIAF3U256 iemAImpl_vpandn_u256, iemAImpl_vpandn_u256_fallback;
3079FNIEMAIMPLMEDIAF3U256 iemAImpl_vpor_u256, iemAImpl_vpor_u256_fallback;
3080FNIEMAIMPLMEDIAF3U256 iemAImpl_vpxor_u256, iemAImpl_vpxor_u256_fallback;
3081FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqb_u256, iemAImpl_vpcmpeqb_u256_fallback;
3082FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqw_u256, iemAImpl_vpcmpeqw_u256_fallback;
3083FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqd_u256, iemAImpl_vpcmpeqd_u256_fallback;
3084FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpeqq_u256, iemAImpl_vpcmpeqq_u256_fallback;
3085FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtb_u256, iemAImpl_vpcmpgtb_u256_fallback;
3086FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtw_u256, iemAImpl_vpcmpgtw_u256_fallback;
3087FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtd_u256, iemAImpl_vpcmpgtd_u256_fallback;
3088FNIEMAIMPLMEDIAF3U256 iemAImpl_vpcmpgtq_u256, iemAImpl_vpcmpgtq_u256_fallback;
3089FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddb_u256, iemAImpl_vpaddb_u256_fallback;
3090FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddw_u256, iemAImpl_vpaddw_u256_fallback;
3091FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddd_u256, iemAImpl_vpaddd_u256_fallback;
3092FNIEMAIMPLMEDIAF3U256 iemAImpl_vpaddq_u256, iemAImpl_vpaddq_u256_fallback;
3093FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubb_u256, iemAImpl_vpsubb_u256_fallback;
3094FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubw_u256, iemAImpl_vpsubw_u256_fallback;
3095FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubd_u256, iemAImpl_vpsubd_u256_fallback;
3096FNIEMAIMPLMEDIAF3U256 iemAImpl_vpsubq_u256, iemAImpl_vpsubq_u256_fallback;
3097FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminub_u256, iemAImpl_vpminub_u256_fallback;
3098FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminuw_u256, iemAImpl_vpminuw_u256_fallback;
3099FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminud_u256, iemAImpl_vpminud_u256_fallback;
3100FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsb_u256, iemAImpl_vpminsb_u256_fallback;
3101FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsw_u256, iemAImpl_vpminsw_u256_fallback;
3102FNIEMAIMPLMEDIAF3U256 iemAImpl_vpminsd_u256, iemAImpl_vpminsd_u256_fallback;
3103FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxub_u256, iemAImpl_vpmaxub_u256_fallback;
3104FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxuw_u256, iemAImpl_vpmaxuw_u256_fallback;
3105FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxud_u256, iemAImpl_vpmaxud_u256_fallback;
3106FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsb_u256, iemAImpl_vpmaxsb_u256_fallback;
3107FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsw_u256, iemAImpl_vpmaxsw_u256_fallback;
3108FNIEMAIMPLMEDIAF3U256 iemAImpl_vpmaxsd_u256, iemAImpl_vpmaxsd_u256_fallback;
3109FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpacksswb_u256, iemAImpl_vpacksswb_u256_fallback;
3110FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackssdw_u256, iemAImpl_vpackssdw_u256_fallback;
3111FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackuswb_u256, iemAImpl_vpackuswb_u256_fallback;
3112FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpackusdw_u256, iemAImpl_vpackusdw_u256_fallback;
3113FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmullw_u256, iemAImpl_vpmullw_u256_fallback;
3114FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulld_u256, iemAImpl_vpmulld_u256_fallback;
3115FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhw_u256, iemAImpl_vpmulhw_u256_fallback;
3116FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhuw_u256, iemAImpl_vpmulhuw_u256_fallback;
3117FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgb_u256, iemAImpl_vpavgb_u256_fallback;
3118FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpavgw_u256, iemAImpl_vpavgw_u256_fallback;
3119FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignb_u256, iemAImpl_vpsignb_u256_fallback;
3120FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignw_u256, iemAImpl_vpsignw_u256_fallback;
3121FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsignd_u256, iemAImpl_vpsignd_u256_fallback;
3122FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddw_u256, iemAImpl_vphaddw_u256_fallback;
3123FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddd_u256, iemAImpl_vphaddd_u256_fallback;
3124FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubw_u256, iemAImpl_vphsubw_u256_fallback;
3125FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubd_u256, iemAImpl_vphsubd_u256_fallback;
3126FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphaddsw_u256, iemAImpl_vphaddsw_u256_fallback;
3127FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vphsubsw_u256, iemAImpl_vphsubsw_u256_fallback;
3128FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmaddubsw_u256, iemAImpl_vpmaddubsw_u256_fallback;
3129FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmulhrsw_u256, iemAImpl_vpmulhrsw_u256_fallback;
3130FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsadbw_u256, iemAImpl_vpsadbw_u256_fallback;
3131FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuldq_u256, iemAImpl_vpmuldq_u256_fallback;
3132FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpmuludq_u256, iemAImpl_vpmuludq_u256_fallback;
3133FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsb_u256, iemAImpl_vpsubsb_u256_fallback;
3134FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubsw_u256, iemAImpl_vpsubsw_u256_fallback;
3135FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusb_u256, iemAImpl_vpsubusb_u256_fallback;
3136FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpsubusw_u256, iemAImpl_vpsubusw_u256_fallback;
3137FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusb_u256, iemAImpl_vpaddusb_u256_fallback;
3138FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddusw_u256, iemAImpl_vpaddusw_u256_fallback;
3139FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsb_u256, iemAImpl_vpaddsb_u256_fallback;
3140FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpaddsw_u256, iemAImpl_vpaddsw_u256_fallback;
3141
3142FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsb_u256, iemAImpl_vpabsb_u256_fallback;
3143FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsw_u256, iemAImpl_vpabsw_u256_fallback;
3144FNIEMAIMPLMEDIAOPTF2U256 iemAImpl_vpabsd_u256, iemAImpl_vpabsd_u256_fallback;
3145/** @} */
3146
3147/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
3148 * @{ */
3149FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
3150FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
3151FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpcklbw_u128, iemAImpl_vpunpcklbw_u128_fallback,
3152 iemAImpl_vpunpcklwd_u128, iemAImpl_vpunpcklwd_u128_fallback,
3153 iemAImpl_vpunpckldq_u128, iemAImpl_vpunpckldq_u128_fallback,
3154 iemAImpl_vpunpcklqdq_u128, iemAImpl_vpunpcklqdq_u128_fallback,
3155 iemAImpl_vunpcklps_u128, iemAImpl_vunpcklps_u128_fallback,
3156 iemAImpl_vunpcklpd_u128, iemAImpl_vunpcklpd_u128_fallback,
3157 iemAImpl_vunpckhps_u128, iemAImpl_vunpckhps_u128_fallback,
3158 iemAImpl_vunpckhpd_u128, iemAImpl_vunpckhpd_u128_fallback;
3159
3160FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpcklbw_u256, iemAImpl_vpunpcklbw_u256_fallback,
3161 iemAImpl_vpunpcklwd_u256, iemAImpl_vpunpcklwd_u256_fallback,
3162 iemAImpl_vpunpckldq_u256, iemAImpl_vpunpckldq_u256_fallback,
3163 iemAImpl_vpunpcklqdq_u256, iemAImpl_vpunpcklqdq_u256_fallback,
3164 iemAImpl_vunpcklps_u256, iemAImpl_vunpcklps_u256_fallback,
3165 iemAImpl_vunpcklpd_u256, iemAImpl_vunpcklpd_u256_fallback,
3166 iemAImpl_vunpckhps_u256, iemAImpl_vunpckhps_u256_fallback,
3167 iemAImpl_vunpckhpd_u256, iemAImpl_vunpckhpd_u256_fallback;
3168/** @} */
3169
3170/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
3171 * @{ */
3172FNIEMAIMPLMEDIAOPTF2U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
3173FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
3174FNIEMAIMPLMEDIAOPTF3U128 iemAImpl_vpunpckhbw_u128, iemAImpl_vpunpckhbw_u128_fallback,
3175 iemAImpl_vpunpckhwd_u128, iemAImpl_vpunpckhwd_u128_fallback,
3176 iemAImpl_vpunpckhdq_u128, iemAImpl_vpunpckhdq_u128_fallback,
3177 iemAImpl_vpunpckhqdq_u128, iemAImpl_vpunpckhqdq_u128_fallback;
3178FNIEMAIMPLMEDIAOPTF3U256 iemAImpl_vpunpckhbw_u256, iemAImpl_vpunpckhbw_u256_fallback,
3179 iemAImpl_vpunpckhwd_u256, iemAImpl_vpunpckhwd_u256_fallback,
3180 iemAImpl_vpunpckhdq_u256, iemAImpl_vpunpckhdq_u256_fallback,
3181 iemAImpl_vpunpckhqdq_u256, iemAImpl_vpunpckhqdq_u256_fallback;
3182/** @} */
3183
3184/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
3185 * @{ */
3186typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3187typedef FNIEMAIMPLMEDIAPSHUFU128 *PFNIEMAIMPLMEDIAPSHUFU128;
3188typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUFU256,(PRTUINT256U puDst, PCRTUINT256U puSrc, uint8_t bEvil));
3189typedef FNIEMAIMPLMEDIAPSHUFU256 *PFNIEMAIMPLMEDIAPSHUFU256;
3190IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw_u64,(uint64_t *puDst, uint64_t const *puSrc, uint8_t bEvil));
3191FNIEMAIMPLMEDIAPSHUFU128 iemAImpl_pshufhw_u128, iemAImpl_pshuflw_u128, iemAImpl_pshufd_u128;
3192#ifndef IEM_WITHOUT_ASSEMBLY
3193FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256, iemAImpl_vpshuflw_u256, iemAImpl_vpshufd_u256;
3194#endif
3195FNIEMAIMPLMEDIAPSHUFU256 iemAImpl_vpshufhw_u256_fallback, iemAImpl_vpshuflw_u256_fallback, iemAImpl_vpshufd_u256_fallback;
3196/** @} */
3197
3198/** @name Media (SSE/MMX/AVX) operation: Shift Immediate Stuff (evil)
3199 * @{ */
3200typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU64,(uint64_t *puDst, uint8_t bShift));
3201typedef FNIEMAIMPLMEDIAPSHIFTU64 *PFNIEMAIMPLMEDIAPSHIFTU64;
3202typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU128,(PRTUINT128U puDst, uint8_t bShift));
3203typedef FNIEMAIMPLMEDIAPSHIFTU128 *PFNIEMAIMPLMEDIAPSHIFTU128;
3204typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHIFTU256,(PRTUINT256U puDst, uint8_t bShift));
3205typedef FNIEMAIMPLMEDIAPSHIFTU256 *PFNIEMAIMPLMEDIAPSHIFTU256;
3206FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psllw_imm_u64, iemAImpl_pslld_imm_u64, iemAImpl_psllq_imm_u64;
3207FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psrlw_imm_u64, iemAImpl_psrld_imm_u64, iemAImpl_psrlq_imm_u64;
3208FNIEMAIMPLMEDIAPSHIFTU64 iemAImpl_psraw_imm_u64, iemAImpl_psrad_imm_u64;
3209FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psllw_imm_u128, iemAImpl_pslld_imm_u128, iemAImpl_psllq_imm_u128;
3210FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psrlw_imm_u128, iemAImpl_psrld_imm_u128, iemAImpl_psrlq_imm_u128;
3211FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_psraw_imm_u128, iemAImpl_psrad_imm_u128;
3212FNIEMAIMPLMEDIAPSHIFTU128 iemAImpl_pslldq_imm_u128, iemAImpl_psrldq_imm_u128;
3213/** @} */
3214
3215/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
3216 * @{ */
3217IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(uint64_t *pu64Dst, uint64_t const *puSrc));
3218IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(uint64_t *pu64Dst, PCRTUINT128U puSrc));
3219#ifndef IEM_WITHOUT_ASSEMBLY
3220IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
3221#endif
3222IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovmskb_u256_fallback,(uint64_t *pu64Dst, PCRTUINT256U puSrc));
3223/** @} */
3224
3225/** @name Media (SSE/MMX/AVX) operations: Variable Blend Packed Bytes/R32/R64.
3226 * @{ */
3227typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puMask));
3228typedef FNIEMAIMPLBLENDU128 *PFNIEMAIMPLBLENDU128;
3229typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, PCRTUINT128U puMask));
3230typedef FNIEMAIMPLAVXBLENDU128 *PFNIEMAIMPLAVXBLENDU128;
3231typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLAVXBLENDU256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, PCRTUINT256U puMask));
3232typedef FNIEMAIMPLAVXBLENDU256 *PFNIEMAIMPLAVXBLENDU256;
3233
3234FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128;
3235FNIEMAIMPLBLENDU128 iemAImpl_pblendvb_u128_fallback;
3236FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128;
3237FNIEMAIMPLAVXBLENDU128 iemAImpl_vpblendvb_u128_fallback;
3238FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256;
3239FNIEMAIMPLAVXBLENDU256 iemAImpl_vpblendvb_u256_fallback;
3240
3241FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128;
3242FNIEMAIMPLBLENDU128 iemAImpl_blendvps_u128_fallback;
3243FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128;
3244FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvps_u128_fallback;
3245FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256;
3246FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvps_u256_fallback;
3247
3248FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128;
3249FNIEMAIMPLBLENDU128 iemAImpl_blendvpd_u128_fallback;
3250FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128;
3251FNIEMAIMPLAVXBLENDU128 iemAImpl_vblendvpd_u128_fallback;
3252FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256;
3253FNIEMAIMPLAVXBLENDU256 iemAImpl_vblendvpd_u256_fallback;
3254/** @} */
3255
3256
3257/** @name Media (SSE/MMX/AVX) operation: Sort this later
3258 * @{ */
3259IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3260IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3261IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3262IEM_DECL_IMPL_DEF(void, iemAImpl_vmovshdup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3263IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
3264IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
3265
3266IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3267IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3268IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3269IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3270IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3271
3272IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3273IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3274IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3275IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3276IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3277
3278IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3279IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3280IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3281IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3282IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3283
3284IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3285IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3286IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3287IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3288IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3289
3290IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3291IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3292IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3293IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3294IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3295
3296IEM_DECL_IMPL_DEF(void, iemAImpl_pmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3297IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3298IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3299IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3300IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovsxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3301
3302IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3303IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128,(PRTUINT128U puDst, uint64_t uSrc));
3304IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3305IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3306IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbw_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3307
3308IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3309IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128,(PRTUINT128U puDst, uint32_t uSrc));
3310IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3311IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3312IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3313
3314IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3315IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128,(PRTUINT128U puDst, uint16_t uSrc));
3316IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u128_fallback,(PRTUINT128U puDst, uint16_t uSrc));
3317IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3318IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxbq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3319
3320IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3321IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128,(PRTUINT128U puDst, uint64_t uSrc));
3322IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3323IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3324IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwd_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3325
3326IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3327IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128,(PRTUINT128U puDst, uint32_t uSrc));
3328IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u128_fallback,(PRTUINT128U puDst, uint32_t uSrc));
3329IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3330IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxwq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3331
3332IEM_DECL_IMPL_DEF(void, iemAImpl_pmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3333IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128,(PRTUINT128U puDst, uint64_t uSrc));
3334IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u128_fallback,(PRTUINT128U puDst, uint64_t uSrc));
3335IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3336IEM_DECL_IMPL_DEF(void, iemAImpl_vpmovzxdq_u256_fallback,(PRTUINT256U puDst, PCRTUINT128U puSrc));
3337
3338IEM_DECL_IMPL_DEF(void, iemAImpl_shufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3339IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3340IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3341IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3342IEM_DECL_IMPL_DEF(void, iemAImpl_vshufpd_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3343
3344IEM_DECL_IMPL_DEF(void, iemAImpl_shufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3345IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3346IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3347IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3348IEM_DECL_IMPL_DEF(void, iemAImpl_vshufps_u256_fallback,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3349
3350IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3351IEM_DECL_IMPL_DEF(void, iemAImpl_palignr_u64_fallback,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t bEvil));
3352
3353IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u64,(uint64_t *pu64Dst, uint16_t u16Src, uint8_t bEvil));
3354IEM_DECL_IMPL_DEF(void, iemAImpl_pinsrw_u128,(PRTUINT128U puDst, uint16_t u16Src, uint8_t bEvil));
3355IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3356IEM_DECL_IMPL_DEF(void, iemAImpl_vpinsrw_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint16_t u16Src, uint8_t bEvil));
3357
3358IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u64,(uint16_t *pu16Dst, uint64_t u64Src, uint8_t bEvil));
3359IEM_DECL_IMPL_DEF(void, iemAImpl_pextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3360IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3361IEM_DECL_IMPL_DEF(void, iemAImpl_vpextrw_u128_fallback,(uint16_t *pu16Dst, PCRTUINT128U puSrc, uint8_t bEvil));
3362
3363IEM_DECL_IMPL_DEF(void, iemAImpl_movmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3364IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3365IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3366IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3367IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskps_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3368
3369IEM_DECL_IMPL_DEF(void, iemAImpl_movmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3370IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3371IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u128_fallback,(uint8_t *pu8Dst, PCRTUINT128U puSrc));
3372IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3373IEM_DECL_IMPL_DEF(void, iemAImpl_vmovmskpd_u256_fallback,(uint8_t *pu8Dst, PCRTUINT256U puSrc));
3374
3375
3376typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF2U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc, uint8_t bEvil));
3377typedef FNIEMAIMPLMEDIAOPTF2U128IMM8 *PFNIEMAIMPLMEDIAOPTF2U128IMM8;
3378typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U128IMM8,(PRTUINT128U puDst, PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint8_t bEvil));
3379typedef FNIEMAIMPLMEDIAOPTF3U128IMM8 *PFNIEMAIMPLMEDIAOPTF3U128IMM8;
3380typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAOPTF3U256IMM8,(PRTUINT256U puDst, PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint8_t bEvil));
3381typedef FNIEMAIMPLMEDIAOPTF3U256IMM8 *PFNIEMAIMPLMEDIAOPTF3U256IMM8;
3382
3383FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_palignr_u128, iemAImpl_palignr_u128_fallback;
3384FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pblendw_u128, iemAImpl_pblendw_u128_fallback;
3385FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendps_u128, iemAImpl_blendps_u128_fallback;
3386FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_blendpd_u128, iemAImpl_blendpd_u128_fallback;
3387
3388FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpalignr_u128, iemAImpl_vpalignr_u128_fallback;
3389FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpblendw_u128, iemAImpl_vpblendw_u128_fallback;
3390FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendps_u128, iemAImpl_vblendps_u128_fallback;
3391FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vblendpd_u128, iemAImpl_vblendpd_u128_fallback;
3392
3393FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpalignr_u256, iemAImpl_vpalignr_u256_fallback;
3394FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vpblendw_u256, iemAImpl_vpblendw_u256_fallback;
3395FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendps_u256, iemAImpl_vblendps_u256_fallback;
3396FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vblendpd_u256, iemAImpl_vblendpd_u256_fallback;
3397FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2i128_u256, iemAImpl_vperm2i128_u256_fallback;
3398FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vperm2f128_u256, iemAImpl_vperm2f128_u256_fallback;
3399
3400FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesimc_u128, iemAImpl_aesimc_u128_fallback;
3401FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenc_u128, iemAImpl_aesenc_u128_fallback;
3402FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesenclast_u128, iemAImpl_aesenclast_u128_fallback;
3403FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdec_u128, iemAImpl_aesdec_u128_fallback;
3404FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_aesdeclast_u128, iemAImpl_aesdeclast_u128_fallback;
3405
3406FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesimc_u128, iemAImpl_vaesimc_u128_fallback;
3407FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenc_u128, iemAImpl_vaesenc_u128_fallback;
3408FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesenclast_u128, iemAImpl_vaesenclast_u128_fallback;
3409FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdec_u128, iemAImpl_vaesdec_u128_fallback;
3410FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_vaesdeclast_u128, iemAImpl_vaesdeclast_u128_fallback;
3411
3412FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_aeskeygenassist_u128, iemAImpl_aeskeygenassist_u128_fallback;
3413
3414FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vaeskeygenassist_u128, iemAImpl_vaeskeygenassist_u128_fallback;
3415
3416FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1nexte_u128, iemAImpl_sha1nexte_u128_fallback;
3417FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg1_u128, iemAImpl_sha1msg1_u128_fallback;
3418FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha1msg2_u128, iemAImpl_sha1msg2_u128_fallback;
3419FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg1_u128, iemAImpl_sha256msg1_u128_fallback;
3420FNIEMAIMPLMEDIAOPTF2U128 iemAImpl_sha256msg2_u128, iemAImpl_sha256msg2_u128_fallback;
3421FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_sha1rnds4_u128, iemAImpl_sha1rnds4_u128_fallback;
3422IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3423IEM_DECL_IMPL_DEF(void, iemAImpl_sha256rnds2_u128_fallback,(PRTUINT128U puDst, PCRTUINT128U puSrc, PCRTUINT128U puXmm0Constants));
3424
3425typedef struct IEMPCMPISTRXSRC
3426{
3427 RTUINT128U uSrc1;
3428 RTUINT128U uSrc2;
3429} IEMPCMPISTRXSRC;
3430typedef IEMPCMPISTRXSRC *PIEMPCMPISTRXSRC;
3431typedef const IEMPCMPISTRXSRC *PCIEMPCMPISTRXSRC;
3432
3433typedef struct IEMPCMPESTRXSRC
3434{
3435 RTUINT128U uSrc1;
3436 RTUINT128U uSrc2;
3437 uint64_t u64Rax;
3438 uint64_t u64Rdx;
3439} IEMPCMPESTRXSRC;
3440typedef IEMPCMPESTRXSRC *PIEMPCMPESTRXSRC;
3441typedef const IEMPCMPESTRXSRC *PCIEMPCMPESTRXSRC;
3442
3443typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3444typedef FNIEMAIMPLPCMPISTRIU128IMM8 *PFNIEMAIMPLPCMPISTRIU128IMM8;
3445typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRIU128IMM8,(uint32_t *pu32Ecx, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3446typedef FNIEMAIMPLPCMPESTRIU128IMM8 *PFNIEMAIMPLPCMPESTRIU128IMM8;
3447
3448typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPISTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPISTRXSRC pSrc, uint8_t bEvil));
3449typedef FNIEMAIMPLPCMPISTRMU128IMM8 *PFNIEMAIMPLPCMPISTRMU128IMM8;
3450typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLPCMPESTRMU128IMM8,(PRTUINT128U puDst, uint32_t *pEFlags, PCIEMPCMPESTRXSRC pSrc, uint8_t bEvil));
3451typedef FNIEMAIMPLPCMPESTRMU128IMM8 *PFNIEMAIMPLPCMPESTRMU128IMM8;
3452
3453FNIEMAIMPLPCMPISTRIU128IMM8 iemAImpl_pcmpistri_u128, iemAImpl_pcmpistri_u128_fallback;
3454FNIEMAIMPLPCMPESTRIU128IMM8 iemAImpl_pcmpestri_u128, iemAImpl_pcmpestri_u128_fallback;
3455FNIEMAIMPLPCMPISTRMU128IMM8 iemAImpl_pcmpistrm_u128, iemAImpl_pcmpistrm_u128_fallback;
3456FNIEMAIMPLPCMPESTRMU128IMM8 iemAImpl_pcmpestrm_u128, iemAImpl_pcmpestrm_u128_fallback;
3457
3458FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_pclmulqdq_u128, iemAImpl_pclmulqdq_u128_fallback;
3459FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vpclmulqdq_u128, iemAImpl_vpclmulqdq_u128_fallback;
3460
3461FNIEMAIMPLMEDIAOPTF2U128IMM8 iemAImpl_mpsadbw_u128, iemAImpl_mpsadbw_u128_fallback;
3462FNIEMAIMPLMEDIAOPTF3U128IMM8 iemAImpl_vmpsadbw_u128, iemAImpl_vmpsadbw_u128_fallback;
3463FNIEMAIMPLMEDIAOPTF3U256IMM8 iemAImpl_vmpsadbw_u256, iemAImpl_vmpsadbw_u256_fallback;
3464/** @} */
3465
3466/** @name Media Odds and Ends
3467 * @{ */
3468typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U8,(uint32_t *puDst, uint8_t uSrc));
3469typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U16,(uint32_t *puDst, uint16_t uSrc));
3470typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U32,(uint32_t *puDst, uint32_t uSrc));
3471typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLCR32U64,(uint32_t *puDst, uint64_t uSrc));
3472FNIEMAIMPLCR32U8 iemAImpl_crc32_u8, iemAImpl_crc32_u8_fallback;
3473FNIEMAIMPLCR32U16 iemAImpl_crc32_u16, iemAImpl_crc32_u16_fallback;
3474FNIEMAIMPLCR32U32 iemAImpl_crc32_u32, iemAImpl_crc32_u32_fallback;
3475FNIEMAIMPLCR32U64 iemAImpl_crc32_u64, iemAImpl_crc32_u64_fallback;
3476
3477typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL128,(PCRTUINT128U puSrc1, PCRTUINT128U puSrc2, uint32_t *pEFlags));
3478typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFL256,(PCRTUINT256U puSrc1, PCRTUINT256U puSrc2, uint32_t *pEFlags));
3479FNIEMAIMPLF2EFL128 iemAImpl_ptest_u128;
3480FNIEMAIMPLF2EFL256 iemAImpl_vptest_u256, iemAImpl_vptest_u256_fallback;
3481
3482typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3483typedef FNIEMAIMPLSSEF2I32U64 *PFNIEMAIMPLSSEF2I32U64;
3484typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint64_t *pu64Src)); /* pu64Src is a double precision floating point. */
3485typedef FNIEMAIMPLSSEF2I64U64 *PFNIEMAIMPLSSEF2I64U64;
3486typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I32U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int32_t *pi32Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3487typedef FNIEMAIMPLSSEF2I32U32 *PFNIEMAIMPLSSEF2I32U32;
3488typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2I64U32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, int64_t *pi64Dst, const uint32_t *pu32Src)); /* pu32Src is a single precision floating point. */
3489typedef FNIEMAIMPLSSEF2I64U32 *PFNIEMAIMPLSSEF2I64U32;
3490
3491FNIEMAIMPLSSEF2I32U64 iemAImpl_cvttsd2si_i32_r64;
3492FNIEMAIMPLSSEF2I32U64 iemAImpl_cvtsd2si_i32_r64;
3493
3494FNIEMAIMPLSSEF2I64U64 iemAImpl_cvttsd2si_i64_r64;
3495FNIEMAIMPLSSEF2I64U64 iemAImpl_cvtsd2si_i64_r64;
3496
3497FNIEMAIMPLSSEF2I32U32 iemAImpl_cvttss2si_i32_r32;
3498FNIEMAIMPLSSEF2I32U32 iemAImpl_cvtss2si_i32_r32;
3499
3500FNIEMAIMPLSSEF2I64U32 iemAImpl_cvttss2si_i64_r32;
3501FNIEMAIMPLSSEF2I64U32 iemAImpl_cvtss2si_i64_r32;
3502
3503typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int32_t *pi32Src));
3504typedef FNIEMAIMPLSSEF2R32I32 *PFNIEMAIMPLSSEF2R32I32;
3505typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R32I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT32U pr32Dst, const int64_t *pi64Src));
3506typedef FNIEMAIMPLSSEF2R32I64 *PFNIEMAIMPLSSEF2R32I64;
3507
3508FNIEMAIMPLSSEF2R32I32 iemAImpl_cvtsi2ss_r32_i32;
3509FNIEMAIMPLSSEF2R32I64 iemAImpl_cvtsi2ss_r32_i64;
3510
3511typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I32,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int32_t *pi32Src));
3512typedef FNIEMAIMPLSSEF2R64I32 *PFNIEMAIMPLSSEF2R64I32;
3513typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSSEF2R64I64,(PCX86FXSTATE pFpuState, uint32_t *pfMxcsr, PRTFLOAT64U pr64Dst, const int64_t *pi64Src));
3514typedef FNIEMAIMPLSSEF2R64I64 *PFNIEMAIMPLSSEF2R64I64;
3515
3516FNIEMAIMPLSSEF2R64I32 iemAImpl_cvtsi2sd_r64_i32;
3517FNIEMAIMPLSSEF2R64I64 iemAImpl_cvtsi2sd_r64_i64;
3518
3519
3520typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLF2EFLMXCSR128,(uint32_t *pfMxcsr, uint32_t *pfEFlags, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3521typedef FNIEMAIMPLF2EFLMXCSR128 *PFNIEMAIMPLF2EFLMXCSR128;
3522
3523FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomiss_u128;
3524FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomiss_u128, iemAImpl_vucomiss_u128_fallback;
3525
3526FNIEMAIMPLF2EFLMXCSR128 iemAImpl_ucomisd_u128;
3527FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vucomisd_u128, iemAImpl_vucomisd_u128_fallback;
3528
3529FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comiss_u128;
3530FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomiss_u128, iemAImpl_vcomiss_u128_fallback;
3531
3532FNIEMAIMPLF2EFLMXCSR128 iemAImpl_comisd_u128;
3533FNIEMAIMPLF2EFLMXCSR128 iemAImpl_vcomisd_u128, iemAImpl_vcomisd_u128_fallback;
3534
3535
3536typedef struct IEMMEDIAF2XMMSRC
3537{
3538 X86XMMREG uSrc1;
3539 X86XMMREG uSrc2;
3540} IEMMEDIAF2XMMSRC;
3541typedef IEMMEDIAF2XMMSRC *PIEMMEDIAF2XMMSRC;
3542typedef const IEMMEDIAF2XMMSRC *PCIEMMEDIAF2XMMSRC;
3543
3544typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRF2XMMIMM8,(uint32_t *pfMxcsr, PX86XMMREG puDst, PCIEMMEDIAF2XMMSRC puSrc, uint8_t bEvil));
3545typedef FNIEMAIMPLMXCSRF2XMMIMM8 *PFNIEMAIMPLMXCSRF2XMMIMM8;
3546
3547FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpps_u128;
3548FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmppd_u128;
3549FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpss_u128;
3550FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_cmpsd_u128;
3551FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundss_u128;
3552FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundsd_u128;
3553
3554FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundps_u128, iemAImpl_roundps_u128_fallback;
3555FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_roundpd_u128, iemAImpl_roundpd_u128_fallback;
3556
3557FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dpps_u128, iemAImpl_dpps_u128_fallback;
3558FNIEMAIMPLMXCSRF2XMMIMM8 iemAImpl_dppd_u128, iemAImpl_dppd_u128_fallback;
3559
3560typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U128,(uint32_t *pfMxcsr, uint64_t *pu64Dst, PCX86XMMREG pSrc));
3561typedef FNIEMAIMPLMXCSRU64U128 *PFNIEMAIMPLMXCSRU64U128;
3562
3563FNIEMAIMPLMXCSRU64U128 iemAImpl_cvtpd2pi_u128;
3564FNIEMAIMPLMXCSRU64U128 iemAImpl_cvttpd2pi_u128;
3565
3566typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU128U64,(uint32_t *pfMxcsr, PX86XMMREG pDst, uint64_t u64Src));
3567typedef FNIEMAIMPLMXCSRU128U64 *PFNIEMAIMPLMXCSRU128U64;
3568
3569FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2ps_u128;
3570FNIEMAIMPLMXCSRU128U64 iemAImpl_cvtpi2pd_u128;
3571
3572typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMXCSRU64U64,(uint32_t *pfMxcsr, uint64_t *pu64Dst, uint64_t u64Src));
3573typedef FNIEMAIMPLMXCSRU64U64 *PFNIEMAIMPLMXCSRU64U64;
3574
3575FNIEMAIMPLMXCSRU64U64 iemAImpl_cvtps2pi_u128;
3576FNIEMAIMPLMXCSRU64U64 iemAImpl_cvttps2pi_u128;
3577
3578/** @} */
3579
3580
3581/** @name Function tables.
3582 * @{
3583 */
3584
3585/**
3586 * Function table for a binary operator providing implementation based on
3587 * operand size.
3588 */
3589typedef struct IEMOPBINSIZES
3590{
3591 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
3592 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
3593 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
3594 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
3595} IEMOPBINSIZES;
3596/** Pointer to a binary operator function table. */
3597typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
3598
3599
3600/**
3601 * Function table for a unary operator providing implementation based on
3602 * operand size.
3603 */
3604typedef struct IEMOPUNARYSIZES
3605{
3606 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
3607 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
3608 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
3609 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
3610} IEMOPUNARYSIZES;
3611/** Pointer to a unary operator function table. */
3612typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
3613
3614
3615/**
3616 * Function table for a shift operator providing implementation based on
3617 * operand size.
3618 */
3619typedef struct IEMOPSHIFTSIZES
3620{
3621 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
3622 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
3623 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
3624 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
3625} IEMOPSHIFTSIZES;
3626/** Pointer to a shift operator function table. */
3627typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
3628
3629
3630/**
3631 * Function table for a multiplication or division operation.
3632 */
3633typedef struct IEMOPMULDIVSIZES
3634{
3635 PFNIEMAIMPLMULDIVU8 pfnU8;
3636 PFNIEMAIMPLMULDIVU16 pfnU16;
3637 PFNIEMAIMPLMULDIVU32 pfnU32;
3638 PFNIEMAIMPLMULDIVU64 pfnU64;
3639} IEMOPMULDIVSIZES;
3640/** Pointer to a multiplication or division operation function table. */
3641typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
3642
3643
3644/**
3645 * Function table for a double precision shift operator providing implementation
3646 * based on operand size.
3647 */
3648typedef struct IEMOPSHIFTDBLSIZES
3649{
3650 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
3651 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
3652 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
3653} IEMOPSHIFTDBLSIZES;
3654/** Pointer to a double precision shift function table. */
3655typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
3656
3657
3658/**
3659 * Function table for media instruction taking two full sized media source
3660 * registers and one full sized destination register (AVX).
3661 */
3662typedef struct IEMOPMEDIAF3
3663{
3664 PFNIEMAIMPLMEDIAF3U128 pfnU128;
3665 PFNIEMAIMPLMEDIAF3U256 pfnU256;
3666} IEMOPMEDIAF3;
3667/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3668typedef IEMOPMEDIAF3 const *PCIEMOPMEDIAF3;
3669
3670/** @def IEMOPMEDIAF3_INIT_VARS_EX
3671 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3672 * given functions as initializers. For use in AVX functions where a pair of
3673 * functions are only used once and the function table need not be public. */
3674#ifndef TST_IEM_CHECK_MC
3675# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3676# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3677 static IEMOPMEDIAF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3678 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3679# else
3680# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3681 static IEMOPMEDIAF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3682# endif
3683#else
3684# define IEMOPMEDIAF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3685#endif
3686/** @def IEMOPMEDIAF3_INIT_VARS
3687 * Generate AVX function tables for the @a a_InstrNm instruction.
3688 * @sa IEMOPMEDIAF3_INIT_VARS_EX */
3689#define IEMOPMEDIAF3_INIT_VARS(a_InstrNm) \
3690 IEMOPMEDIAF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3691 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3692
3693/**
3694 * Function table for media instruction taking two full sized media source
3695 * registers and one full sized destination register, but no additional state
3696 * (AVX).
3697 */
3698typedef struct IEMOPMEDIAOPTF3
3699{
3700 PFNIEMAIMPLMEDIAOPTF3U128 pfnU128;
3701 PFNIEMAIMPLMEDIAOPTF3U256 pfnU256;
3702} IEMOPMEDIAOPTF3;
3703/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3704typedef IEMOPMEDIAOPTF3 const *PCIEMOPMEDIAOPTF3;
3705
3706/** @def IEMOPMEDIAOPTF3_INIT_VARS_EX
3707 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3708 * given functions as initializers. For use in AVX functions where a pair of
3709 * functions are only used once and the function table need not be public. */
3710#ifndef TST_IEM_CHECK_MC
3711# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3712# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3713 static IEMOPMEDIAOPTF3 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3714 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3715# else
3716# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3717 static IEMOPMEDIAOPTF3 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3718# endif
3719#else
3720# define IEMOPMEDIAOPTF3_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3721#endif
3722/** @def IEMOPMEDIAOPTF3_INIT_VARS
3723 * Generate AVX function tables for the @a a_InstrNm instruction.
3724 * @sa IEMOPMEDIAOPTF3_INIT_VARS_EX */
3725#define IEMOPMEDIAOPTF3_INIT_VARS(a_InstrNm) \
3726 IEMOPMEDIAOPTF3_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3727 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3728
3729/**
3730 * Function table for media instruction taking one full sized media source
3731 * registers and one full sized destination register, but no additional state
3732 * (AVX).
3733 */
3734typedef struct IEMOPMEDIAOPTF2
3735{
3736 PFNIEMAIMPLMEDIAOPTF2U128 pfnU128;
3737 PFNIEMAIMPLMEDIAOPTF2U256 pfnU256;
3738} IEMOPMEDIAOPTF2;
3739/** Pointer to a media operation function table for 2 full sized ops (AVX). */
3740typedef IEMOPMEDIAOPTF2 const *PCIEMOPMEDIAOPTF2;
3741
3742/** @def IEMOPMEDIAOPTF2_INIT_VARS_EX
3743 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3744 * given functions as initializers. For use in AVX functions where a pair of
3745 * functions are only used once and the function table need not be public. */
3746#ifndef TST_IEM_CHECK_MC
3747# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3748# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3749 static IEMOPMEDIAOPTF2 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3750 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3751# else
3752# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3753 static IEMOPMEDIAOPTF2 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3754# endif
3755#else
3756# define IEMOPMEDIAOPTF2_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3757#endif
3758/** @def IEMOPMEDIAOPTF2_INIT_VARS
3759 * Generate AVX function tables for the @a a_InstrNm instruction.
3760 * @sa IEMOPMEDIAOPTF2_INIT_VARS_EX */
3761#define IEMOPMEDIAOPTF2_INIT_VARS(a_InstrNm) \
3762 IEMOPMEDIAOPTF2_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3763 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3764
3765/**
3766 * Function table for media instruction taking two full sized media source
3767 * registers and one full sized destination register and an 8-bit immediate, but no additional state
3768 * (AVX).
3769 */
3770typedef struct IEMOPMEDIAOPTF3IMM8
3771{
3772 PFNIEMAIMPLMEDIAOPTF3U128IMM8 pfnU128;
3773 PFNIEMAIMPLMEDIAOPTF3U256IMM8 pfnU256;
3774} IEMOPMEDIAOPTF3IMM8;
3775/** Pointer to a media operation function table for 3 full sized ops (AVX). */
3776typedef IEMOPMEDIAOPTF3IMM8 const *PCIEMOPMEDIAOPTF3IMM8;
3777
3778/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX
3779 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3780 * given functions as initializers. For use in AVX functions where a pair of
3781 * functions are only used once and the function table need not be public. */
3782#ifndef TST_IEM_CHECK_MC
3783# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3784# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3785 static IEMOPMEDIAOPTF3IMM8 const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3786 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3787# else
3788# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3789 static IEMOPMEDIAOPTF3IMM8 const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3790# endif
3791#else
3792# define IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3793#endif
3794/** @def IEMOPMEDIAOPTF3IMM8_INIT_VARS
3795 * Generate AVX function tables for the @a a_InstrNm instruction.
3796 * @sa IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX */
3797#define IEMOPMEDIAOPTF3IMM8_INIT_VARS(a_InstrNm) \
3798 IEMOPMEDIAOPTF3IMM8_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3799 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3800/** @} */
3801
3802
3803/**
3804 * Function table for blend type instruction taking three full sized media source
3805 * registers and one full sized destination register, but no additional state
3806 * (AVX).
3807 */
3808typedef struct IEMOPBLENDOP
3809{
3810 PFNIEMAIMPLAVXBLENDU128 pfnU128;
3811 PFNIEMAIMPLAVXBLENDU256 pfnU256;
3812} IEMOPBLENDOP;
3813/** Pointer to a media operation function table for 4 full sized ops (AVX). */
3814typedef IEMOPBLENDOP const *PCIEMOPBLENDOP;
3815
3816/** @def IEMOPBLENDOP_INIT_VARS_EX
3817 * Declares a s_Host (x86 & amd64 only) and a s_Fallback variable with the
3818 * given functions as initializers. For use in AVX functions where a pair of
3819 * functions are only used once and the function table need not be public. */
3820#ifndef TST_IEM_CHECK_MC
3821# if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
3822# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnHostU128, a_pfnHostU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3823 static IEMOPBLENDOP const s_Host = { a_pfnHostU128, a_pfnHostU256 }; \
3824 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3825# else
3826# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) \
3827 static IEMOPBLENDOP const s_Fallback = { a_pfnFallbackU128, a_pfnFallbackU256 }
3828# endif
3829#else
3830# define IEMOPBLENDOP_INIT_VARS_EX(a_pfnU128, a_pfnU256, a_pfnFallbackU128, a_pfnFallbackU256) (void)0
3831#endif
3832/** @def IEMOPBLENDOP_INIT_VARS
3833 * Generate AVX function tables for the @a a_InstrNm instruction.
3834 * @sa IEMOPBLENDOP_INIT_VARS_EX */
3835#define IEMOPBLENDOP_INIT_VARS(a_InstrNm) \
3836 IEMOPBLENDOP_INIT_VARS_EX(RT_CONCAT3(iemAImpl_,a_InstrNm,_u128), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256),\
3837 RT_CONCAT3(iemAImpl_,a_InstrNm,_u128_fallback), RT_CONCAT3(iemAImpl_,a_InstrNm,_u256_fallback))
3838
3839
3840/** @name SSE/AVX single/double precision floating point operations.
3841 * @{ */
3842/**
3843 * A SSE result.
3844 */
3845typedef struct IEMSSERESULT
3846{
3847 /** The output value. */
3848 X86XMMREG uResult;
3849 /** The output status. */
3850 uint32_t MXCSR;
3851} IEMSSERESULT;
3852AssertCompileMemberOffset(IEMSSERESULT, MXCSR, 128 / 8);
3853/** Pointer to a SSE result. */
3854typedef IEMSSERESULT *PIEMSSERESULT;
3855/** Pointer to a const SSE result. */
3856typedef IEMSSERESULT const *PCIEMSSERESULT;
3857
3858
3859/**
3860 * A AVX128 result.
3861 */
3862typedef struct IEMAVX128RESULT
3863{
3864 /** The output value. */
3865 X86XMMREG uResult;
3866 /** The output status. */
3867 uint32_t MXCSR;
3868} IEMAVX128RESULT;
3869AssertCompileMemberOffset(IEMAVX128RESULT, MXCSR, 128 / 8);
3870/** Pointer to a AVX128 result. */
3871typedef IEMAVX128RESULT *PIEMAVX128RESULT;
3872/** Pointer to a const AVX128 result. */
3873typedef IEMAVX128RESULT const *PCIEMAVX128RESULT;
3874
3875
3876/**
3877 * A AVX256 result.
3878 */
3879typedef struct IEMAVX256RESULT
3880{
3881 /** The output value. */
3882 X86YMMREG uResult;
3883 /** The output status. */
3884 uint32_t MXCSR;
3885} IEMAVX256RESULT;
3886AssertCompileMemberOffset(IEMAVX256RESULT, MXCSR, 256 / 8);
3887/** Pointer to a AVX256 result. */
3888typedef IEMAVX256RESULT *PIEMAVX256RESULT;
3889/** Pointer to a const AVX256 result. */
3890typedef IEMAVX256RESULT const *PCIEMAVX256RESULT;
3891
3892
3893typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3894typedef FNIEMAIMPLFPSSEF2U128 *PFNIEMAIMPLFPSSEF2U128;
3895typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R32,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3896typedef FNIEMAIMPLFPSSEF2U128R32 *PFNIEMAIMPLFPSSEF2U128R32;
3897typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPSSEF2U128R64,(PX86FXSTATE pFpuState, PIEMSSERESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3898typedef FNIEMAIMPLFPSSEF2U128R64 *PFNIEMAIMPLFPSSEF2U128R64;
3899
3900typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCX86XMMREG puSrc2));
3901typedef FNIEMAIMPLFPAVXF3U128 *PFNIEMAIMPLFPAVXF3U128;
3902typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R32,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT32U pr32Src2));
3903typedef FNIEMAIMPLFPAVXF3U128R32 *PFNIEMAIMPLFPAVXF3U128R32;
3904typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U128R64,(PX86XSAVEAREA pExtState, PIEMAVX128RESULT pResult, PCX86XMMREG puSrc1, PCRTFLOAT64U pr64Src2));
3905typedef FNIEMAIMPLFPAVXF3U128R64 *PFNIEMAIMPLFPAVXF3U128R64;
3906
3907typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPAVXF3U256,(PX86XSAVEAREA pExtState, PIEMAVX256RESULT pResult, PCX86YMMREG puSrc1, PCX86YMMREG puSrc2));
3908typedef FNIEMAIMPLFPAVXF3U256 *PFNIEMAIMPLFPAVXF3U256;
3909
3910FNIEMAIMPLFPSSEF2U128 iemAImpl_addps_u128;
3911FNIEMAIMPLFPSSEF2U128 iemAImpl_addpd_u128;
3912FNIEMAIMPLFPSSEF2U128 iemAImpl_mulps_u128;
3913FNIEMAIMPLFPSSEF2U128 iemAImpl_mulpd_u128;
3914FNIEMAIMPLFPSSEF2U128 iemAImpl_subps_u128;
3915FNIEMAIMPLFPSSEF2U128 iemAImpl_subpd_u128;
3916FNIEMAIMPLFPSSEF2U128 iemAImpl_minps_u128;
3917FNIEMAIMPLFPSSEF2U128 iemAImpl_minpd_u128;
3918FNIEMAIMPLFPSSEF2U128 iemAImpl_divps_u128;
3919FNIEMAIMPLFPSSEF2U128 iemAImpl_divpd_u128;
3920FNIEMAIMPLFPSSEF2U128 iemAImpl_maxps_u128;
3921FNIEMAIMPLFPSSEF2U128 iemAImpl_maxpd_u128;
3922FNIEMAIMPLFPSSEF2U128 iemAImpl_haddps_u128;
3923FNIEMAIMPLFPSSEF2U128 iemAImpl_haddpd_u128;
3924FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubps_u128;
3925FNIEMAIMPLFPSSEF2U128 iemAImpl_hsubpd_u128;
3926FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtps_u128;
3927FNIEMAIMPLFPSSEF2U128 iemAImpl_rsqrtps_u128;
3928FNIEMAIMPLFPSSEF2U128 iemAImpl_sqrtpd_u128;
3929FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubps_u128;
3930FNIEMAIMPLFPSSEF2U128 iemAImpl_addsubpd_u128;
3931FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2ps_u128;
3932FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2pd_u128;
3933
3934FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2ps_u128;
3935FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtps2dq_u128;
3936FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttps2dq_u128;
3937FNIEMAIMPLFPSSEF2U128 iemAImpl_cvttpd2dq_u128;
3938FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtdq2pd_u128;
3939FNIEMAIMPLFPSSEF2U128 iemAImpl_cvtpd2dq_u128;
3940
3941FNIEMAIMPLFPSSEF2U128R32 iemAImpl_addss_u128_r32;
3942FNIEMAIMPLFPSSEF2U128R64 iemAImpl_addsd_u128_r64;
3943FNIEMAIMPLFPSSEF2U128R32 iemAImpl_mulss_u128_r32;
3944FNIEMAIMPLFPSSEF2U128R64 iemAImpl_mulsd_u128_r64;
3945FNIEMAIMPLFPSSEF2U128R32 iemAImpl_subss_u128_r32;
3946FNIEMAIMPLFPSSEF2U128R64 iemAImpl_subsd_u128_r64;
3947FNIEMAIMPLFPSSEF2U128R32 iemAImpl_minss_u128_r32;
3948FNIEMAIMPLFPSSEF2U128R64 iemAImpl_minsd_u128_r64;
3949FNIEMAIMPLFPSSEF2U128R32 iemAImpl_divss_u128_r32;
3950FNIEMAIMPLFPSSEF2U128R64 iemAImpl_divsd_u128_r64;
3951FNIEMAIMPLFPSSEF2U128R32 iemAImpl_maxss_u128_r32;
3952FNIEMAIMPLFPSSEF2U128R64 iemAImpl_maxsd_u128_r64;
3953FNIEMAIMPLFPSSEF2U128R32 iemAImpl_cvtss2sd_u128_r32;
3954FNIEMAIMPLFPSSEF2U128R64 iemAImpl_cvtsd2ss_u128_r64;
3955FNIEMAIMPLFPSSEF2U128R32 iemAImpl_sqrtss_u128_r32;
3956FNIEMAIMPLFPSSEF2U128R64 iemAImpl_sqrtsd_u128_r64;
3957FNIEMAIMPLFPSSEF2U128R32 iemAImpl_rsqrtss_u128_r32;
3958
3959FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddps_u128, iemAImpl_vaddps_u128_fallback;
3960FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddpd_u128, iemAImpl_vaddpd_u128_fallback;
3961FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulps_u128, iemAImpl_vmulps_u128_fallback;
3962FNIEMAIMPLFPAVXF3U128 iemAImpl_vmulpd_u128, iemAImpl_vmulpd_u128_fallback;
3963FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubps_u128, iemAImpl_vsubps_u128_fallback;
3964FNIEMAIMPLFPAVXF3U128 iemAImpl_vsubpd_u128, iemAImpl_vsubpd_u128_fallback;
3965FNIEMAIMPLFPAVXF3U128 iemAImpl_vminps_u128, iemAImpl_vminps_u128_fallback;
3966FNIEMAIMPLFPAVXF3U128 iemAImpl_vminpd_u128, iemAImpl_vminpd_u128_fallback;
3967FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivps_u128, iemAImpl_vdivps_u128_fallback;
3968FNIEMAIMPLFPAVXF3U128 iemAImpl_vdivpd_u128, iemAImpl_vdivpd_u128_fallback;
3969FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxps_u128, iemAImpl_vmaxps_u128_fallback;
3970FNIEMAIMPLFPAVXF3U128 iemAImpl_vmaxpd_u128, iemAImpl_vmaxpd_u128_fallback;
3971FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddps_u128, iemAImpl_vhaddps_u128_fallback;
3972FNIEMAIMPLFPAVXF3U128 iemAImpl_vhaddpd_u128, iemAImpl_vhaddpd_u128_fallback;
3973FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubps_u128, iemAImpl_vhsubps_u128_fallback;
3974FNIEMAIMPLFPAVXF3U128 iemAImpl_vhsubpd_u128, iemAImpl_vhsubpd_u128_fallback;
3975FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtps_u128, iemAImpl_vsqrtps_u128_fallback;
3976FNIEMAIMPLFPAVXF3U128 iemAImpl_vsqrtpd_u128, iemAImpl_vsqrtpd_u128_fallback;
3977FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubps_u128, iemAImpl_vaddsubps_u128_fallback;
3978FNIEMAIMPLFPAVXF3U128 iemAImpl_vaddsubpd_u128, iemAImpl_vaddsubpd_u128_fallback;
3979FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtpd2ps_u128, iemAImpl_vcvtpd2ps_u128_fallback;
3980FNIEMAIMPLFPAVXF3U128 iemAImpl_vcvtps2pd_u128, iemAImpl_vcvtps2pd_u128_fallback;
3981
3982FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vaddss_u128_r32, iemAImpl_vaddss_u128_r32_fallback;
3983FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vaddsd_u128_r64, iemAImpl_vaddsd_u128_r64_fallback;
3984FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmulss_u128_r32, iemAImpl_vmulss_u128_r32_fallback;
3985FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmulsd_u128_r64, iemAImpl_vmulsd_u128_r64_fallback;
3986FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsubss_u128_r32, iemAImpl_vsubss_u128_r32_fallback;
3987FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsubsd_u128_r64, iemAImpl_vsubsd_u128_r64_fallback;
3988FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vminss_u128_r32, iemAImpl_vminss_u128_r32_fallback;
3989FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vminsd_u128_r64, iemAImpl_vminsd_u128_r64_fallback;
3990FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vdivss_u128_r32, iemAImpl_vdivss_u128_r32_fallback;
3991FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vdivsd_u128_r64, iemAImpl_vdivsd_u128_r64_fallback;
3992FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vmaxss_u128_r32, iemAImpl_vmaxss_u128_r32_fallback;
3993FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vmaxsd_u128_r64, iemAImpl_vmaxsd_u128_r64_fallback;
3994FNIEMAIMPLFPAVXF3U128R32 iemAImpl_vsqrtss_u128_r32, iemAImpl_vsqrtss_u128_r32_fallback;
3995FNIEMAIMPLFPAVXF3U128R64 iemAImpl_vsqrtsd_u128_r64, iemAImpl_vsqrtsd_u128_r64_fallback;
3996
3997FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddps_u256, iemAImpl_vaddps_u256_fallback;
3998FNIEMAIMPLFPAVXF3U256 iemAImpl_vaddpd_u256, iemAImpl_vaddpd_u256_fallback;
3999FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulps_u256, iemAImpl_vmulps_u256_fallback;
4000FNIEMAIMPLFPAVXF3U256 iemAImpl_vmulpd_u256, iemAImpl_vmulpd_u256_fallback;
4001FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubps_u256, iemAImpl_vsubps_u256_fallback;
4002FNIEMAIMPLFPAVXF3U256 iemAImpl_vsubpd_u256, iemAImpl_vsubpd_u256_fallback;
4003FNIEMAIMPLFPAVXF3U256 iemAImpl_vminps_u256, iemAImpl_vminps_u256_fallback;
4004FNIEMAIMPLFPAVXF3U256 iemAImpl_vminpd_u256, iemAImpl_vminpd_u256_fallback;
4005FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivps_u256, iemAImpl_vdivps_u256_fallback;
4006FNIEMAIMPLFPAVXF3U256 iemAImpl_vdivpd_u256, iemAImpl_vdivpd_u256_fallback;
4007FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxps_u256, iemAImpl_vmaxps_u256_fallback;
4008FNIEMAIMPLFPAVXF3U256 iemAImpl_vmaxpd_u256, iemAImpl_vmaxpd_u256_fallback;
4009FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddps_u256, iemAImpl_vhaddps_u256_fallback;
4010FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddpd_u256, iemAImpl_vhaddpd_u256_fallback;
4011FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubps_u256, iemAImpl_vhsubps_u256_fallback;
4012FNIEMAIMPLFPAVXF3U256 iemAImpl_vhsubpd_u256, iemAImpl_vhsubpd_u256_fallback;
4013FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubps_u256, iemAImpl_vhaddsubps_u256_fallback;
4014FNIEMAIMPLFPAVXF3U256 iemAImpl_vhaddsubpd_u256, iemAImpl_vhaddsubpd_u256_fallback;
4015FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtpd2ps_u256, iemAImpl_vcvtpd2ps_u256_fallback;
4016FNIEMAIMPLFPAVXF3U256 iemAImpl_vcvtps2pd_u256, iemAImpl_vcvtps2pd_u256_fallback;
4017/** @} */
4018
4019/** @name C instruction implementations for anything slightly complicated.
4020 * @{ */
4021
4022/**
4023 * For typedef'ing or declaring a C instruction implementation function taking
4024 * no extra arguments.
4025 *
4026 * @param a_Name The name of the type.
4027 */
4028# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
4029 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4030/**
4031 * For defining a C instruction implementation function taking no extra
4032 * arguments.
4033 *
4034 * @param a_Name The name of the function
4035 */
4036# define IEM_CIMPL_DEF_0(a_Name) \
4037 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4038/**
4039 * Prototype version of IEM_CIMPL_DEF_0.
4040 */
4041# define IEM_CIMPL_PROTO_0(a_Name) \
4042 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr))
4043/**
4044 * For calling a C instruction implementation function taking no extra
4045 * arguments.
4046 *
4047 * This special call macro adds default arguments to the call and allow us to
4048 * change these later.
4049 *
4050 * @param a_fn The name of the function.
4051 */
4052# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
4053
4054/** Type for a C instruction implementation function taking no extra
4055 * arguments. */
4056typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
4057/** Function pointer type for a C instruction implementation function taking
4058 * no extra arguments. */
4059typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
4060
4061/**
4062 * For typedef'ing or declaring a C instruction implementation function taking
4063 * one extra argument.
4064 *
4065 * @param a_Name The name of the type.
4066 * @param a_Type0 The argument type.
4067 * @param a_Arg0 The argument name.
4068 */
4069# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
4070 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4071/**
4072 * For defining a C instruction implementation function taking one extra
4073 * argument.
4074 *
4075 * @param a_Name The name of the function
4076 * @param a_Type0 The argument type.
4077 * @param a_Arg0 The argument name.
4078 */
4079# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
4080 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4081/**
4082 * Prototype version of IEM_CIMPL_DEF_1.
4083 */
4084# define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
4085 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
4086/**
4087 * For calling a C instruction implementation function taking one extra
4088 * argument.
4089 *
4090 * This special call macro adds default arguments to the call and allow us to
4091 * change these later.
4092 *
4093 * @param a_fn The name of the function.
4094 * @param a0 The name of the 1st argument.
4095 */
4096# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
4097
4098/**
4099 * For typedef'ing or declaring a C instruction implementation function taking
4100 * two extra arguments.
4101 *
4102 * @param a_Name The name of the type.
4103 * @param a_Type0 The type of the 1st argument
4104 * @param a_Arg0 The name of the 1st argument.
4105 * @param a_Type1 The type of the 2nd argument.
4106 * @param a_Arg1 The name of the 2nd argument.
4107 */
4108# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4109 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4110/**
4111 * For defining a C instruction implementation function taking two extra
4112 * arguments.
4113 *
4114 * @param a_Name The name of the function.
4115 * @param a_Type0 The type of the 1st argument
4116 * @param a_Arg0 The name of the 1st argument.
4117 * @param a_Type1 The type of the 2nd argument.
4118 * @param a_Arg1 The name of the 2nd argument.
4119 */
4120# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4121 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4122/**
4123 * Prototype version of IEM_CIMPL_DEF_2.
4124 */
4125# define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
4126 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
4127/**
4128 * For calling a C instruction implementation function taking two extra
4129 * arguments.
4130 *
4131 * This special call macro adds default arguments to the call and allow us to
4132 * change these later.
4133 *
4134 * @param a_fn The name of the function.
4135 * @param a0 The name of the 1st argument.
4136 * @param a1 The name of the 2nd argument.
4137 */
4138# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
4139
4140/**
4141 * For typedef'ing or declaring a C instruction implementation function taking
4142 * three extra arguments.
4143 *
4144 * @param a_Name The name of the type.
4145 * @param a_Type0 The type of the 1st argument
4146 * @param a_Arg0 The name of the 1st argument.
4147 * @param a_Type1 The type of the 2nd argument.
4148 * @param a_Arg1 The name of the 2nd argument.
4149 * @param a_Type2 The type of the 3rd argument.
4150 * @param a_Arg2 The name of the 3rd argument.
4151 */
4152# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4153 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4154/**
4155 * For defining a C instruction implementation function taking three extra
4156 * arguments.
4157 *
4158 * @param a_Name The name of the function.
4159 * @param a_Type0 The type of the 1st argument
4160 * @param a_Arg0 The name of the 1st argument.
4161 * @param a_Type1 The type of the 2nd argument.
4162 * @param a_Arg1 The name of the 2nd argument.
4163 * @param a_Type2 The type of the 3rd argument.
4164 * @param a_Arg2 The name of the 3rd argument.
4165 */
4166# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4167 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4168/**
4169 * Prototype version of IEM_CIMPL_DEF_3.
4170 */
4171# define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
4172 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
4173/**
4174 * For calling a C instruction implementation function taking three extra
4175 * arguments.
4176 *
4177 * This special call macro adds default arguments to the call and allow us to
4178 * change these later.
4179 *
4180 * @param a_fn The name of the function.
4181 * @param a0 The name of the 1st argument.
4182 * @param a1 The name of the 2nd argument.
4183 * @param a2 The name of the 3rd argument.
4184 */
4185# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
4186
4187
4188/**
4189 * For typedef'ing or declaring a C instruction implementation function taking
4190 * four extra arguments.
4191 *
4192 * @param a_Name The name of the type.
4193 * @param a_Type0 The type of the 1st argument
4194 * @param a_Arg0 The name of the 1st argument.
4195 * @param a_Type1 The type of the 2nd argument.
4196 * @param a_Arg1 The name of the 2nd argument.
4197 * @param a_Type2 The type of the 3rd argument.
4198 * @param a_Arg2 The name of the 3rd argument.
4199 * @param a_Type3 The type of the 4th argument.
4200 * @param a_Arg3 The name of the 4th argument.
4201 */
4202# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4203 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
4204/**
4205 * For defining a C instruction implementation function taking four extra
4206 * arguments.
4207 *
4208 * @param a_Name The name of the function.
4209 * @param a_Type0 The type of the 1st argument
4210 * @param a_Arg0 The name of the 1st argument.
4211 * @param a_Type1 The type of the 2nd argument.
4212 * @param a_Arg1 The name of the 2nd argument.
4213 * @param a_Type2 The type of the 3rd argument.
4214 * @param a_Arg2 The name of the 3rd argument.
4215 * @param a_Type3 The type of the 4th argument.
4216 * @param a_Arg3 The name of the 4th argument.
4217 */
4218# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4219 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4220 a_Type2 a_Arg2, a_Type3 a_Arg3))
4221/**
4222 * Prototype version of IEM_CIMPL_DEF_4.
4223 */
4224# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
4225 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4226 a_Type2 a_Arg2, a_Type3 a_Arg3))
4227/**
4228 * For calling a C instruction implementation function taking four extra
4229 * arguments.
4230 *
4231 * This special call macro adds default arguments to the call and allow us to
4232 * change these later.
4233 *
4234 * @param a_fn The name of the function.
4235 * @param a0 The name of the 1st argument.
4236 * @param a1 The name of the 2nd argument.
4237 * @param a2 The name of the 3rd argument.
4238 * @param a3 The name of the 4th argument.
4239 */
4240# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
4241
4242
4243/**
4244 * For typedef'ing or declaring a C instruction implementation function taking
4245 * five extra arguments.
4246 *
4247 * @param a_Name The name of the type.
4248 * @param a_Type0 The type of the 1st argument
4249 * @param a_Arg0 The name of the 1st argument.
4250 * @param a_Type1 The type of the 2nd argument.
4251 * @param a_Arg1 The name of the 2nd argument.
4252 * @param a_Type2 The type of the 3rd argument.
4253 * @param a_Arg2 The name of the 3rd argument.
4254 * @param a_Type3 The type of the 4th argument.
4255 * @param a_Arg3 The name of the 4th argument.
4256 * @param a_Type4 The type of the 5th argument.
4257 * @param a_Arg4 The name of the 5th argument.
4258 */
4259# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4260 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, \
4261 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
4262 a_Type3 a_Arg3, a_Type4 a_Arg4))
4263/**
4264 * For defining a C instruction implementation function taking five extra
4265 * arguments.
4266 *
4267 * @param a_Name The name of the function.
4268 * @param a_Type0 The type of the 1st argument
4269 * @param a_Arg0 The name of the 1st argument.
4270 * @param a_Type1 The type of the 2nd argument.
4271 * @param a_Arg1 The name of the 2nd argument.
4272 * @param a_Type2 The type of the 3rd argument.
4273 * @param a_Arg2 The name of the 3rd argument.
4274 * @param a_Type3 The type of the 4th argument.
4275 * @param a_Arg3 The name of the 4th argument.
4276 * @param a_Type4 The type of the 5th argument.
4277 * @param a_Arg4 The name of the 5th argument.
4278 */
4279# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4280 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4281 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4282/**
4283 * Prototype version of IEM_CIMPL_DEF_5.
4284 */
4285# define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
4286 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
4287 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
4288/**
4289 * For calling a C instruction implementation function taking five extra
4290 * arguments.
4291 *
4292 * This special call macro adds default arguments to the call and allow us to
4293 * change these later.
4294 *
4295 * @param a_fn The name of the function.
4296 * @param a0 The name of the 1st argument.
4297 * @param a1 The name of the 2nd argument.
4298 * @param a2 The name of the 3rd argument.
4299 * @param a3 The name of the 4th argument.
4300 * @param a4 The name of the 5th argument.
4301 */
4302# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
4303
4304/** @} */
4305
4306
4307/** @name Opcode Decoder Function Types.
4308 * @{ */
4309
4310/** @typedef PFNIEMOP
4311 * Pointer to an opcode decoder function.
4312 */
4313
4314/** @def FNIEMOP_DEF
4315 * Define an opcode decoder function.
4316 *
4317 * We're using macors for this so that adding and removing parameters as well as
4318 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
4319 *
4320 * @param a_Name The function name.
4321 */
4322
4323/** @typedef PFNIEMOPRM
4324 * Pointer to an opcode decoder function with RM byte.
4325 */
4326
4327/** @def FNIEMOPRM_DEF
4328 * Define an opcode decoder function with RM byte.
4329 *
4330 * We're using macors for this so that adding and removing parameters as well as
4331 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
4332 *
4333 * @param a_Name The function name.
4334 */
4335
4336#if defined(__GNUC__) && defined(RT_ARCH_X86)
4337typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
4338typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4339# define FNIEMOP_DEF(a_Name) \
4340 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
4341# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4342 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4343# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4344 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4345
4346#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
4347typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
4348typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4349# define FNIEMOP_DEF(a_Name) \
4350 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4351# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4352 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4353# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4354 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4355
4356#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
4357typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4358typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4359# define FNIEMOP_DEF(a_Name) \
4360 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
4361# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4362 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
4363# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4364 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
4365
4366#else
4367typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
4368typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
4369# define FNIEMOP_DEF(a_Name) \
4370 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
4371# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
4372 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
4373# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
4374 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
4375
4376#endif
4377#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
4378
4379/**
4380 * Call an opcode decoder function.
4381 *
4382 * We're using macors for this so that adding and removing parameters can be
4383 * done as we please. See FNIEMOP_DEF.
4384 */
4385#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
4386
4387/**
4388 * Call a common opcode decoder function taking one extra argument.
4389 *
4390 * We're using macors for this so that adding and removing parameters can be
4391 * done as we please. See FNIEMOP_DEF_1.
4392 */
4393#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
4394
4395/**
4396 * Call a common opcode decoder function taking one extra argument.
4397 *
4398 * We're using macors for this so that adding and removing parameters can be
4399 * done as we please. See FNIEMOP_DEF_1.
4400 */
4401#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
4402/** @} */
4403
4404
4405/** @name Misc Helpers
4406 * @{ */
4407
4408/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
4409 * due to GCC lacking knowledge about the value range of a switch. */
4410#if RT_CPLUSPLUS_PREREQ(202000)
4411# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4412#else
4413# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
4414#endif
4415
4416/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
4417#if RT_CPLUSPLUS_PREREQ(202000)
4418# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
4419#else
4420# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
4421#endif
4422
4423/**
4424 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4425 * occation.
4426 */
4427#ifdef LOG_ENABLED
4428# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4429 do { \
4430 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
4431 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4432 } while (0)
4433#else
4434# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
4435 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4436#endif
4437
4438/**
4439 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
4440 * occation using the supplied logger statement.
4441 *
4442 * @param a_LoggerArgs What to log on failure.
4443 */
4444#ifdef LOG_ENABLED
4445# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4446 do { \
4447 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
4448 /*LogFunc(a_LoggerArgs);*/ \
4449 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
4450 } while (0)
4451#else
4452# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
4453 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
4454#endif
4455
4456/**
4457 * Gets the CPU mode (from fExec) as a IEMMODE value.
4458 *
4459 * @returns IEMMODE
4460 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4461 */
4462#define IEM_GET_CPU_MODE(a_pVCpu) ((a_pVCpu)->iem.s.fExec & IEM_F_MODE_CPUMODE_MASK)
4463
4464/**
4465 * Check if we're currently executing in real or virtual 8086 mode.
4466 *
4467 * @returns @c true if it is, @c false if not.
4468 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4469 */
4470#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (( ((a_pVCpu)->iem.s.fExec ^ IEM_F_MODE_X86_PROT_MASK) \
4471 & (IEM_F_MODE_X86_V86_MASK | IEM_F_MODE_X86_PROT_MASK)) != 0)
4472
4473/**
4474 * Check if we're currently executing in virtual 8086 mode.
4475 *
4476 * @returns @c true if it is, @c false if not.
4477 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4478 */
4479#define IEM_IS_V86_MODE(a_pVCpu) (((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_V86_MASK) != 0)
4480
4481/**
4482 * Check if we're currently executing in long mode.
4483 *
4484 * @returns @c true if it is, @c false if not.
4485 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4486 */
4487#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
4488
4489/**
4490 * Check if we're currently executing in a 16-bit code segment.
4491 *
4492 * @returns @c true if it is, @c false if not.
4493 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4494 */
4495#define IEM_IS_16BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_16BIT)
4496
4497/**
4498 * Check if we're currently executing in a 32-bit code segment.
4499 *
4500 * @returns @c true if it is, @c false if not.
4501 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4502 */
4503#define IEM_IS_32BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_32BIT)
4504
4505/**
4506 * Check if we're currently executing in a 64-bit code segment.
4507 *
4508 * @returns @c true if it is, @c false if not.
4509 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4510 */
4511#define IEM_IS_64BIT_CODE(a_pVCpu) (IEM_GET_CPU_MODE(a_pVCpu) == IEMMODE_64BIT)
4512
4513/**
4514 * Check if we're currently executing in real mode.
4515 *
4516 * @returns @c true if it is, @c false if not.
4517 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4518 */
4519#define IEM_IS_REAL_MODE(a_pVCpu) (!((a_pVCpu)->iem.s.fExec & IEM_F_MODE_X86_PROT_MASK))
4520
4521/**
4522 * Gets the current protection level (CPL).
4523 *
4524 * @returns 0..3
4525 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4526 */
4527#define IEM_GET_CPL(a_pVCpu) (((a_pVCpu)->iem.s.fExec >> IEM_F_X86_CPL_SHIFT) & IEM_F_X86_CPL_SMASK)
4528
4529/**
4530 * Sets the current protection level (CPL).
4531 *
4532 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4533 */
4534#define IEM_SET_CPL(a_pVCpu, a_uCpl) \
4535 do { (a_pVCpu)->iem.s.fExec = ((a_pVCpu)->iem.s.fExec & ~IEM_F_X86_CPL_MASK) | ((a_uCpl) << IEM_F_X86_CPL_SHIFT); } while (0)
4536
4537/**
4538 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
4539 * @returns PCCPUMFEATURES
4540 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4541 */
4542#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
4543
4544/**
4545 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
4546 * @returns PCCPUMFEATURES
4547 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4548 */
4549#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
4550
4551/**
4552 * Evaluates to true if we're presenting an Intel CPU to the guest.
4553 */
4554#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
4555
4556/**
4557 * Evaluates to true if we're presenting an AMD CPU to the guest.
4558 */
4559#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
4560
4561/**
4562 * Check if the address is canonical.
4563 */
4564#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
4565
4566/** Checks if the ModR/M byte is in register mode or not. */
4567#define IEM_IS_MODRM_REG_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
4568/** Checks if the ModR/M byte is in memory mode or not. */
4569#define IEM_IS_MODRM_MEM_MODE(a_bRm) ( ((a_bRm) & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT) )
4570
4571/**
4572 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in.
4573 *
4574 * For use during decoding.
4575 */
4576#define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg )
4577/**
4578 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in.
4579 *
4580 * For use during decoding.
4581 */
4582#define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB )
4583
4584/**
4585 * Gets the register (reg) part of a ModR/M encoding, without REX.R.
4586 *
4587 * For use during decoding.
4588 */
4589#define IEM_GET_MODRM_REG_8(a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) )
4590/**
4591 * Gets the r/m part of a ModR/M encoding as a register index, without REX.B.
4592 *
4593 * For use during decoding.
4594 */
4595#define IEM_GET_MODRM_RM_8(a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) )
4596
4597/**
4598 * Gets the register (reg) part of a ModR/M encoding as an extended 8-bit
4599 * register index, with REX.R added in.
4600 *
4601 * For use during decoding.
4602 *
4603 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4604 */
4605#define IEM_GET_MODRM_REG_EX8(a_pVCpu, a_bRm) \
4606 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4607 || !((a_bRm) & (4 << X86_MODRM_REG_SHIFT)) /* IEM_GET_MODRM_REG(pVCpu, a_bRm) < 4 */ \
4608 ? IEM_GET_MODRM_REG(pVCpu, a_bRm) : (((a_bRm) >> X86_MODRM_REG_SHIFT) & 3) | 16)
4609/**
4610 * Gets the r/m part of a ModR/M encoding as an extended 8-bit register index,
4611 * with REX.B added in.
4612 *
4613 * For use during decoding.
4614 *
4615 * @see iemGRegRefU8Ex, iemGRegFetchU8Ex, iemGRegStoreU8Ex
4616 */
4617#define IEM_GET_MODRM_RM_EX8(a_pVCpu, a_bRm) \
4618 ( (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX) \
4619 || !((a_bRm) & 4) /* IEM_GET_MODRM_RM(pVCpu, a_bRm) < 4 */ \
4620 ? IEM_GET_MODRM_RM(pVCpu, a_bRm) : ((a_bRm) & 3) | 16)
4621
4622/**
4623 * Combines the prefix REX and ModR/M byte for passing to
4624 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4625 *
4626 * @returns The ModRM byte but with bit 3 set to REX.B and bit 4 to REX.X.
4627 * The two bits are part of the REG sub-field, which isn't needed in
4628 * iemOpHlpCalcRmEffAddrThreadedAddr64().
4629 *
4630 * For use during decoding/recompiling.
4631 */
4632#define IEM_GET_MODRM_EX(a_pVCpu, a_bRm) \
4633 ( ((a_bRm) & ~X86_MODRM_REG_MASK) \
4634 | (uint8_t)( (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X)) >> (26 - 3) ) )
4635AssertCompile(IEM_OP_PRF_REX_B == RT_BIT_32(26));
4636AssertCompile(IEM_OP_PRF_REX_X == RT_BIT_32(27));
4637
4638/**
4639 * Gets the effective VEX.VVVV value.
4640 *
4641 * The 4th bit is ignored if not 64-bit code.
4642 * @returns effective V-register value.
4643 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
4644 */
4645#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
4646 (IEM_IS_64BIT_CODE(a_pVCpu) ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
4647
4648
4649/**
4650 * Checks if we're executing inside an AMD-V or VT-x guest.
4651 */
4652#if defined(VBOX_WITH_NESTED_HWVIRT_VMX) || defined(VBOX_WITH_NESTED_HWVIRT_SVM)
4653# define IEM_IS_IN_GUEST(a_pVCpu) RT_BOOL((a_pVCpu)->iem.s.fExec & IEM_F_X86_CTX_IN_GUEST)
4654#else
4655# define IEM_IS_IN_GUEST(a_pVCpu) false
4656#endif
4657
4658
4659#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
4660
4661/**
4662 * Check if the guest has entered VMX root operation.
4663 */
4664# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
4665
4666/**
4667 * Check if the guest has entered VMX non-root operation.
4668 */
4669# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) ( ((a_pVCpu)->iem.s.fExec & (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST)) \
4670 == (IEM_F_X86_CTX_VMX | IEM_F_X86_CTX_IN_GUEST) )
4671
4672/**
4673 * Check if the nested-guest has the given Pin-based VM-execution control set.
4674 */
4675# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
4676
4677/**
4678 * Check if the nested-guest has the given Processor-based VM-execution control set.
4679 */
4680# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
4681
4682/**
4683 * Check if the nested-guest has the given Secondary Processor-based VM-execution
4684 * control set.
4685 */
4686# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
4687
4688/** Gets the guest-physical address of the shadows VMCS for the given VCPU. */
4689# define IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysShadowVmcs)
4690
4691/** Whether a shadow VMCS is present for the given VCPU. */
4692# define IEM_VMX_HAS_SHADOW_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_SHADOW_VMCS(a_pVCpu) != NIL_RTGCPHYS)
4693
4694/** Gets the VMXON region pointer. */
4695# define IEM_VMX_GET_VMXON_PTR(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
4696
4697/** Gets the guest-physical address of the current VMCS for the given VCPU. */
4698# define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
4699
4700/** Whether a current VMCS is present for the given VCPU. */
4701# define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu) RT_BOOL(IEM_VMX_GET_CURRENT_VMCS(a_pVCpu) != NIL_RTGCPHYS)
4702
4703/** Assigns the guest-physical address of the current VMCS for the given VCPU. */
4704# define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
4705 do \
4706 { \
4707 Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
4708 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
4709 } while (0)
4710
4711/** Clears any current VMCS for the given VCPU. */
4712# define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
4713 do \
4714 { \
4715 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
4716 } while (0)
4717
4718/**
4719 * Invokes the VMX VM-exit handler for an instruction intercept.
4720 */
4721# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
4722 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
4723
4724/**
4725 * Invokes the VMX VM-exit handler for an instruction intercept where the
4726 * instruction provides additional VM-exit information.
4727 */
4728# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
4729 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
4730
4731/**
4732 * Invokes the VMX VM-exit handler for a task switch.
4733 */
4734# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
4735 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
4736
4737/**
4738 * Invokes the VMX VM-exit handler for MWAIT.
4739 */
4740# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
4741 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
4742
4743/**
4744 * Invokes the VMX VM-exit handler for EPT faults.
4745 */
4746# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
4747 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
4748
4749/**
4750 * Invokes the VMX VM-exit handler.
4751 */
4752# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
4753 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
4754
4755#else
4756# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
4757# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
4758# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
4759# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
4760# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
4761# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
4762# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
4763# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
4764# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
4765# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
4766# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
4767
4768#endif
4769
4770#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
4771/**
4772 * Checks if we're executing a guest using AMD-V.
4773 */
4774# define IEM_SVM_IS_IN_GUEST(a_pVCpu) ( (a_pVCpu->iem.s.fExec & (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST)) \
4775 == (IEM_F_X86_CTX_SVM | IEM_F_X86_CTX_IN_GUEST))
4776/**
4777 * Check if an SVM control/instruction intercept is set.
4778 */
4779# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
4780 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
4781
4782/**
4783 * Check if an SVM read CRx intercept is set.
4784 */
4785# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
4786 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
4787
4788/**
4789 * Check if an SVM write CRx intercept is set.
4790 */
4791# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
4792 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
4793
4794/**
4795 * Check if an SVM read DRx intercept is set.
4796 */
4797# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
4798 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
4799
4800/**
4801 * Check if an SVM write DRx intercept is set.
4802 */
4803# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
4804 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
4805
4806/**
4807 * Check if an SVM exception intercept is set.
4808 */
4809# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
4810 (IEM_SVM_IS_IN_GUEST(a_pVCpu) && CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
4811
4812/**
4813 * Invokes the SVM \#VMEXIT handler for the nested-guest.
4814 */
4815# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
4816 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
4817
4818/**
4819 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
4820 * corresponding decode assist information.
4821 */
4822# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
4823 do \
4824 { \
4825 uint64_t uExitInfo1; \
4826 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
4827 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
4828 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
4829 else \
4830 uExitInfo1 = 0; \
4831 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
4832 } while (0)
4833
4834/** Check and handles SVM nested-guest instruction intercept and updates
4835 * NRIP if needed.
4836 */
4837# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
4838 do \
4839 { \
4840 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
4841 { \
4842 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
4843 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
4844 } \
4845 } while (0)
4846
4847/** Checks and handles SVM nested-guest CR0 read intercept. */
4848# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) \
4849 do \
4850 { \
4851 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
4852 { /* probably likely */ } \
4853 else \
4854 { \
4855 IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr); \
4856 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
4857 } \
4858 } while (0)
4859
4860/**
4861 * Updates the NextRIP (NRI) field in the nested-guest VMCB.
4862 */
4863# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) \
4864 do { \
4865 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
4866 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_cbInstr)); \
4867 } while (0)
4868
4869#else
4870# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
4871# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
4872# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
4873# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
4874# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
4875# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
4876# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
4877# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
4878# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, \
4879 a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
4880# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2, a_cbInstr) do { } while (0)
4881# define IEM_SVM_UPDATE_NRIP(a_pVCpu, a_cbInstr) do { } while (0)
4882
4883#endif
4884
4885/** @} */
4886
4887uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
4888VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
4889
4890
4891/**
4892 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
4893 */
4894typedef union IEMSELDESC
4895{
4896 /** The legacy view. */
4897 X86DESC Legacy;
4898 /** The long mode view. */
4899 X86DESC64 Long;
4900} IEMSELDESC;
4901/** Pointer to a selector descriptor table entry. */
4902typedef IEMSELDESC *PIEMSELDESC;
4903
4904/** @name Raising Exceptions.
4905 * @{ */
4906VBOXSTRICTRC iemTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, uint32_t uNextEip, uint32_t fFlags,
4907 uint16_t uErr, uint64_t uCr2, RTSEL SelTSS, PIEMSELDESC pNewDescTSS) RT_NOEXCEPT;
4908
4909VBOXSTRICTRC iemRaiseXcptOrInt(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags,
4910 uint16_t uErr, uint64_t uCr2) RT_NOEXCEPT;
4911#ifdef IEM_WITH_SETJMP
4912DECL_NO_RETURN(void) iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector,
4913 uint32_t fFlags, uint16_t uErr, uint64_t uCr2) IEM_NOEXCEPT_MAY_LONGJMP;
4914#endif
4915VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT;
4916VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4917VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT;
4918VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT;
4919VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT;
4920VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4921VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT;
4922VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
4923VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4924/*VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;*/
4925VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4926VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4927VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
4928VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4929VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT;
4930VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT;
4931#ifdef IEM_WITH_SETJMP
4932DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
4933#endif
4934VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
4935VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT;
4936VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
4937#ifdef IEM_WITH_SETJMP
4938DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
4939#endif
4940VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT;
4941#ifdef IEM_WITH_SETJMP
4942DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) IEM_NOEXCEPT_MAY_LONGJMP;
4943#endif
4944VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT;
4945#ifdef IEM_WITH_SETJMP
4946DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) IEM_NOEXCEPT_MAY_LONGJMP;
4947#endif
4948VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) RT_NOEXCEPT;
4949#ifdef IEM_WITH_SETJMP
4950DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t cbAccess, uint32_t fAccess, int rc) IEM_NOEXCEPT_MAY_LONGJMP;
4951#endif
4952VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
4953VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4954#ifdef IEM_WITH_SETJMP
4955DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
4956#endif
4957VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT;
4958
4959void iemLogSyscallProtModeInt(PVMCPUCC pVCpu, uint8_t u8Vector, uint8_t cbInstr);
4960
4961IEM_CIMPL_DEF_0(iemCImplRaiseDivideError);
4962IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix);
4963IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode);
4964
4965/**
4966 * Macro for calling iemCImplRaiseDivideError().
4967 *
4968 * This is for things that will _always_ decode to an \#DE, taking the
4969 * recompiler into consideration and everything.
4970 *
4971 * @return Strict VBox status code.
4972 */
4973#define IEMOP_RAISE_DIVIDE_ERROR_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseDivideError)
4974
4975/**
4976 * Macro for calling iemCImplRaiseInvalidLockPrefix().
4977 *
4978 * This is for things that will _always_ decode to an \#UD, taking the
4979 * recompiler into consideration and everything.
4980 *
4981 * @return Strict VBox status code.
4982 */
4983#define IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidLockPrefix)
4984
4985/**
4986 * Macro for calling iemCImplRaiseInvalidOpcode() for decode/static \#UDs.
4987 *
4988 * This is for things that will _always_ decode to an \#UD, taking the
4989 * recompiler into consideration and everything.
4990 *
4991 * @return Strict VBox status code.
4992 */
4993#define IEMOP_RAISE_INVALID_OPCODE_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
4994
4995/**
4996 * Macro for calling iemCImplRaiseInvalidOpcode() for runtime-style \#UDs.
4997 *
4998 * Using this macro means you've got _buggy_ _code_ and are doing things that
4999 * belongs exclusively in IEMAllCImpl.cpp during decoding.
5000 *
5001 * @return Strict VBox status code.
5002 * @see IEMOP_RAISE_INVALID_OPCODE_RET
5003 */
5004#define IEMOP_RAISE_INVALID_OPCODE_RUNTIME_RET() IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, 0, iemCImplRaiseInvalidOpcode)
5005
5006/** @} */
5007
5008/** @name Register Access.
5009 * @{ */
5010VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
5011 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5012VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT;
5013VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
5014 IEMMODE enmEffOpSize) RT_NOEXCEPT;
5015/** @} */
5016
5017/** @name FPU access and helpers.
5018 * @{ */
5019void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5020void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5021void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult, uint16_t uFpuOpcode) RT_NOEXCEPT;
5022void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5023void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5024void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5025 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5026void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
5027 uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5028void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5029void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5030void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5031void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5032void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint16_t uFpuOpcode) RT_NOEXCEPT;
5033void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5034void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5035void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5036void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint16_t uFpuOpcode) RT_NOEXCEPT;
5037void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5038void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5039void iemFpuStackPushUnderflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5040void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5041void iemFpuStackPushOverflow(PVMCPUCC pVCpu, uint16_t uFpuOpcode) RT_NOEXCEPT;
5042void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, uint16_t uFpuOpcode) RT_NOEXCEPT;
5043/** @} */
5044
5045/** @name SSE+AVX SIMD access and helpers.
5046 * @{ */
5047void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT;
5048void iemSseUpdateMxcsr(PVMCPUCC pVCpu, uint32_t fMxcsr) RT_NOEXCEPT;
5049/** @} */
5050
5051/** @name Memory access.
5052 * @{ */
5053
5054/** Report a \#GP instead of \#AC and do not restrict to ring-3 */
5055#define IEM_MEMMAP_F_ALIGN_GP RT_BIT_32(16)
5056/** SSE access that should report a \#GP instead of \#AC, unless MXCSR.MM=1
5057 * when it works like normal \#AC. Always used with IEM_MEMMAP_F_ALIGN_GP. */
5058#define IEM_MEMMAP_F_ALIGN_SSE RT_BIT_32(17)
5059/** If \#AC is applicable, raise it. Always used with IEM_MEMMAP_F_ALIGN_GP.
5060 * Users include FXSAVE & FXRSTOR. */
5061#define IEM_MEMMAP_F_ALIGN_GP_OR_AC RT_BIT_32(18)
5062
5063VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5064 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT;
5065VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5066#ifndef IN_RING3
5067VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5068#endif
5069void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5070void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
5071VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT;
5072VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT;
5073VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
5074
5075void iemOpcodeFlushLight(PVMCPUCC pVCpu, uint8_t cbInstr);
5076void iemOpcodeFlushHeavy(PVMCPUCC pVCpu, uint8_t cbInstr);
5077#ifdef IEM_WITH_CODE_TLB
5078void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) IEM_NOEXCEPT_MAY_LONGJMP;
5079#else
5080VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT;
5081#endif
5082#ifdef IEM_WITH_SETJMP
5083uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5084uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5085uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5086uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
5087#else
5088VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT;
5089VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5090VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5091VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5092VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT;
5093VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5094VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5095VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT;
5096VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5097VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5098VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT;
5099#endif
5100
5101VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5102VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5103VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5104VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5105VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5106VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5107VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5108VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5109VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5110VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5111VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5112VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5113VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
5114 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT;
5115#ifdef IEM_WITH_SETJMP
5116uint8_t iemMemFetchDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5117uint16_t iemMemFetchDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5118uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5119uint32_t iemMemFlatFetchDataU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5120uint64_t iemMemFetchDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5121uint64_t iemMemFetchDataU64AlignedU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5122void iemMemFetchDataR80SafeJmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5123void iemMemFetchDataD80SafeJmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5124void iemMemFetchDataU128SafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5125void iemMemFetchDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5126void iemMemFetchDataU256SafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5127void iemMemFetchDataU256AlignedSseSafeJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5128# if 0 /* these are inlined now */
5129uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5130uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5131uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5132uint32_t iemMemFlatFetchDataU32Jmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5133uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5134uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5135void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5136void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5137void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5138# endif
5139void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5140void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5141void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5142#endif
5143
5144VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5145VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5146VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5147VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5148VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT;
5149
5150VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT;
5151VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT;
5152VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT;
5153VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT;
5154VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5155VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT;
5156VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5157VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT;
5158VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT;
5159#ifdef IEM_WITH_SETJMP
5160void iemMemStoreDataU8SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5161void iemMemStoreDataU16SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5162void iemMemStoreDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5163void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5164void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5165void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5166void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5167void iemMemStoreDataU256AlignedAvxSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5168void iemMemStoreDataR80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTFLOAT80U pr80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5169void iemMemStoreDataD80SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTPBCD80U pd80Value) IEM_NOEXCEPT_MAY_LONGJMP;
5170#if 0
5171void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) IEM_NOEXCEPT_MAY_LONGJMP;
5172void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) IEM_NOEXCEPT_MAY_LONGJMP;
5173void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) IEM_NOEXCEPT_MAY_LONGJMP;
5174void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
5175void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5176#endif
5177void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
5178void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5179void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
5180#endif
5181
5182#ifdef IEM_WITH_SETJMP
5183uint8_t *iemMemMapDataU8RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5184uint8_t *iemMemMapDataU8WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5185uint8_t const *iemMemMapDataU8RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5186uint16_t *iemMemMapDataU16RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5187uint16_t *iemMemMapDataU16WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5188uint16_t const *iemMemMapDataU16RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5189uint32_t *iemMemMapDataU32RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5190uint32_t *iemMemMapDataU32WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5191uint32_t const *iemMemMapDataU32RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5192uint64_t *iemMemMapDataU64RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5193uint64_t *iemMemMapDataU64WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5194uint64_t const *iemMemMapDataU64RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5195PRTFLOAT80U iemMemMapDataR80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5196PRTFLOAT80U iemMemMapDataR80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5197PCRTFLOAT80U iemMemMapDataR80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5198PRTPBCD80U iemMemMapDataD80RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5199PRTPBCD80U iemMemMapDataD80WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5200PCRTPBCD80U iemMemMapDataD80RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5201PRTUINT128U iemMemMapDataU128RwSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5202PRTUINT128U iemMemMapDataU128WoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5203PCRTUINT128U iemMemMapDataU128RoSafeJmp(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
5204
5205void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5206void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5207void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5208void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
5209void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5210#endif
5211
5212VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5213 void **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5214VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo, uint64_t uNewRsp) RT_NOEXCEPT;
5215VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT;
5216VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5217VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT;
5218VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5219VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5220VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5221VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT;
5222VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
5223 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t *puNewRsp) RT_NOEXCEPT;
5224VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t off, size_t cbMem,
5225 void const **ppvMem, uint8_t *pbUnmapInfo, uint64_t uCurNewRsp) RT_NOEXCEPT;
5226VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
5227VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT;
5228VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT;
5229VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT;
5230VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5231VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5232VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT;
5233
5234#ifdef IEM_WITH_SETJMP
5235void iemMemStackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5236void iemMemStackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5237void iemMemStackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5238void iemMemStackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5239void iemMemStackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5240void iemMemStackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5241void iemMemStackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5242
5243void iemMemFlat32StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5244void iemMemFlat32StackPushU32SafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5245void iemMemFlat32StackPushU32SRegSafeJmp(PVMCPUCC pVCpu, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5246void iemMemFlat32StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5247void iemMemFlat32StackPopGRegU32SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5248
5249void iemMemFlat64StackPushU16SafeJmp(PVMCPUCC pVCpu, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5250void iemMemFlat64StackPushU64SafeJmp(PVMCPUCC pVCpu, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
5251void iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5252void iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
5253#endif
5254
5255/** @} */
5256
5257/** @name IEMAllCImpl.cpp
5258 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/'
5259 * @{ */
5260IEM_CIMPL_PROTO_2(iemCImpl_pop_mem16, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5261IEM_CIMPL_PROTO_2(iemCImpl_pop_mem32, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5262IEM_CIMPL_PROTO_2(iemCImpl_pop_mem64, uint16_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5263IEM_CIMPL_PROTO_0(iemCImpl_popa_16);
5264IEM_CIMPL_PROTO_0(iemCImpl_popa_32);
5265IEM_CIMPL_PROTO_0(iemCImpl_pusha_16);
5266IEM_CIMPL_PROTO_0(iemCImpl_pusha_32);
5267IEM_CIMPL_PROTO_1(iemCImpl_pushf, IEMMODE, enmEffOpSize);
5268IEM_CIMPL_PROTO_1(iemCImpl_popf, IEMMODE, enmEffOpSize);
5269IEM_CIMPL_PROTO_1(iemCImpl_call_16, uint16_t, uNewPC);
5270IEM_CIMPL_PROTO_1(iemCImpl_call_rel_16, int16_t, offDisp);
5271IEM_CIMPL_PROTO_1(iemCImpl_call_32, uint32_t, uNewPC);
5272IEM_CIMPL_PROTO_1(iemCImpl_call_rel_32, int32_t, offDisp);
5273IEM_CIMPL_PROTO_1(iemCImpl_call_64, uint64_t, uNewPC);
5274IEM_CIMPL_PROTO_1(iemCImpl_call_rel_64, int64_t, offDisp);
5275IEM_CIMPL_PROTO_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5276IEM_CIMPL_PROTO_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5277typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize);
5278typedef FNIEMCIMPLFARBRANCH *PFNIEMCIMPLFARBRANCH;
5279IEM_CIMPL_PROTO_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop);
5280IEM_CIMPL_PROTO_0(iemCImpl_retn_16);
5281IEM_CIMPL_PROTO_0(iemCImpl_retn_32);
5282IEM_CIMPL_PROTO_0(iemCImpl_retn_64);
5283IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_16, uint16_t, cbPop);
5284IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_32, uint16_t, cbPop);
5285IEM_CIMPL_PROTO_1(iemCImpl_retn_iw_64, uint16_t, cbPop);
5286IEM_CIMPL_PROTO_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters);
5287IEM_CIMPL_PROTO_1(iemCImpl_leave, IEMMODE, enmEffOpSize);
5288IEM_CIMPL_PROTO_2(iemCImpl_int, uint8_t, u8Int, IEMINT, enmInt);
5289IEM_CIMPL_PROTO_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize);
5290IEM_CIMPL_PROTO_4(iemCImpl_iret_prot_v8086, uint32_t, uNewEip, uint16_t, uNewCs, uint32_t, uNewFlags, uint64_t, uNewRsp);
5291IEM_CIMPL_PROTO_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize);
5292IEM_CIMPL_PROTO_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize);
5293IEM_CIMPL_PROTO_1(iemCImpl_iret_64bit, IEMMODE, enmEffOpSize);
5294IEM_CIMPL_PROTO_1(iemCImpl_iret, IEMMODE, enmEffOpSize);
5295IEM_CIMPL_PROTO_0(iemCImpl_loadall286);
5296IEM_CIMPL_PROTO_0(iemCImpl_syscall);
5297IEM_CIMPL_PROTO_1(iemCImpl_sysret, IEMMODE, enmEffOpSize);
5298IEM_CIMPL_PROTO_0(iemCImpl_sysenter);
5299IEM_CIMPL_PROTO_1(iemCImpl_sysexit, IEMMODE, enmEffOpSize);
5300IEM_CIMPL_PROTO_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel);
5301IEM_CIMPL_PROTO_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel);
5302IEM_CIMPL_PROTO_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize);
5303IEM_CIMPL_PROTO_5(iemCImpl_load_SReg_Greg, uint16_t, uSel, uint64_t, offSeg, uint8_t, iSegReg, uint8_t, iGReg, IEMMODE, enmEffOpSize);
5304IEM_CIMPL_PROTO_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite);
5305IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, bool, fIsLar);
5306IEM_CIMPL_PROTO_3(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, bool, fIsLar);
5307IEM_CIMPL_PROTO_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5308IEM_CIMPL_PROTO_2(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5309IEM_CIMPL_PROTO_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize);
5310IEM_CIMPL_PROTO_2(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5311IEM_CIMPL_PROTO_1(iemCImpl_lldt, uint16_t, uNewLdt);
5312IEM_CIMPL_PROTO_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5313IEM_CIMPL_PROTO_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5314IEM_CIMPL_PROTO_1(iemCImpl_ltr, uint16_t, uNewTr);
5315IEM_CIMPL_PROTO_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5316IEM_CIMPL_PROTO_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5317IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg);
5318IEM_CIMPL_PROTO_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize);
5319IEM_CIMPL_PROTO_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5320IEM_CIMPL_PROTO_4(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX, IEMACCESSCRX, enmAccessCrX, uint8_t, iGReg);
5321IEM_CIMPL_PROTO_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg);
5322IEM_CIMPL_PROTO_2(iemCImpl_lmsw, uint16_t, u16NewMsw, RTGCPTR, GCPtrEffDst);
5323IEM_CIMPL_PROTO_0(iemCImpl_clts);
5324IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg);
5325IEM_CIMPL_PROTO_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg);
5326IEM_CIMPL_PROTO_2(iemCImpl_mov_Rd_Td, uint8_t, iGReg, uint8_t, iTrReg);
5327IEM_CIMPL_PROTO_2(iemCImpl_mov_Td_Rd, uint8_t, iTrReg, uint8_t, iGReg);
5328IEM_CIMPL_PROTO_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage);
5329IEM_CIMPL_PROTO_3(iemCImpl_invpcid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvpcidDesc, uint64_t, uInvpcidType);
5330IEM_CIMPL_PROTO_0(iemCImpl_invd);
5331IEM_CIMPL_PROTO_0(iemCImpl_wbinvd);
5332IEM_CIMPL_PROTO_0(iemCImpl_rsm);
5333IEM_CIMPL_PROTO_0(iemCImpl_rdtsc);
5334IEM_CIMPL_PROTO_0(iemCImpl_rdtscp);
5335IEM_CIMPL_PROTO_0(iemCImpl_rdpmc);
5336IEM_CIMPL_PROTO_0(iemCImpl_rdmsr);
5337IEM_CIMPL_PROTO_0(iemCImpl_wrmsr);
5338IEM_CIMPL_PROTO_3(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5339IEM_CIMPL_PROTO_2(iemCImpl_in_eAX_DX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5340IEM_CIMPL_PROTO_3(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg, uint8_t, bImmAndEffAddrMode);
5341IEM_CIMPL_PROTO_2(iemCImpl_out_DX_eAX, uint8_t, cbReg, IEMMODE, enmEffAddrMode);
5342IEM_CIMPL_PROTO_0(iemCImpl_cli);
5343IEM_CIMPL_PROTO_0(iemCImpl_sti);
5344IEM_CIMPL_PROTO_0(iemCImpl_hlt);
5345IEM_CIMPL_PROTO_1(iemCImpl_monitor, uint8_t, iEffSeg);
5346IEM_CIMPL_PROTO_0(iemCImpl_mwait);
5347IEM_CIMPL_PROTO_0(iemCImpl_swapgs);
5348IEM_CIMPL_PROTO_0(iemCImpl_cpuid);
5349IEM_CIMPL_PROTO_1(iemCImpl_aad, uint8_t, bImm);
5350IEM_CIMPL_PROTO_1(iemCImpl_aam, uint8_t, bImm);
5351IEM_CIMPL_PROTO_0(iemCImpl_daa);
5352IEM_CIMPL_PROTO_0(iemCImpl_das);
5353IEM_CIMPL_PROTO_0(iemCImpl_aaa);
5354IEM_CIMPL_PROTO_0(iemCImpl_aas);
5355IEM_CIMPL_PROTO_3(iemCImpl_bound_16, int16_t, idxArray, int16_t, idxLowerBound, int16_t, idxUpperBound);
5356IEM_CIMPL_PROTO_3(iemCImpl_bound_32, int32_t, idxArray, int32_t, idxLowerBound, int32_t, idxUpperBound);
5357IEM_CIMPL_PROTO_0(iemCImpl_xgetbv);
5358IEM_CIMPL_PROTO_0(iemCImpl_xsetbv);
5359IEM_CIMPL_PROTO_5(iemCImpl_cmpxchg16b_fallback_rendezvous, PRTUINT128U, pu128Dst, PRTUINT128U, pu128RaxRdx,
5360 PRTUINT128U, pu128RbxRcx, uint32_t *, pEFlags, uint8_t, bUnmapInfo);
5361IEM_CIMPL_PROTO_2(iemCImpl_clflush_clflushopt, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5362IEM_CIMPL_PROTO_1(iemCImpl_finit, bool, fCheckXcpts);
5363IEM_CIMPL_PROTO_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5364IEM_CIMPL_PROTO_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5365IEM_CIMPL_PROTO_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5366IEM_CIMPL_PROTO_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize);
5367IEM_CIMPL_PROTO_2(iemCImpl_stmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5368IEM_CIMPL_PROTO_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5369IEM_CIMPL_PROTO_2(iemCImpl_ldmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff);
5370IEM_CIMPL_PROTO_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5371IEM_CIMPL_PROTO_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst);
5372IEM_CIMPL_PROTO_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5373IEM_CIMPL_PROTO_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc);
5374IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
5375IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
5376IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
5377IEM_CIMPL_PROTO_2(iemCImpl_rdseed, uint8_t, iReg, IEMMODE, enmEffOpSize);
5378IEM_CIMPL_PROTO_2(iemCImpl_rdrand, uint8_t, iReg, IEMMODE, enmEffOpSize);
5379/** @} */
5380
5381/** @name IEMAllCImplStrInstr.cpp.h
5382 * @note sed -e '/IEM_CIMPL_DEF_/!d' -e 's/IEM_CIMPL_DEF_/IEM_CIMPL_PROTO_/' -e 's/$/;/' -e 's/RT_CONCAT4(//' \
5383 * -e 's/,ADDR_SIZE)/64/g' -e 's/,OP_SIZE,/64/g' -e 's/,OP_rAX,/rax/g' IEMAllCImplStrInstr.cpp.h
5384 * @{ */
5385IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr16, uint8_t, iEffSeg);
5386IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr16, uint8_t, iEffSeg);
5387IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m16);
5388IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m16);
5389IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr16, uint8_t, iEffSeg);
5390IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m16);
5391IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m16, int8_t, iEffSeg);
5392IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr16, bool, fIoChecked);
5393IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr16, bool, fIoChecked);
5394IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5395IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5396
5397IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr16, uint8_t, iEffSeg);
5398IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr16, uint8_t, iEffSeg);
5399IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m16);
5400IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m16);
5401IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr16, uint8_t, iEffSeg);
5402IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m16);
5403IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m16, int8_t, iEffSeg);
5404IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr16, bool, fIoChecked);
5405IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr16, bool, fIoChecked);
5406IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5407IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5408
5409IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr16, uint8_t, iEffSeg);
5410IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr16, uint8_t, iEffSeg);
5411IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m16);
5412IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m16);
5413IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr16, uint8_t, iEffSeg);
5414IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m16);
5415IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m16, int8_t, iEffSeg);
5416IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr16, bool, fIoChecked);
5417IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr16, bool, fIoChecked);
5418IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5419IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr16, uint8_t, iEffSeg, bool, fIoChecked);
5420
5421
5422IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr32, uint8_t, iEffSeg);
5423IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr32, uint8_t, iEffSeg);
5424IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m32);
5425IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m32);
5426IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr32, uint8_t, iEffSeg);
5427IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m32);
5428IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m32, int8_t, iEffSeg);
5429IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr32, bool, fIoChecked);
5430IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr32, bool, fIoChecked);
5431IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5432IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5433
5434IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr32, uint8_t, iEffSeg);
5435IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr32, uint8_t, iEffSeg);
5436IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m32);
5437IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m32);
5438IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr32, uint8_t, iEffSeg);
5439IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m32);
5440IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m32, int8_t, iEffSeg);
5441IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr32, bool, fIoChecked);
5442IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr32, bool, fIoChecked);
5443IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5444IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5445
5446IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr32, uint8_t, iEffSeg);
5447IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr32, uint8_t, iEffSeg);
5448IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m32);
5449IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m32);
5450IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr32, uint8_t, iEffSeg);
5451IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m32);
5452IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m32, int8_t, iEffSeg);
5453IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr32, bool, fIoChecked);
5454IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr32, bool, fIoChecked);
5455IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5456IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5457
5458IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr32, uint8_t, iEffSeg);
5459IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr32, uint8_t, iEffSeg);
5460IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m32);
5461IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m32);
5462IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr32, uint8_t, iEffSeg);
5463IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m32);
5464IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m32, int8_t, iEffSeg);
5465IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr32, bool, fIoChecked);
5466IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr32, bool, fIoChecked);
5467IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5468IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr32, uint8_t, iEffSeg, bool, fIoChecked);
5469
5470
5471IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op8_addr64, uint8_t, iEffSeg);
5472IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op8_addr64, uint8_t, iEffSeg);
5473IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_al_m64);
5474IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_al_m64);
5475IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op8_addr64, uint8_t, iEffSeg);
5476IEM_CIMPL_PROTO_0(iemCImpl_stos_al_m64);
5477IEM_CIMPL_PROTO_1(iemCImpl_lods_al_m64, int8_t, iEffSeg);
5478IEM_CIMPL_PROTO_1(iemCImpl_ins_op8_addr64, bool, fIoChecked);
5479IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op8_addr64, bool, fIoChecked);
5480IEM_CIMPL_PROTO_2(iemCImpl_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5481IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op8_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5482
5483IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op16_addr64, uint8_t, iEffSeg);
5484IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op16_addr64, uint8_t, iEffSeg);
5485IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_ax_m64);
5486IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_ax_m64);
5487IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op16_addr64, uint8_t, iEffSeg);
5488IEM_CIMPL_PROTO_0(iemCImpl_stos_ax_m64);
5489IEM_CIMPL_PROTO_1(iemCImpl_lods_ax_m64, int8_t, iEffSeg);
5490IEM_CIMPL_PROTO_1(iemCImpl_ins_op16_addr64, bool, fIoChecked);
5491IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op16_addr64, bool, fIoChecked);
5492IEM_CIMPL_PROTO_2(iemCImpl_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5493IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op16_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5494
5495IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op32_addr64, uint8_t, iEffSeg);
5496IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op32_addr64, uint8_t, iEffSeg);
5497IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_eax_m64);
5498IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_eax_m64);
5499IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op32_addr64, uint8_t, iEffSeg);
5500IEM_CIMPL_PROTO_0(iemCImpl_stos_eax_m64);
5501IEM_CIMPL_PROTO_1(iemCImpl_lods_eax_m64, int8_t, iEffSeg);
5502IEM_CIMPL_PROTO_1(iemCImpl_ins_op32_addr64, bool, fIoChecked);
5503IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op32_addr64, bool, fIoChecked);
5504IEM_CIMPL_PROTO_2(iemCImpl_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5505IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op32_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5506
5507IEM_CIMPL_PROTO_1(iemCImpl_repe_cmps_op64_addr64, uint8_t, iEffSeg);
5508IEM_CIMPL_PROTO_1(iemCImpl_repne_cmps_op64_addr64, uint8_t, iEffSeg);
5509IEM_CIMPL_PROTO_0(iemCImpl_repe_scas_rax_m64);
5510IEM_CIMPL_PROTO_0(iemCImpl_repne_scas_rax_m64);
5511IEM_CIMPL_PROTO_1(iemCImpl_rep_movs_op64_addr64, uint8_t, iEffSeg);
5512IEM_CIMPL_PROTO_0(iemCImpl_stos_rax_m64);
5513IEM_CIMPL_PROTO_1(iemCImpl_lods_rax_m64, int8_t, iEffSeg);
5514IEM_CIMPL_PROTO_1(iemCImpl_ins_op64_addr64, bool, fIoChecked);
5515IEM_CIMPL_PROTO_1(iemCImpl_rep_ins_op64_addr64, bool, fIoChecked);
5516IEM_CIMPL_PROTO_2(iemCImpl_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5517IEM_CIMPL_PROTO_2(iemCImpl_rep_outs_op64_addr64, uint8_t, iEffSeg, bool, fIoChecked);
5518/** @} */
5519
5520#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5521VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual) RT_NOEXCEPT;
5522VBOXSTRICTRC iemVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr) RT_NOEXCEPT;
5523VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPUCC pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr) RT_NOEXCEPT;
5524VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr) RT_NOEXCEPT;
5525VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr) RT_NOEXCEPT;
5526VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu) RT_NOEXCEPT;
5527VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr) RT_NOEXCEPT;
5528VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPUCC pVCpu) RT_NOEXCEPT;
5529VBOXSTRICTRC iemVmxVmexitInstrMwait(PVMCPUCC pVCpu, bool fMonitorHwArmed, uint8_t cbInstr) RT_NOEXCEPT;
5530VBOXSTRICTRC iemVmxVmexitInstrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port,
5531 bool fImm, uint8_t cbAccess, uint8_t cbInstr) RT_NOEXCEPT;
5532VBOXSTRICTRC iemVmxVmexitInstrStrIo(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint16_t u16Port, uint8_t cbAccess,
5533 bool fRep, VMXEXITINSTRINFO ExitInstrInfo, uint8_t cbInstr) RT_NOEXCEPT;
5534VBOXSTRICTRC iemVmxVmexitInstrMovDrX(PVMCPUCC pVCpu, VMXINSTRID uInstrId, uint8_t iDrReg, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5535VBOXSTRICTRC iemVmxVmexitInstrMovToCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5536VBOXSTRICTRC iemVmxVmexitInstrMovFromCr8(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5537VBOXSTRICTRC iemVmxVmexitInstrMovToCr3(PVMCPUCC pVCpu, uint64_t uNewCr3, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5538VBOXSTRICTRC iemVmxVmexitInstrMovFromCr3(PVMCPUCC pVCpu, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5539VBOXSTRICTRC iemVmxVmexitInstrMovToCr0Cr4(PVMCPUCC pVCpu, uint8_t iCrReg, uint64_t *puNewCrX, uint8_t iGReg, uint8_t cbInstr) RT_NOEXCEPT;
5540VBOXSTRICTRC iemVmxVmexitInstrClts(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT;
5541VBOXSTRICTRC iemVmxVmexitInstrLmsw(PVMCPUCC pVCpu, uint32_t uGuestCr0, uint16_t *pu16NewMsw,
5542 RTGCPTR GCPtrEffDst, uint8_t cbInstr) RT_NOEXCEPT;
5543VBOXSTRICTRC iemVmxVmexitInstrInvlpg(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, uint8_t cbInstr) RT_NOEXCEPT;
5544VBOXSTRICTRC iemVmxApicWriteEmulation(PVMCPUCC pVCpu) RT_NOEXCEPT;
5545VBOXSTRICTRC iemVmxVirtApicAccessUnused(PVMCPUCC pVCpu, PRTGCPHYS pGCPhysAccess, size_t cbAccess, uint32_t fAccess) RT_NOEXCEPT;
5546uint32_t iemVmxVirtApicReadRaw32(PVMCPUCC pVCpu, uint16_t offReg) RT_NOEXCEPT;
5547void iemVmxVirtApicWriteRaw32(PVMCPUCC pVCpu, uint16_t offReg, uint32_t uReg) RT_NOEXCEPT;
5548VBOXSTRICTRC iemVmxInvvpid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrInvvpidDesc,
5549 uint64_t u64InvvpidType, PCVMXVEXITINFO pExitInfo) RT_NOEXCEPT;
5550bool iemVmxIsRdmsrWrmsrInterceptSet(PCVMCPU pVCpu, uint32_t uExitReason, uint32_t idMsr) RT_NOEXCEPT;
5551IEM_CIMPL_PROTO_0(iemCImpl_vmxoff);
5552IEM_CIMPL_PROTO_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon);
5553IEM_CIMPL_PROTO_0(iemCImpl_vmlaunch);
5554IEM_CIMPL_PROTO_0(iemCImpl_vmresume);
5555IEM_CIMPL_PROTO_2(iemCImpl_vmptrld, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5556IEM_CIMPL_PROTO_2(iemCImpl_vmptrst, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5557IEM_CIMPL_PROTO_2(iemCImpl_vmclear, uint8_t, iEffSeg, RTGCPTR, GCPtrVmcs);
5558IEM_CIMPL_PROTO_2(iemCImpl_vmwrite_reg, uint64_t, u64Val, uint64_t, u64VmcsField);
5559IEM_CIMPL_PROTO_3(iemCImpl_vmwrite_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrVal, uint32_t, u64VmcsField);
5560IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg64, uint64_t *, pu64Dst, uint64_t, u64VmcsField);
5561IEM_CIMPL_PROTO_2(iemCImpl_vmread_reg32, uint64_t *, pu32Dst, uint32_t, u32VmcsField);
5562IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg64, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u64VmcsField);
5563IEM_CIMPL_PROTO_3(iemCImpl_vmread_mem_reg32, uint8_t, iEffSeg, RTGCPTR, GCPtrDst, uint32_t, u32VmcsField);
5564IEM_CIMPL_PROTO_3(iemCImpl_invvpid, uint8_t, iEffSeg, RTGCPTR, GCPtrInvvpidDesc, uint64_t, uInvvpidType);
5565IEM_CIMPL_PROTO_3(iemCImpl_invept, uint8_t, iEffSeg, RTGCPTR, GCPtrInveptDesc, uint64_t, uInveptType);
5566IEM_CIMPL_PROTO_0(iemCImpl_vmx_pause);
5567#endif
5568
5569#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5570VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2) RT_NOEXCEPT;
5571VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2) RT_NOEXCEPT;
5572VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPUCC pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
5573 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr) RT_NOEXCEPT;
5574VBOXSTRICTRC iemSvmHandleMsrIntercept(PVMCPUCC pVCpu, uint32_t idMsr, bool fWrite, uint8_t cbInstr) RT_NOEXCEPT;
5575IEM_CIMPL_PROTO_0(iemCImpl_vmrun);
5576IEM_CIMPL_PROTO_0(iemCImpl_vmload);
5577IEM_CIMPL_PROTO_0(iemCImpl_vmsave);
5578IEM_CIMPL_PROTO_0(iemCImpl_clgi);
5579IEM_CIMPL_PROTO_0(iemCImpl_stgi);
5580IEM_CIMPL_PROTO_0(iemCImpl_invlpga);
5581IEM_CIMPL_PROTO_0(iemCImpl_skinit);
5582IEM_CIMPL_PROTO_0(iemCImpl_svm_pause);
5583#endif
5584
5585IEM_CIMPL_PROTO_0(iemCImpl_vmcall); /* vmx */
5586IEM_CIMPL_PROTO_0(iemCImpl_vmmcall); /* svm */
5587IEM_CIMPL_PROTO_1(iemCImpl_Hypercall, uint16_t, uDisOpcode); /* both */
5588
5589extern const PFNIEMOP g_apfnIemInterpretOnlyOneByteMap[256];
5590extern const PFNIEMOP g_apfnIemInterpretOnlyTwoByteMap[1024];
5591extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f3a[1024];
5592extern const PFNIEMOP g_apfnIemInterpretOnlyThreeByte0f38[1024];
5593extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap1[1024];
5594extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap2[1024];
5595extern const PFNIEMOP g_apfnIemInterpretOnlyVecMap3[1024];
5596
5597/*
5598 * Recompiler related stuff.
5599 */
5600extern const PFNIEMOP g_apfnIemThreadedRecompilerOneByteMap[256];
5601extern const PFNIEMOP g_apfnIemThreadedRecompilerTwoByteMap[1024];
5602extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f3a[1024];
5603extern const PFNIEMOP g_apfnIemThreadedRecompilerThreeByte0f38[1024];
5604extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap1[1024];
5605extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap2[1024];
5606extern const PFNIEMOP g_apfnIemThreadedRecompilerVecMap3[1024];
5607
5608DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
5609 uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
5610void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
5611void iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
5612void iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
5613
5614
5615/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
5616#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
5617typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
5618typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
5619# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
5620 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
5621# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
5622 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
5623
5624#else
5625typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
5626typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
5627# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
5628 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
5629# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
5630 VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
5631#endif
5632
5633
5634IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_Nop);
5635IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_LogCpuState);
5636
5637IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_DeferToCImpl0);
5638
5639IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckIrq);
5640IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckMode);
5641IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckHwInstrBps);
5642IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLim);
5643
5644IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes);
5645IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodes);
5646IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim);
5647
5648/* Branching: */
5649IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes);
5650IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodes);
5651IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);
5652
5653IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
5654IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb);
5655IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);
5656
5657/* Natural page crossing: */
5658IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
5659IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
5660IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);
5661
5662IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
5663IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
5664IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);
5665
5666IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
5667IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
5668IEM_DECL_IEMTHREADEDFUNC_PROTO(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
5669
5670bool iemThreadedCompileEmitIrqCheckBefore(PVMCPUCC pVCpu, PIEMTB pTb);
5671bool iemThreadedCompileBeginEmitCallsComplications(PVMCPUCC pVCpu, PIEMTB pTb);
5672
5673/* Native recompiler public bits: */
5674DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
5675DECLHIDDEN(void) iemNativeDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
5676int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk);
5677void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb);
5678
5679
5680/** @} */
5681
5682RT_C_DECLS_END
5683
5684#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
5685
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette