VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@ 72485

Last change on this file since 72485 was 72485, checked in by vboxsync, 7 years ago

IEM,NEM: Define minimum CPUMCTX set for IEM and hook it up to NEM for fetching missing bits as needed. bugref:9044 [build fix]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 87.5 KB
Line 
1/* $Id: IEMInternal.h 72485 2018-06-08 17:12:24Z vboxsync $ */
2/** @file
3 * IEM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18#ifndef ___IEMInternal_h
19#define ___IEMInternal_h
20
21#include <VBox/vmm/cpum.h>
22#include <VBox/vmm/iem.h>
23#include <VBox/vmm/stam.h>
24#include <VBox/param.h>
25
26#include <setjmp.h>
27
28
29RT_C_DECLS_BEGIN
30
31
32/** @defgroup grp_iem_int Internals
33 * @ingroup grp_iem
34 * @internal
35 * @{
36 */
37
38/** For expanding symbol in slickedit and other products tagging and
39 * crossreferencing IEM symbols. */
40#ifndef IEM_STATIC
41# define IEM_STATIC static
42#endif
43
44/** @def IEM_WITH_3DNOW
45 * Includes the 3DNow decoding. */
46#define IEM_WITH_3DNOW
47
48/** @def IEM_WITH_THREE_0F_38
49 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
50#define IEM_WITH_THREE_0F_38
51
52/** @def IEM_WITH_THREE_0F_3A
53 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
54#define IEM_WITH_THREE_0F_3A
55
56/** @def IEM_WITH_VEX
57 * Includes the VEX decoding. */
58#define IEM_WITH_VEX
59
60
61/** @def IEM_VERIFICATION_MODE_FULL
62 * Shorthand for:
63 * defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
64 */
65#if (defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL) && !defined(IEM_VERIFICATION_MODE_FULL)) \
66 || defined(DOXYGEN_RUNNING)
67# define IEM_VERIFICATION_MODE_FULL
68#endif
69
70
71/** @def IEM_CFG_TARGET_CPU
72 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
73 *
74 * By default we allow this to be configured by the user via the
75 * CPUM/GuestCpuName config string, but this comes at a slight cost during
76 * decoding. So, for applications of this code where there is no need to
77 * be dynamic wrt target CPU, just modify this define.
78 */
79#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
80# define IEM_CFG_TARGET_CPU IEMTARGETCPU_DYNAMIC
81#endif
82
83
84//#define IEM_WITH_CODE_TLB// - work in progress
85
86
87#if !defined(IN_TSTVMSTRUCT) && !defined(DOXYGEN_RUNNING)
88/** Instruction statistics. */
89typedef struct IEMINSTRSTATS
90{
91# define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
92# include "IEMInstructionStatisticsTmpl.h"
93# undef IEM_DO_INSTR_STAT
94} IEMINSTRSTATS;
95#else
96struct IEMINSTRSTATS;
97typedef struct IEMINSTRSTATS IEMINSTRSTATS;
98#endif
99/** Pointer to IEM instruction statistics. */
100typedef IEMINSTRSTATS *PIEMINSTRSTATS;
101
102/** Finish and move to types.h */
103typedef union
104{
105 uint32_t u32;
106} RTFLOAT32U;
107typedef RTFLOAT32U *PRTFLOAT32U;
108typedef RTFLOAT32U const *PCRTFLOAT32U;
109
110
111/**
112 * Extended operand mode that includes a representation of 8-bit.
113 *
114 * This is used for packing down modes when invoking some C instruction
115 * implementations.
116 */
117typedef enum IEMMODEX
118{
119 IEMMODEX_16BIT = IEMMODE_16BIT,
120 IEMMODEX_32BIT = IEMMODE_32BIT,
121 IEMMODEX_64BIT = IEMMODE_64BIT,
122 IEMMODEX_8BIT
123} IEMMODEX;
124AssertCompileSize(IEMMODEX, 4);
125
126
127/**
128 * Branch types.
129 */
130typedef enum IEMBRANCH
131{
132 IEMBRANCH_JUMP = 1,
133 IEMBRANCH_CALL,
134 IEMBRANCH_TRAP,
135 IEMBRANCH_SOFTWARE_INT,
136 IEMBRANCH_HARDWARE_INT
137} IEMBRANCH;
138AssertCompileSize(IEMBRANCH, 4);
139
140
141/**
142 * INT instruction types.
143 */
144typedef enum IEMINT
145{
146 /** INT n instruction (opcode 0xcd imm). */
147 IEMINT_INTN = 0,
148 /** Single byte INT3 instruction (opcode 0xcc). */
149 IEMINT_INT3 = IEM_XCPT_FLAGS_BP_INSTR,
150 /** Single byte INTO instruction (opcode 0xce). */
151 IEMINT_INTO = IEM_XCPT_FLAGS_OF_INSTR,
152 /** Single byte INT1 (ICEBP) instruction (opcode 0xf1). */
153 IEMINT_INT1 = IEM_XCPT_FLAGS_ICEBP_INSTR
154} IEMINT;
155AssertCompileSize(IEMINT, 4);
156
157
158/**
159 * A FPU result.
160 */
161typedef struct IEMFPURESULT
162{
163 /** The output value. */
164 RTFLOAT80U r80Result;
165 /** The output status. */
166 uint16_t FSW;
167} IEMFPURESULT;
168AssertCompileMemberOffset(IEMFPURESULT, FSW, 10);
169/** Pointer to a FPU result. */
170typedef IEMFPURESULT *PIEMFPURESULT;
171/** Pointer to a const FPU result. */
172typedef IEMFPURESULT const *PCIEMFPURESULT;
173
174
175/**
176 * A FPU result consisting of two output values and FSW.
177 */
178typedef struct IEMFPURESULTTWO
179{
180 /** The first output value. */
181 RTFLOAT80U r80Result1;
182 /** The output status. */
183 uint16_t FSW;
184 /** The second output value. */
185 RTFLOAT80U r80Result2;
186} IEMFPURESULTTWO;
187AssertCompileMemberOffset(IEMFPURESULTTWO, FSW, 10);
188AssertCompileMemberOffset(IEMFPURESULTTWO, r80Result2, 12);
189/** Pointer to a FPU result consisting of two output values and FSW. */
190typedef IEMFPURESULTTWO *PIEMFPURESULTTWO;
191/** Pointer to a const FPU result consisting of two output values and FSW. */
192typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO;
193
194
195
196#ifdef IEM_VERIFICATION_MODE_FULL
197
198/**
199 * Verification event type.
200 */
201typedef enum IEMVERIFYEVENT
202{
203 IEMVERIFYEVENT_INVALID = 0,
204 IEMVERIFYEVENT_IOPORT_READ,
205 IEMVERIFYEVENT_IOPORT_WRITE,
206 IEMVERIFYEVENT_IOPORT_STR_READ,
207 IEMVERIFYEVENT_IOPORT_STR_WRITE,
208 IEMVERIFYEVENT_RAM_WRITE,
209 IEMVERIFYEVENT_RAM_READ
210} IEMVERIFYEVENT;
211
212/** Checks if the event type is a RAM read or write. */
213# define IEMVERIFYEVENT_IS_RAM(a_enmType) ((a_enmType) == IEMVERIFYEVENT_RAM_WRITE || (a_enmType) == IEMVERIFYEVENT_RAM_READ)
214
215/**
216 * Verification event record.
217 */
218typedef struct IEMVERIFYEVTREC
219{
220 /** Pointer to the next record in the list. */
221 struct IEMVERIFYEVTREC *pNext;
222 /** The event type. */
223 IEMVERIFYEVENT enmEvent;
224 /** The event data. */
225 union
226 {
227 /** IEMVERIFYEVENT_IOPORT_READ */
228 struct
229 {
230 RTIOPORT Port;
231 uint8_t cbValue;
232 } IOPortRead;
233
234 /** IEMVERIFYEVENT_IOPORT_WRITE */
235 struct
236 {
237 RTIOPORT Port;
238 uint8_t cbValue;
239 uint32_t u32Value;
240 } IOPortWrite;
241
242 /** IEMVERIFYEVENT_IOPORT_STR_READ */
243 struct
244 {
245 RTIOPORT Port;
246 uint8_t cbValue;
247 RTGCUINTREG cTransfers;
248 } IOPortStrRead;
249
250 /** IEMVERIFYEVENT_IOPORT_STR_WRITE */
251 struct
252 {
253 RTIOPORT Port;
254 uint8_t cbValue;
255 RTGCUINTREG cTransfers;
256 } IOPortStrWrite;
257
258 /** IEMVERIFYEVENT_RAM_READ */
259 struct
260 {
261 RTGCPHYS GCPhys;
262 uint32_t cb;
263 } RamRead;
264
265 /** IEMVERIFYEVENT_RAM_WRITE */
266 struct
267 {
268 RTGCPHYS GCPhys;
269 uint32_t cb;
270 uint8_t ab[512];
271 } RamWrite;
272 } u;
273} IEMVERIFYEVTREC;
274/** Pointer to an IEM event verification records. */
275typedef IEMVERIFYEVTREC *PIEMVERIFYEVTREC;
276
277#endif /* IEM_VERIFICATION_MODE_FULL */
278
279
280/**
281 * IEM TLB entry.
282 *
283 * Lookup assembly:
284 * @code{.asm}
285 ; Calculate tag.
286 mov rax, [VA]
287 shl rax, 16
288 shr rax, 16 + X86_PAGE_SHIFT
289 or rax, [uTlbRevision]
290
291 ; Do indexing.
292 movzx ecx, al
293 lea rcx, [pTlbEntries + rcx]
294
295 ; Check tag.
296 cmp [rcx + IEMTLBENTRY.uTag], rax
297 jne .TlbMiss
298
299 ; Check access.
300 movsx rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
301 and rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
302 cmp rax, [uTlbPhysRev]
303 jne .TlbMiss
304
305 ; Calc address and we're done.
306 mov eax, X86_PAGE_OFFSET_MASK
307 and eax, [VA]
308 or rax, [rcx + IEMTLBENTRY.pMappingR3]
309 %ifdef VBOX_WITH_STATISTICS
310 inc qword [cTlbHits]
311 %endif
312 jmp .Done
313
314 .TlbMiss:
315 mov r8d, ACCESS_FLAGS
316 mov rdx, [VA]
317 mov rcx, [pVCpu]
318 call iemTlbTypeMiss
319 .Done:
320
321 @endcode
322 *
323 */
324typedef struct IEMTLBENTRY
325{
326 /** The TLB entry tag.
327 * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits.
328 * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
329 *
330 * The TLB lookup code uses the current TLB revision, which won't ever be zero,
331 * enabling an extremely cheap TLB invalidation most of the time. When the TLB
332 * revision wraps around though, the tags needs to be zeroed.
333 *
334 * @note Try use SHRD instruction? After seeing
335 * https://gmplib.org/~tege/x86-timing.pdf, maybe not.
336 */
337 uint64_t uTag;
338 /** Access flags and physical TLB revision.
339 *
340 * - Bit 0 - page tables - not executable (X86_PTE_PAE_NX).
341 * - Bit 1 - page tables - not writable (complemented X86_PTE_RW).
342 * - Bit 2 - page tables - not user (complemented X86_PTE_US).
343 * - Bit 3 - pgm phys/virt - not directly writable.
344 * - Bit 4 - pgm phys page - not directly readable.
345 * - Bit 5 - currently unused.
346 * - Bit 6 - page tables - not dirty (complemented X86_PTE_D).
347 * - Bit 7 - tlb entry - pMappingR3 member not valid.
348 * - Bits 63 thru 8 are used for the physical TLB revision number.
349 *
350 * We're using complemented bit meanings here because it makes it easy to check
351 * whether special action is required. For instance a user mode write access
352 * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
353 * non-zero result would mean special handling needed because either it wasn't
354 * writable, or it wasn't user, or the page wasn't dirty. A user mode read
355 * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
356 * need to check any PTE flag.
357 */
358 uint64_t fFlagsAndPhysRev;
359 /** The guest physical page address. */
360 uint64_t GCPhys;
361 /** Pointer to the ring-3 mapping (possibly also valid in ring-0). */
362#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
363 R3PTRTYPE(uint8_t *) pbMappingR3;
364#else
365 R3R0PTRTYPE(uint8_t *) pbMappingR3;
366#endif
367#if HC_ARCH_BITS == 32
368 uint32_t u32Padding1;
369#endif
370} IEMTLBENTRY;
371AssertCompileSize(IEMTLBENTRY, 32);
372/** Pointer to an IEM TLB entry. */
373typedef IEMTLBENTRY *PIEMTLBENTRY;
374
375/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
376 * @{ */
377#define IEMTLBE_F_PT_NO_EXEC RT_BIT_64(0) /**< Page tables: Not executable. */
378#define IEMTLBE_F_PT_NO_WRITE RT_BIT_64(1) /**< Page tables: Not writable. */
379#define IEMTLBE_F_PT_NO_USER RT_BIT_64(2) /**< Page tables: Not user accessible (supervisor only). */
380#define IEMTLBE_F_PG_NO_WRITE RT_BIT_64(3) /**< Phys page: Not writable (access handler, ROM, whatever). */
381#define IEMTLBE_F_PG_NO_READ RT_BIT_64(4) /**< Phys page: Not readable (MMIO / access handler, ROM) */
382#define IEMTLBE_F_PATCH_CODE RT_BIT_64(5) /**< Code TLB: Patch code (PATM). */
383#define IEMTLBE_F_PT_NO_DIRTY RT_BIT_64(6) /**< Page tables: Not dirty (needs to be made dirty on write). */
384#define IEMTLBE_F_NO_MAPPINGR3 RT_BIT_64(7) /**< TLB entry: The IEMTLBENTRY::pMappingR3 member is invalid. */
385#define IEMTLBE_F_PHYS_REV UINT64_C(0xffffffffffffff00) /**< Physical revision mask. */
386/** @} */
387
388
389/**
390 * An IEM TLB.
391 *
392 * We've got two of these, one for data and one for instructions.
393 */
394typedef struct IEMTLB
395{
396 /** The TLB entries.
397 * We've choosen 256 because that way we can obtain the result directly from a
398 * 8-bit register without an additional AND instruction. */
399 IEMTLBENTRY aEntries[256];
400 /** The TLB revision.
401 * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
402 * by adding RT_BIT_64(36) to it. When it wraps around and becomes zero, all
403 * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
404 * (The revision zero indicates an invalid TLB entry.)
405 *
406 * The initial value is choosen to cause an early wraparound. */
407 uint64_t uTlbRevision;
408 /** The TLB physical address revision - shadow of PGM variable.
409 *
410 * This is actually only 56 bits wide (see IEMTLBENTRY::fFlagsAndPhysRev) and is
411 * incremented by adding RT_BIT_64(8). When it wraps around and becomes zero,
412 * a rendezvous is called and each CPU wipe the IEMTLBENTRY::pMappingR3 as well
413 * as IEMTLBENTRY::fFlagsAndPhysRev bits 63 thru 8, 4, and 3.
414 *
415 * The initial value is choosen to cause an early wraparound. */
416 uint64_t volatile uTlbPhysRev;
417
418 /* Statistics: */
419
420 /** TLB hits (VBOX_WITH_STATISTICS only). */
421 uint64_t cTlbHits;
422 /** TLB misses. */
423 uint32_t cTlbMisses;
424 /** Slow read path. */
425 uint32_t cTlbSlowReadPath;
426#if 0
427 /** TLB misses because of tag mismatch. */
428 uint32_t cTlbMissesTag;
429 /** TLB misses because of virtual access violation. */
430 uint32_t cTlbMissesVirtAccess;
431 /** TLB misses because of dirty bit. */
432 uint32_t cTlbMissesDirty;
433 /** TLB misses because of MMIO */
434 uint32_t cTlbMissesMmio;
435 /** TLB misses because of write access handlers. */
436 uint32_t cTlbMissesWriteHandler;
437 /** TLB misses because no r3(/r0) mapping. */
438 uint32_t cTlbMissesMapping;
439#endif
440 /** Alignment padding. */
441 uint32_t au32Padding[3+5];
442} IEMTLB;
443AssertCompileSizeAlignment(IEMTLB, 64);
444/** IEMTLB::uTlbRevision increment. */
445#define IEMTLB_REVISION_INCR RT_BIT_64(36)
446/** IEMTLB::uTlbPhysRev increment. */
447#define IEMTLB_PHYS_REV_INCR RT_BIT_64(8)
448
449
450/**
451 * The per-CPU IEM state.
452 */
453typedef struct IEMCPU
454{
455 /** Info status code that needs to be propagated to the IEM caller.
456 * This cannot be passed internally, as it would complicate all success
457 * checks within the interpreter making the code larger and almost impossible
458 * to get right. Instead, we'll store status codes to pass on here. Each
459 * source of these codes will perform appropriate sanity checks. */
460 int32_t rcPassUp; /* 0x00 */
461
462 /** The current CPU execution mode (CS). */
463 IEMMODE enmCpuMode; /* 0x04 */
464 /** The CPL. */
465 uint8_t uCpl; /* 0x05 */
466
467 /** Whether to bypass access handlers or not. */
468 bool fBypassHandlers; /* 0x06 */
469 /** Indicates that we're interpreting patch code - RC only! */
470 bool fInPatchCode; /* 0x07 */
471
472 /** @name Decoder state.
473 * @{ */
474#ifdef IEM_WITH_CODE_TLB
475 /** The offset of the next instruction byte. */
476 uint32_t offInstrNextByte; /* 0x08 */
477 /** The number of bytes available at pbInstrBuf for the current instruction.
478 * This takes the max opcode length into account so that doesn't need to be
479 * checked separately. */
480 uint32_t cbInstrBuf; /* 0x0c */
481 /** Pointer to the page containing RIP, user specified buffer or abOpcode.
482 * This can be NULL if the page isn't mappable for some reason, in which
483 * case we'll do fallback stuff.
484 *
485 * If we're executing an instruction from a user specified buffer,
486 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
487 * aligned pointer but pointer to the user data.
488 *
489 * For instructions crossing pages, this will start on the first page and be
490 * advanced to the next page by the time we've decoded the instruction. This
491 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
492 */
493 uint8_t const *pbInstrBuf; /* 0x10 */
494# if ARCH_BITS == 32
495 uint32_t uInstrBufHigh; /** The high dword of the host context pbInstrBuf member. */
496# endif
497 /** The program counter corresponding to pbInstrBuf.
498 * This is set to a non-canonical address when we need to invalidate it. */
499 uint64_t uInstrBufPc; /* 0x18 */
500 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
501 * This takes the CS segment limit into account. */
502 uint16_t cbInstrBufTotal; /* 0x20 */
503 /** Offset into pbInstrBuf of the first byte of the current instruction.
504 * Can be negative to efficiently handle cross page instructions. */
505 int16_t offCurInstrStart; /* 0x22 */
506
507 /** The prefix mask (IEM_OP_PRF_XXX). */
508 uint32_t fPrefixes; /* 0x24 */
509 /** The extra REX ModR/M register field bit (REX.R << 3). */
510 uint8_t uRexReg; /* 0x28 */
511 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
512 * (REX.B << 3). */
513 uint8_t uRexB; /* 0x29 */
514 /** The extra REX SIB index field bit (REX.X << 3). */
515 uint8_t uRexIndex; /* 0x2a */
516
517 /** The effective segment register (X86_SREG_XXX). */
518 uint8_t iEffSeg; /* 0x2b */
519
520#else
521 /** The size of what has currently been fetched into abOpcode. */
522 uint8_t cbOpcode; /* 0x08 */
523 /** The current offset into abOpcode. */
524 uint8_t offOpcode; /* 0x09 */
525
526 /** The effective segment register (X86_SREG_XXX). */
527 uint8_t iEffSeg; /* 0x0a */
528
529 /** The extra REX ModR/M register field bit (REX.R << 3). */
530 uint8_t uRexReg; /* 0x0b */
531 /** The prefix mask (IEM_OP_PRF_XXX). */
532 uint32_t fPrefixes; /* 0x0c */
533 /** The extra REX ModR/M r/m field, SIB base and opcode reg bit
534 * (REX.B << 3). */
535 uint8_t uRexB; /* 0x10 */
536 /** The extra REX SIB index field bit (REX.X << 3). */
537 uint8_t uRexIndex; /* 0x11 */
538
539#endif
540
541 /** The effective operand mode. */
542 IEMMODE enmEffOpSize; /* 0x2c, 0x12 */
543 /** The default addressing mode. */
544 IEMMODE enmDefAddrMode; /* 0x2d, 0x13 */
545 /** The effective addressing mode. */
546 IEMMODE enmEffAddrMode; /* 0x2e, 0x14 */
547 /** The default operand mode. */
548 IEMMODE enmDefOpSize; /* 0x2f, 0x15 */
549
550 /** Prefix index (VEX.pp) for two byte and three byte tables. */
551 uint8_t idxPrefix; /* 0x30, 0x16 */
552 /** 3rd VEX/EVEX/XOP register.
553 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
554 uint8_t uVex3rdReg; /* 0x31, 0x17 */
555 /** The VEX/EVEX/XOP length field. */
556 uint8_t uVexLength; /* 0x32, 0x18 */
557 /** Additional EVEX stuff. */
558 uint8_t fEvexStuff; /* 0x33, 0x19 */
559
560 /** The FPU opcode (FOP). */
561 uint16_t uFpuOpcode; /* 0x34, 0x1a */
562
563 /** Explicit alignment padding. */
564#ifdef IEM_WITH_CODE_TLB
565 uint8_t abAlignment2a[2]; /* 0x36 */
566#endif
567
568 /** The opcode bytes. */
569 uint8_t abOpcode[15]; /* 0x48, 0x1c */
570 /** Explicit alignment padding. */
571#ifdef IEM_WITH_CODE_TLB
572 uint8_t abAlignment2c[0x48 - 0x47]; /* 0x37 */
573#else
574 uint8_t abAlignment2c[0x48 - 0x2b]; /* 0x2b */
575#endif
576 /** @} */
577
578
579 /** The flags of the current exception / interrupt. */
580 uint32_t fCurXcpt; /* 0x48, 0x48 */
581 /** The current exception / interrupt. */
582 uint8_t uCurXcpt;
583 /** Exception / interrupt recursion depth. */
584 int8_t cXcptRecursions;
585
586 /** The number of active guest memory mappings. */
587 uint8_t cActiveMappings;
588 /** The next unused mapping index. */
589 uint8_t iNextMapping;
590 /** Records for tracking guest memory mappings. */
591 struct
592 {
593 /** The address of the mapped bytes. */
594 void *pv;
595#if defined(IN_RC) && HC_ARCH_BITS == 64
596 uint32_t u32Alignment3; /**< Alignment padding. */
597#endif
598 /** The access flags (IEM_ACCESS_XXX).
599 * IEM_ACCESS_INVALID if the entry is unused. */
600 uint32_t fAccess;
601#if HC_ARCH_BITS == 64
602 uint32_t u32Alignment4; /**< Alignment padding. */
603#endif
604 } aMemMappings[3];
605
606 /** Locking records for the mapped memory. */
607 union
608 {
609 PGMPAGEMAPLOCK Lock;
610 uint64_t au64Padding[2];
611 } aMemMappingLocks[3];
612
613 /** Bounce buffer info.
614 * This runs in parallel to aMemMappings. */
615 struct
616 {
617 /** The physical address of the first byte. */
618 RTGCPHYS GCPhysFirst;
619 /** The physical address of the second page. */
620 RTGCPHYS GCPhysSecond;
621 /** The number of bytes in the first page. */
622 uint16_t cbFirst;
623 /** The number of bytes in the second page. */
624 uint16_t cbSecond;
625 /** Whether it's unassigned memory. */
626 bool fUnassigned;
627 /** Explicit alignment padding. */
628 bool afAlignment5[3];
629 } aMemBbMappings[3];
630
631 /** Bounce buffer storage.
632 * This runs in parallel to aMemMappings and aMemBbMappings. */
633 struct
634 {
635 uint8_t ab[512];
636 } aBounceBuffers[3];
637
638
639 /** Pointer set jump buffer - ring-3 context. */
640 R3PTRTYPE(jmp_buf *) pJmpBufR3;
641 /** Pointer set jump buffer - ring-0 context. */
642 R0PTRTYPE(jmp_buf *) pJmpBufR0;
643 /** Pointer set jump buffer - raw-mode context. */
644 RCPTRTYPE(jmp_buf *) pJmpBufRC;
645
646 /** @todo Should move this near @a fCurXcpt later. */
647 /** The error code for the current exception / interrupt. */
648 uint32_t uCurXcptErr;
649 /** The CR2 for the current exception / interrupt. */
650 uint64_t uCurXcptCr2;
651
652 /** @name Statistics
653 * @{ */
654 /** The number of instructions we've executed. */
655 uint32_t cInstructions;
656 /** The number of potential exits. */
657 uint32_t cPotentialExits;
658 /** The number of bytes data or stack written (mostly for IEMExecOneEx).
659 * This may contain uncommitted writes. */
660 uint32_t cbWritten;
661 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
662 uint32_t cRetInstrNotImplemented;
663 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
664 uint32_t cRetAspectNotImplemented;
665 /** Counts informational statuses returned (other than VINF_SUCCESS). */
666 uint32_t cRetInfStatuses;
667 /** Counts other error statuses returned. */
668 uint32_t cRetErrStatuses;
669 /** Number of times rcPassUp has been used. */
670 uint32_t cRetPassUpStatus;
671 /** Number of times RZ left with instruction commit pending for ring-3. */
672 uint32_t cPendingCommit;
673 /** Number of long jumps. */
674 uint32_t cLongJumps;
675 uint32_t uAlignment6; /**< Alignment padding. */
676#ifdef IEM_VERIFICATION_MODE_FULL
677 /** The Number of I/O port reads that has been performed. */
678 uint32_t cIOReads;
679 /** The Number of I/O port writes that has been performed. */
680 uint32_t cIOWrites;
681 /** Set if no comparison to REM is currently performed.
682 * This is used to skip past really slow bits. */
683 bool fNoRem;
684 /** Saved fNoRem flag used by #iemInitExec and #iemUninitExec. */
685 bool fNoRemSavedByExec;
686 /** Indicates that RAX and RDX differences should be ignored since RDTSC
687 * and RDTSCP are timing sensitive. */
688 bool fIgnoreRaxRdx;
689 /** Indicates that a MOVS instruction with overlapping source and destination
690 * was executed, causing the memory write records to be incorrrect. */
691 bool fOverlappingMovs;
692 /** Set if there are problematic memory accesses (MMIO, write monitored, ++). */
693 bool fProblematicMemory;
694 /** This is used to communicate a CPL changed caused by IEMInjectTrap that
695 * CPUM doesn't yet reflect. */
696 uint8_t uInjectCpl;
697 /** To prevent EMR3HmSingleInstruction from triggering endless recursion via
698 * emR3ExecuteInstruction and iemExecVerificationModeCheck. */
699 uint8_t cVerifyDepth;
700 bool afAlignment7[2];
701 /** Mask of undefined eflags.
702 * The verifier will any difference in these flags. */
703 uint32_t fUndefinedEFlags;
704 /** The CS of the instruction being interpreted. */
705 RTSEL uOldCs;
706 /** The RIP of the instruction being interpreted. */
707 uint64_t uOldRip;
708 /** The physical address corresponding to abOpcodes[0]. */
709 RTGCPHYS GCPhysOpcodes;
710#endif
711 /** @} */
712
713 /** @name Target CPU information.
714 * @{ */
715#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
716 /** The target CPU. */
717 uint32_t uTargetCpu;
718#else
719 uint32_t u32TargetCpuPadding;
720#endif
721 /** The CPU vendor. */
722 CPUMCPUVENDOR enmCpuVendor;
723 /** @} */
724
725 /** @name Host CPU information.
726 * @{ */
727 /** The CPU vendor. */
728 CPUMCPUVENDOR enmHostCpuVendor;
729 /** @} */
730
731 uint32_t au32Alignment8[HC_ARCH_BITS == 64 ? 4 + 8 : 4]; /**< Alignment padding. */
732
733 /** Data TLB.
734 * @remarks Must be 64-byte aligned. */
735 IEMTLB DataTlb;
736 /** Instruction TLB.
737 * @remarks Must be 64-byte aligned. */
738 IEMTLB CodeTlb;
739
740 /** Pointer to the CPU context - ring-3 context.
741 * @todo put inside IEM_VERIFICATION_MODE_FULL++. */
742 R3PTRTYPE(PCPUMCTX) pCtxR3;
743 /** Pointer to the CPU context - ring-0 context. */
744 R0PTRTYPE(PCPUMCTX) pCtxR0;
745 /** Pointer to the CPU context - raw-mode context. */
746 RCPTRTYPE(PCPUMCTX) pCtxRC;
747
748 /** Pointer to instruction statistics for raw-mode context (same as R0). */
749 RCPTRTYPE(PIEMINSTRSTATS) pStatsRC;
750 /** Pointer to instruction statistics for ring-0 context (same as RC). */
751 R0PTRTYPE(PIEMINSTRSTATS) pStatsR0;
752 /** Pointer to instruction statistics for non-ring-3 code. */
753 R3PTRTYPE(PIEMINSTRSTATS) pStatsCCR3;
754 /** Pointer to instruction statistics for ring-3 context. */
755 R3PTRTYPE(PIEMINSTRSTATS) pStatsR3;
756
757#ifdef IEM_VERIFICATION_MODE_FULL
758 /** The event verification records for what IEM did (LIFO). */
759 R3PTRTYPE(PIEMVERIFYEVTREC) pIemEvtRecHead;
760 /** Insertion point for pIemEvtRecHead. */
761 R3PTRTYPE(PIEMVERIFYEVTREC *) ppIemEvtRecNext;
762 /** The event verification records for what the other party did (FIFO). */
763 R3PTRTYPE(PIEMVERIFYEVTREC) pOtherEvtRecHead;
764 /** Insertion point for pOtherEvtRecHead. */
765 R3PTRTYPE(PIEMVERIFYEVTREC *) ppOtherEvtRecNext;
766 /** List of free event records. */
767 R3PTRTYPE(PIEMVERIFYEVTREC) pFreeEvtRec;
768#endif
769} IEMCPU;
770AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48);
771AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
772AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
773/** Pointer to the per-CPU IEM state. */
774typedef IEMCPU *PIEMCPU;
775/** Pointer to the const per-CPU IEM state. */
776typedef IEMCPU const *PCIEMCPU;
777
778
779/** @def IEM_GET_CTX
780 * Gets the guest CPU context for the calling EMT.
781 * @returns PCPUMCTX
782 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
783 */
784#if !defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE) \
785 && !defined(IEM_VERIFICATION_MODE_MINIMAL) && defined(VMCPU_INCL_CPUM_GST_CTX)
786# define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
787#else
788# define IEM_GET_CTX(a_pVCpu) ((a_pVCpu)->iem.s.CTX_SUFF(pCtx))
789#endif
790
791/** @def IEM_CTX_ASSERT
792 * Asserts that the @a a_fExtrnMbz is present in the CPU context.
793 * @param a_pCtx The CPUMCTX structure.
794 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
795 */
796#define IEM_CTX_ASSERT(a_pCtx, a_fExtrnMbz) Assert(!((a_pCtx)->fExtrn & (a_fExtrnMbz)))
797
798/** @def IEM_CTX_IMPORT_RET
799 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
800 *
801 * Will call the keep to import the bits as needed.
802 *
803 * Returns on import failure.
804 *
805 * @param a_pVCpu The cross context virtual CPU structure.
806 * @param a_pCtx The CPUMCTX structure.
807 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
808 */
809#define IEM_CTX_IMPORT_RET(a_pVCpu, a_pCtx, a_fExtrnImport) \
810 do { \
811 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
812 { /* likely */ } \
813 else \
814 { \
815 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
816 AssertRCReturn(rcCtxImport, rcCtxImport); \
817 } \
818 } while (0)
819
820/** @def IEM_CTX_IMPORT_NORET
821 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
822 *
823 * Will call the keep to import the bits as needed.
824 *
825 * @param a_pVCpu The cross context virtual CPU structure.
826 * @param a_pCtx The CPUMCTX structure.
827 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
828 */
829#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_pCtx, a_fExtrnImport) \
830 do { \
831 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
832 { /* likely */ } \
833 else \
834 { \
835 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
836 AssertLogRelRC(rcCtxImport); \
837 } \
838 } while (0)
839
840/** @def IEM_CTX_IMPORT_JMP
841 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
842 *
843 * Will call the keep to import the bits as needed.
844 *
845 * Jumps on import failure.
846 *
847 * @param a_pVCpu The cross context virtual CPU structure.
848 * @param a_pCtx The CPUMCTX structure.
849 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
850 */
851#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_pCtx, a_fExtrnImport) \
852 do { \
853 if (!((a_pCtx)->fExtrn & (a_fExtrnImport))) \
854 { /* likely */ } \
855 else \
856 { \
857 int rcCtxImport = iemCtxImport(a_pVCpu, a_pCtx, a_fExtrnImport); \
858 AssertRCStmt(rcCtxImport, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), rcCtxImport)); \
859 } \
860 } while (0)
861
862int iemCtxImport(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fExtrnImport);
863
864
865/** Gets the current IEMTARGETCPU value.
866 * @returns IEMTARGETCPU value.
867 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
868 */
869#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
870# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
871#else
872# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
873#endif
874
875/** @def Gets the instruction length. */
876#ifdef IEM_WITH_CODE_TLB
877# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offInstrNextByte - (uint32_t)(int32_t)(a_pVCpu)->iem.s.offCurInstrStart)
878#else
879# define IEM_GET_INSTR_LEN(a_pVCpu) ((a_pVCpu)->iem.s.offOpcode)
880#endif
881
882
883/** @name IEM_ACCESS_XXX - Access details.
884 * @{ */
885#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
886#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
887#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
888#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
889#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
890#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
891#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
892#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
893#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
894#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
895/** The writes are partial, so if initialize the bounce buffer with the
896 * orignal RAM content. */
897#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
898/** Used in aMemMappings to indicate that the entry is bounce buffered. */
899#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
900/** Bounce buffer with ring-3 write pending, first page. */
901#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
902/** Bounce buffer with ring-3 write pending, second page. */
903#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
904/** Valid bit mask. */
905#define IEM_ACCESS_VALID_MASK UINT32_C(0x00000fff)
906/** Read+write data alias. */
907#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
908/** Write data alias. */
909#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
910/** Read data alias. */
911#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
912/** Instruction fetch alias. */
913#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
914/** Stack write alias. */
915#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
916/** Stack read alias. */
917#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
918/** Stack read+write alias. */
919#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
920/** Read system table alias. */
921#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
922/** Read+write system table alias. */
923#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
924/** @} */
925
926/** @name Prefix constants (IEMCPU::fPrefixes)
927 * @{ */
928#define IEM_OP_PRF_SEG_CS RT_BIT_32(0) /**< CS segment prefix (0x2e). */
929#define IEM_OP_PRF_SEG_SS RT_BIT_32(1) /**< SS segment prefix (0x36). */
930#define IEM_OP_PRF_SEG_DS RT_BIT_32(2) /**< DS segment prefix (0x3e). */
931#define IEM_OP_PRF_SEG_ES RT_BIT_32(3) /**< ES segment prefix (0x26). */
932#define IEM_OP_PRF_SEG_FS RT_BIT_32(4) /**< FS segment prefix (0x64). */
933#define IEM_OP_PRF_SEG_GS RT_BIT_32(5) /**< GS segment prefix (0x65). */
934#define IEM_OP_PRF_SEG_MASK UINT32_C(0x3f)
935
936#define IEM_OP_PRF_SIZE_OP RT_BIT_32(8) /**< Operand size prefix (0x66). */
937#define IEM_OP_PRF_SIZE_REX_W RT_BIT_32(9) /**< REX.W prefix (0x48-0x4f). */
938#define IEM_OP_PRF_SIZE_ADDR RT_BIT_32(10) /**< Address size prefix (0x67). */
939
940#define IEM_OP_PRF_LOCK RT_BIT_32(16) /**< Lock prefix (0xf0). */
941#define IEM_OP_PRF_REPNZ RT_BIT_32(17) /**< Repeat-not-zero prefix (0xf2). */
942#define IEM_OP_PRF_REPZ RT_BIT_32(18) /**< Repeat-if-zero prefix (0xf3). */
943
944#define IEM_OP_PRF_REX RT_BIT_32(24) /**< Any REX prefix (0x40-0x4f). */
945#define IEM_OP_PRF_REX_R RT_BIT_32(25) /**< REX.R prefix (0x44,0x45,0x46,0x47,0x4c,0x4d,0x4e,0x4f). */
946#define IEM_OP_PRF_REX_B RT_BIT_32(26) /**< REX.B prefix (0x41,0x43,0x45,0x47,0x49,0x4b,0x4d,0x4f). */
947#define IEM_OP_PRF_REX_X RT_BIT_32(27) /**< REX.X prefix (0x42,0x43,0x46,0x47,0x4a,0x4b,0x4e,0x4f). */
948/** Mask with all the REX prefix flags.
949 * This is generally for use when needing to undo the REX prefixes when they
950 * are followed legacy prefixes and therefore does not immediately preceed
951 * the first opcode byte.
952 * For testing whether any REX prefix is present, use IEM_OP_PRF_REX instead. */
953#define IEM_OP_PRF_REX_MASK (IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W )
954
955#define IEM_OP_PRF_VEX RT_BIT_32(28) /**< Indiciates VEX prefix. */
956#define IEM_OP_PRF_EVEX RT_BIT_32(29) /**< Indiciates EVEX prefix. */
957#define IEM_OP_PRF_XOP RT_BIT_32(30) /**< Indiciates XOP prefix. */
958/** @} */
959
960/** @name IEMOPFORM_XXX - Opcode forms
961 * @note These are ORed together with IEMOPHINT_XXX.
962 * @{ */
963/** ModR/M: reg, r/m */
964#define IEMOPFORM_RM 0
965/** ModR/M: reg, r/m (register) */
966#define IEMOPFORM_RM_REG (IEMOPFORM_RM | IEMOPFORM_MOD3)
967/** ModR/M: reg, r/m (memory) */
968#define IEMOPFORM_RM_MEM (IEMOPFORM_RM | IEMOPFORM_NOT_MOD3)
969/** ModR/M: r/m, reg */
970#define IEMOPFORM_MR 1
971/** ModR/M: r/m (register), reg */
972#define IEMOPFORM_MR_REG (IEMOPFORM_MR | IEMOPFORM_MOD3)
973/** ModR/M: r/m (memory), reg */
974#define IEMOPFORM_MR_MEM (IEMOPFORM_MR | IEMOPFORM_NOT_MOD3)
975/** ModR/M: r/m only */
976#define IEMOPFORM_M 2
977/** ModR/M: r/m only (register). */
978#define IEMOPFORM_M_REG (IEMOPFORM_M | IEMOPFORM_MOD3)
979/** ModR/M: r/m only (memory). */
980#define IEMOPFORM_M_MEM (IEMOPFORM_M | IEMOPFORM_NOT_MOD3)
981/** ModR/M: reg only */
982#define IEMOPFORM_R 3
983
984/** VEX+ModR/M: reg, r/m */
985#define IEMOPFORM_VEX_RM 4
986/** VEX+ModR/M: reg, r/m (register) */
987#define IEMOPFORM_VEX_RM_REG (IEMOPFORM_VEX_RM | IEMOPFORM_MOD3)
988/** VEX+ModR/M: reg, r/m (memory) */
989#define IEMOPFORM_VEX_RM_MEM (IEMOPFORM_VEX_RM | IEMOPFORM_NOT_MOD3)
990/** VEX+ModR/M: r/m, reg */
991#define IEMOPFORM_VEX_MR 5
992/** VEX+ModR/M: r/m (register), reg */
993#define IEMOPFORM_VEX_MR_REG (IEMOPFORM_VEX_MR | IEMOPFORM_MOD3)
994/** VEX+ModR/M: r/m (memory), reg */
995#define IEMOPFORM_VEX_MR_MEM (IEMOPFORM_VEX_MR | IEMOPFORM_NOT_MOD3)
996/** VEX+ModR/M: r/m only */
997#define IEMOPFORM_VEX_M 6
998/** VEX+ModR/M: r/m only (register). */
999#define IEMOPFORM_VEX_M_REG (IEMOPFORM_VEX_M | IEMOPFORM_MOD3)
1000/** VEX+ModR/M: r/m only (memory). */
1001#define IEMOPFORM_VEX_M_MEM (IEMOPFORM_VEX_M | IEMOPFORM_NOT_MOD3)
1002/** VEX+ModR/M: reg only */
1003#define IEMOPFORM_VEX_R 7
1004/** VEX+ModR/M: reg, vvvv, r/m */
1005#define IEMOPFORM_VEX_RVM 8
1006/** VEX+ModR/M: reg, vvvv, r/m (register). */
1007#define IEMOPFORM_VEX_RVM_REG (IEMOPFORM_VEX_RVM | IEMOPFORM_MOD3)
1008/** VEX+ModR/M: reg, vvvv, r/m (memory). */
1009#define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3)
1010/** VEX+ModR/M: r/m, vvvv, reg */
1011#define IEMOPFORM_VEX_MVR 9
1012/** VEX+ModR/M: r/m, vvvv, reg (register) */
1013#define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3)
1014/** VEX+ModR/M: r/m, vvvv, reg (memory) */
1015#define IEMOPFORM_VEX_MVR_MEM (IEMOPFORM_VEX_MVR | IEMOPFORM_NOT_MOD3)
1016
1017/** Fixed register instruction, no R/M. */
1018#define IEMOPFORM_FIXED 16
1019
1020/** The r/m is a register. */
1021#define IEMOPFORM_MOD3 RT_BIT_32(8)
1022/** The r/m is a memory access. */
1023#define IEMOPFORM_NOT_MOD3 RT_BIT_32(9)
1024/** @} */
1025
1026/** @name IEMOPHINT_XXX - Additional Opcode Hints
1027 * @note These are ORed together with IEMOPFORM_XXX.
1028 * @{ */
1029/** Ignores the operand size prefix (66h). */
1030#define IEMOPHINT_IGNORES_OZ_PFX RT_BIT_32(10)
1031/** Ignores REX.W (aka WIG). */
1032#define IEMOPHINT_IGNORES_REXW RT_BIT_32(11)
1033/** Both the operand size prefixes (66h + REX.W) are ignored. */
1034#define IEMOPHINT_IGNORES_OP_SIZES (IEMOPHINT_IGNORES_OZ_PFX | IEMOPHINT_IGNORES_REXW)
1035/** Allowed with the lock prefix. */
1036#define IEMOPHINT_LOCK_ALLOWED RT_BIT_32(11)
1037/** The VEX.L value is ignored (aka LIG). */
1038#define IEMOPHINT_VEX_L_IGNORED RT_BIT_32(12)
1039/** The VEX.L value must be zero (i.e. 128-bit width only). */
1040#define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13)
1041
1042/** Hint to IEMAllInstructionPython.py that this macro should be skipped. */
1043#define IEMOPHINT_SKIP_PYTHON RT_BIT_32(31)
1044/** @} */
1045
1046/**
1047 * Possible hardware task switch sources.
1048 */
1049typedef enum IEMTASKSWITCH
1050{
1051 /** Task switch caused by an interrupt/exception. */
1052 IEMTASKSWITCH_INT_XCPT = 1,
1053 /** Task switch caused by a far CALL. */
1054 IEMTASKSWITCH_CALL,
1055 /** Task switch caused by a far JMP. */
1056 IEMTASKSWITCH_JUMP,
1057 /** Task switch caused by an IRET. */
1058 IEMTASKSWITCH_IRET
1059} IEMTASKSWITCH;
1060AssertCompileSize(IEMTASKSWITCH, 4);
1061
1062/**
1063 * Possible CrX load (write) sources.
1064 */
1065typedef enum IEMACCESSCRX
1066{
1067 /** CrX access caused by 'mov crX' instruction. */
1068 IEMACCESSCRX_MOV_CRX,
1069 /** CrX (CR0) write caused by 'lmsw' instruction. */
1070 IEMACCESSCRX_LMSW,
1071 /** CrX (CR0) write caused by 'clts' instruction. */
1072 IEMACCESSCRX_CLTS,
1073 /** CrX (CR0) read caused by 'smsw' instruction. */
1074 IEMACCESSCRX_SMSW
1075} IEMACCESSCRX;
1076
1077/**
1078 * Tests if verification mode is enabled.
1079 *
1080 * This expands to @c false when IEM_VERIFICATION_MODE is not defined and
1081 * should therefore cause the compiler to eliminate the verification branch
1082 * of an if statement. */
1083#ifdef IEM_VERIFICATION_MODE_FULL
1084# define IEM_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1085#elif defined(IEM_VERIFICATION_MODE_MINIMAL)
1086# define IEM_VERIFICATION_ENABLED(a_pVCpu) (true)
1087#else
1088# define IEM_VERIFICATION_ENABLED(a_pVCpu) (false)
1089#endif
1090
1091/**
1092 * Tests if full verification mode is enabled.
1093 *
1094 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
1095 * should therefore cause the compiler to eliminate the verification branch
1096 * of an if statement. */
1097#ifdef IEM_VERIFICATION_MODE_FULL
1098# define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1099#else
1100# define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (false)
1101#endif
1102
1103/**
1104 * Tests if full verification mode is enabled again REM.
1105 *
1106 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and
1107 * should therefore cause the compiler to eliminate the verification branch
1108 * of an if statement. */
1109#ifdef IEM_VERIFICATION_MODE_FULL
1110# ifdef IEM_VERIFICATION_MODE_FULL_HM
1111# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem && !HMIsEnabled((a_pVCpu)->CTX_SUFF(pVM)))
1112# else
1113# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)
1114# endif
1115#else
1116# define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (false)
1117#endif
1118
1119/** @def IEM_VERIFICATION_MODE
1120 * Indicates that one of the verfication modes are enabled.
1121 */
1122#if (defined(IEM_VERIFICATION_MODE_FULL) || defined(IEM_VERIFICATION_MODE_MINIMAL)) && !defined(IEM_VERIFICATION_MODE) \
1123 || defined(DOXYGEN_RUNNING)
1124# define IEM_VERIFICATION_MODE
1125#endif
1126
1127/**
1128 * Indicates to the verifier that the given flag set is undefined.
1129 *
1130 * Can be invoked again to add more flags.
1131 *
1132 * This is a NOOP if the verifier isn't compiled in.
1133 */
1134#ifdef IEM_VERIFICATION_MODE_FULL
1135# define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { pVCpu->iem.s.fUndefinedEFlags |= (a_fEfl); } while (0)
1136#else
1137# define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)
1138#endif
1139
1140
1141/** @def IEM_DECL_IMPL_TYPE
1142 * For typedef'ing an instruction implementation function.
1143 *
1144 * @param a_RetType The return type.
1145 * @param a_Name The name of the type.
1146 * @param a_ArgList The argument list enclosed in parentheses.
1147 */
1148
1149/** @def IEM_DECL_IMPL_DEF
1150 * For defining an instruction implementation function.
1151 *
1152 * @param a_RetType The return type.
1153 * @param a_Name The name of the type.
1154 * @param a_ArgList The argument list enclosed in parentheses.
1155 */
1156
1157#if defined(__GNUC__) && defined(RT_ARCH_X86)
1158# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1159 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
1160# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1161 __attribute__((__fastcall__, __nothrow__)) a_RetType a_Name a_ArgList
1162
1163#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
1164# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1165 a_RetType (__fastcall a_Name) a_ArgList
1166# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1167 a_RetType __fastcall a_Name a_ArgList
1168
1169#else
1170# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
1171 a_RetType (VBOXCALL a_Name) a_ArgList
1172# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
1173 a_RetType VBOXCALL a_Name a_ArgList
1174
1175#endif
1176
1177/** @name Arithmetic assignment operations on bytes (binary).
1178 * @{ */
1179typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU8, (uint8_t *pu8Dst, uint8_t u8Src, uint32_t *pEFlags));
1180typedef FNIEMAIMPLBINU8 *PFNIEMAIMPLBINU8;
1181FNIEMAIMPLBINU8 iemAImpl_add_u8, iemAImpl_add_u8_locked;
1182FNIEMAIMPLBINU8 iemAImpl_adc_u8, iemAImpl_adc_u8_locked;
1183FNIEMAIMPLBINU8 iemAImpl_sub_u8, iemAImpl_sub_u8_locked;
1184FNIEMAIMPLBINU8 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked;
1185FNIEMAIMPLBINU8 iemAImpl_or_u8, iemAImpl_or_u8_locked;
1186FNIEMAIMPLBINU8 iemAImpl_xor_u8, iemAImpl_xor_u8_locked;
1187FNIEMAIMPLBINU8 iemAImpl_and_u8, iemAImpl_and_u8_locked;
1188/** @} */
1189
1190/** @name Arithmetic assignment operations on words (binary).
1191 * @{ */
1192typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU16, (uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags));
1193typedef FNIEMAIMPLBINU16 *PFNIEMAIMPLBINU16;
1194FNIEMAIMPLBINU16 iemAImpl_add_u16, iemAImpl_add_u16_locked;
1195FNIEMAIMPLBINU16 iemAImpl_adc_u16, iemAImpl_adc_u16_locked;
1196FNIEMAIMPLBINU16 iemAImpl_sub_u16, iemAImpl_sub_u16_locked;
1197FNIEMAIMPLBINU16 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked;
1198FNIEMAIMPLBINU16 iemAImpl_or_u16, iemAImpl_or_u16_locked;
1199FNIEMAIMPLBINU16 iemAImpl_xor_u16, iemAImpl_xor_u16_locked;
1200FNIEMAIMPLBINU16 iemAImpl_and_u16, iemAImpl_and_u16_locked;
1201/** @} */
1202
1203/** @name Arithmetic assignment operations on double words (binary).
1204 * @{ */
1205typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pEFlags));
1206typedef FNIEMAIMPLBINU32 *PFNIEMAIMPLBINU32;
1207FNIEMAIMPLBINU32 iemAImpl_add_u32, iemAImpl_add_u32_locked;
1208FNIEMAIMPLBINU32 iemAImpl_adc_u32, iemAImpl_adc_u32_locked;
1209FNIEMAIMPLBINU32 iemAImpl_sub_u32, iemAImpl_sub_u32_locked;
1210FNIEMAIMPLBINU32 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked;
1211FNIEMAIMPLBINU32 iemAImpl_or_u32, iemAImpl_or_u32_locked;
1212FNIEMAIMPLBINU32 iemAImpl_xor_u32, iemAImpl_xor_u32_locked;
1213FNIEMAIMPLBINU32 iemAImpl_and_u32, iemAImpl_and_u32_locked;
1214/** @} */
1215
1216/** @name Arithmetic assignment operations on quad words (binary).
1217 * @{ */
1218typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINU64, (uint64_t *pu64Dst, uint64_t u64Src, uint32_t *pEFlags));
1219typedef FNIEMAIMPLBINU64 *PFNIEMAIMPLBINU64;
1220FNIEMAIMPLBINU64 iemAImpl_add_u64, iemAImpl_add_u64_locked;
1221FNIEMAIMPLBINU64 iemAImpl_adc_u64, iemAImpl_adc_u64_locked;
1222FNIEMAIMPLBINU64 iemAImpl_sub_u64, iemAImpl_sub_u64_locked;
1223FNIEMAIMPLBINU64 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked;
1224FNIEMAIMPLBINU64 iemAImpl_or_u64, iemAImpl_or_u64_locked;
1225FNIEMAIMPLBINU64 iemAImpl_xor_u64, iemAImpl_xor_u64_locked;
1226FNIEMAIMPLBINU64 iemAImpl_and_u64, iemAImpl_and_u64_locked;
1227/** @} */
1228
1229/** @name Compare operations (thrown in with the binary ops).
1230 * @{ */
1231FNIEMAIMPLBINU8 iemAImpl_cmp_u8;
1232FNIEMAIMPLBINU16 iemAImpl_cmp_u16;
1233FNIEMAIMPLBINU32 iemAImpl_cmp_u32;
1234FNIEMAIMPLBINU64 iemAImpl_cmp_u64;
1235/** @} */
1236
1237/** @name Test operations (thrown in with the binary ops).
1238 * @{ */
1239FNIEMAIMPLBINU8 iemAImpl_test_u8;
1240FNIEMAIMPLBINU16 iemAImpl_test_u16;
1241FNIEMAIMPLBINU32 iemAImpl_test_u32;
1242FNIEMAIMPLBINU64 iemAImpl_test_u64;
1243/** @} */
1244
1245/** @name Bit operations operations (thrown in with the binary ops).
1246 * @{ */
1247FNIEMAIMPLBINU16 iemAImpl_bt_u16, iemAImpl_bt_u16_locked;
1248FNIEMAIMPLBINU32 iemAImpl_bt_u32, iemAImpl_bt_u32_locked;
1249FNIEMAIMPLBINU64 iemAImpl_bt_u64, iemAImpl_bt_u64_locked;
1250FNIEMAIMPLBINU16 iemAImpl_btc_u16, iemAImpl_btc_u16_locked;
1251FNIEMAIMPLBINU32 iemAImpl_btc_u32, iemAImpl_btc_u32_locked;
1252FNIEMAIMPLBINU64 iemAImpl_btc_u64, iemAImpl_btc_u64_locked;
1253FNIEMAIMPLBINU16 iemAImpl_btr_u16, iemAImpl_btr_u16_locked;
1254FNIEMAIMPLBINU32 iemAImpl_btr_u32, iemAImpl_btr_u32_locked;
1255FNIEMAIMPLBINU64 iemAImpl_btr_u64, iemAImpl_btr_u64_locked;
1256FNIEMAIMPLBINU16 iemAImpl_bts_u16, iemAImpl_bts_u16_locked;
1257FNIEMAIMPLBINU32 iemAImpl_bts_u32, iemAImpl_bts_u32_locked;
1258FNIEMAIMPLBINU64 iemAImpl_bts_u64, iemAImpl_bts_u64_locked;
1259/** @} */
1260
1261/** @name Exchange memory with register operations.
1262 * @{ */
1263IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u8, (uint8_t *pu8Mem, uint8_t *pu8Reg));
1264IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u16,(uint16_t *pu16Mem, uint16_t *pu16Reg));
1265IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u32,(uint32_t *pu32Mem, uint32_t *pu32Reg));
1266IEM_DECL_IMPL_DEF(void, iemAImpl_xchg_u64,(uint64_t *pu64Mem, uint64_t *pu64Reg));
1267/** @} */
1268
1269/** @name Exchange and add operations.
1270 * @{ */
1271IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
1272IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
1273IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
1274IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
1275IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u8_locked, (uint8_t *pu8Dst, uint8_t *pu8Reg, uint32_t *pEFlags));
1276IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u16_locked,(uint16_t *pu16Dst, uint16_t *pu16Reg, uint32_t *pEFlags));
1277IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u32_locked,(uint32_t *pu32Dst, uint32_t *pu32Reg, uint32_t *pEFlags));
1278IEM_DECL_IMPL_DEF(void, iemAImpl_xadd_u64_locked,(uint64_t *pu64Dst, uint64_t *pu64Reg, uint32_t *pEFlags));
1279/** @} */
1280
1281/** @name Compare and exchange.
1282 * @{ */
1283IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
1284IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u8_locked, (uint8_t *pu8Dst, uint8_t *puAl, uint8_t uSrcReg, uint32_t *pEFlags));
1285IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16, (uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
1286IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u16_locked,(uint16_t *pu16Dst, uint16_t *puAx, uint16_t uSrcReg, uint32_t *pEFlags));
1287IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32, (uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
1288IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u32_locked,(uint32_t *pu32Dst, uint32_t *puEax, uint32_t uSrcReg, uint32_t *pEFlags));
1289#ifdef RT_ARCH_X86
1290IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
1291IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t *puSrcReg, uint32_t *pEFlags));
1292#else
1293IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64, (uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
1294IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg_u64_locked,(uint64_t *pu64Dst, uint64_t *puRax, uint64_t uSrcReg, uint32_t *pEFlags));
1295#endif
1296IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
1297 uint32_t *pEFlags));
1298IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg8b_locked,(uint64_t *pu64Dst, PRTUINT64U pu64EaxEdx, PRTUINT64U pu64EbxEcx,
1299 uint32_t *pEFlags));
1300IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
1301 uint32_t *pEFlags));
1302IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_locked,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx, PRTUINT128U pu128RbxRcx,
1303 uint32_t *pEFlags));
1304IEM_DECL_IMPL_DEF(void, iemAImpl_cmpxchg16b_fallback,(PRTUINT128U pu128Dst, PRTUINT128U pu128RaxRdx,
1305 PRTUINT128U pu128RbxRcx, uint32_t *pEFlags));
1306/** @} */
1307
1308/** @name Memory ordering
1309 * @{ */
1310typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEMFENCE,(void));
1311typedef FNIEMAIMPLMEMFENCE *PFNIEMAIMPLMEMFENCE;
1312IEM_DECL_IMPL_DEF(void, iemAImpl_mfence,(void));
1313IEM_DECL_IMPL_DEF(void, iemAImpl_sfence,(void));
1314IEM_DECL_IMPL_DEF(void, iemAImpl_lfence,(void));
1315IEM_DECL_IMPL_DEF(void, iemAImpl_alt_mem_fence,(void));
1316/** @} */
1317
1318/** @name Double precision shifts
1319 * @{ */
1320typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
1321typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
1322typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
1323typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
1324typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
1325typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
1326FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16;
1327FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32;
1328FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64;
1329FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16;
1330FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32;
1331FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64;
1332/** @} */
1333
1334
1335/** @name Bit search operations (thrown in with the binary ops).
1336 * @{ */
1337FNIEMAIMPLBINU16 iemAImpl_bsf_u16;
1338FNIEMAIMPLBINU32 iemAImpl_bsf_u32;
1339FNIEMAIMPLBINU64 iemAImpl_bsf_u64;
1340FNIEMAIMPLBINU16 iemAImpl_bsr_u16;
1341FNIEMAIMPLBINU32 iemAImpl_bsr_u32;
1342FNIEMAIMPLBINU64 iemAImpl_bsr_u64;
1343/** @} */
1344
1345/** @name Signed multiplication operations (thrown in with the binary ops).
1346 * @{ */
1347FNIEMAIMPLBINU16 iemAImpl_imul_two_u16;
1348FNIEMAIMPLBINU32 iemAImpl_imul_two_u32;
1349FNIEMAIMPLBINU64 iemAImpl_imul_two_u64;
1350/** @} */
1351
1352/** @name Arithmetic assignment operations on bytes (unary).
1353 * @{ */
1354typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU8, (uint8_t *pu8Dst, uint32_t *pEFlags));
1355typedef FNIEMAIMPLUNARYU8 *PFNIEMAIMPLUNARYU8;
1356FNIEMAIMPLUNARYU8 iemAImpl_inc_u8, iemAImpl_inc_u8_locked;
1357FNIEMAIMPLUNARYU8 iemAImpl_dec_u8, iemAImpl_dec_u8_locked;
1358FNIEMAIMPLUNARYU8 iemAImpl_not_u8, iemAImpl_not_u8_locked;
1359FNIEMAIMPLUNARYU8 iemAImpl_neg_u8, iemAImpl_neg_u8_locked;
1360/** @} */
1361
1362/** @name Arithmetic assignment operations on words (unary).
1363 * @{ */
1364typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU16, (uint16_t *pu16Dst, uint32_t *pEFlags));
1365typedef FNIEMAIMPLUNARYU16 *PFNIEMAIMPLUNARYU16;
1366FNIEMAIMPLUNARYU16 iemAImpl_inc_u16, iemAImpl_inc_u16_locked;
1367FNIEMAIMPLUNARYU16 iemAImpl_dec_u16, iemAImpl_dec_u16_locked;
1368FNIEMAIMPLUNARYU16 iemAImpl_not_u16, iemAImpl_not_u16_locked;
1369FNIEMAIMPLUNARYU16 iemAImpl_neg_u16, iemAImpl_neg_u16_locked;
1370/** @} */
1371
1372/** @name Arithmetic assignment operations on double words (unary).
1373 * @{ */
1374typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU32, (uint32_t *pu32Dst, uint32_t *pEFlags));
1375typedef FNIEMAIMPLUNARYU32 *PFNIEMAIMPLUNARYU32;
1376FNIEMAIMPLUNARYU32 iemAImpl_inc_u32, iemAImpl_inc_u32_locked;
1377FNIEMAIMPLUNARYU32 iemAImpl_dec_u32, iemAImpl_dec_u32_locked;
1378FNIEMAIMPLUNARYU32 iemAImpl_not_u32, iemAImpl_not_u32_locked;
1379FNIEMAIMPLUNARYU32 iemAImpl_neg_u32, iemAImpl_neg_u32_locked;
1380/** @} */
1381
1382/** @name Arithmetic assignment operations on quad words (unary).
1383 * @{ */
1384typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLUNARYU64, (uint64_t *pu64Dst, uint32_t *pEFlags));
1385typedef FNIEMAIMPLUNARYU64 *PFNIEMAIMPLUNARYU64;
1386FNIEMAIMPLUNARYU64 iemAImpl_inc_u64, iemAImpl_inc_u64_locked;
1387FNIEMAIMPLUNARYU64 iemAImpl_dec_u64, iemAImpl_dec_u64_locked;
1388FNIEMAIMPLUNARYU64 iemAImpl_not_u64, iemAImpl_not_u64_locked;
1389FNIEMAIMPLUNARYU64 iemAImpl_neg_u64, iemAImpl_neg_u64_locked;
1390/** @} */
1391
1392
1393/** @name Shift operations on bytes (Group 2).
1394 * @{ */
1395typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU8,(uint8_t *pu8Dst, uint8_t cShift, uint32_t *pEFlags));
1396typedef FNIEMAIMPLSHIFTU8 *PFNIEMAIMPLSHIFTU8;
1397FNIEMAIMPLSHIFTU8 iemAImpl_rol_u8;
1398FNIEMAIMPLSHIFTU8 iemAImpl_ror_u8;
1399FNIEMAIMPLSHIFTU8 iemAImpl_rcl_u8;
1400FNIEMAIMPLSHIFTU8 iemAImpl_rcr_u8;
1401FNIEMAIMPLSHIFTU8 iemAImpl_shl_u8;
1402FNIEMAIMPLSHIFTU8 iemAImpl_shr_u8;
1403FNIEMAIMPLSHIFTU8 iemAImpl_sar_u8;
1404/** @} */
1405
1406/** @name Shift operations on words (Group 2).
1407 * @{ */
1408typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU16,(uint16_t *pu16Dst, uint8_t cShift, uint32_t *pEFlags));
1409typedef FNIEMAIMPLSHIFTU16 *PFNIEMAIMPLSHIFTU16;
1410FNIEMAIMPLSHIFTU16 iemAImpl_rol_u16;
1411FNIEMAIMPLSHIFTU16 iemAImpl_ror_u16;
1412FNIEMAIMPLSHIFTU16 iemAImpl_rcl_u16;
1413FNIEMAIMPLSHIFTU16 iemAImpl_rcr_u16;
1414FNIEMAIMPLSHIFTU16 iemAImpl_shl_u16;
1415FNIEMAIMPLSHIFTU16 iemAImpl_shr_u16;
1416FNIEMAIMPLSHIFTU16 iemAImpl_sar_u16;
1417/** @} */
1418
1419/** @name Shift operations on double words (Group 2).
1420 * @{ */
1421typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU32,(uint32_t *pu32Dst, uint8_t cShift, uint32_t *pEFlags));
1422typedef FNIEMAIMPLSHIFTU32 *PFNIEMAIMPLSHIFTU32;
1423FNIEMAIMPLSHIFTU32 iemAImpl_rol_u32;
1424FNIEMAIMPLSHIFTU32 iemAImpl_ror_u32;
1425FNIEMAIMPLSHIFTU32 iemAImpl_rcl_u32;
1426FNIEMAIMPLSHIFTU32 iemAImpl_rcr_u32;
1427FNIEMAIMPLSHIFTU32 iemAImpl_shl_u32;
1428FNIEMAIMPLSHIFTU32 iemAImpl_shr_u32;
1429FNIEMAIMPLSHIFTU32 iemAImpl_sar_u32;
1430/** @} */
1431
1432/** @name Shift operations on words (Group 2).
1433 * @{ */
1434typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTU64,(uint64_t *pu64Dst, uint8_t cShift, uint32_t *pEFlags));
1435typedef FNIEMAIMPLSHIFTU64 *PFNIEMAIMPLSHIFTU64;
1436FNIEMAIMPLSHIFTU64 iemAImpl_rol_u64;
1437FNIEMAIMPLSHIFTU64 iemAImpl_ror_u64;
1438FNIEMAIMPLSHIFTU64 iemAImpl_rcl_u64;
1439FNIEMAIMPLSHIFTU64 iemAImpl_rcr_u64;
1440FNIEMAIMPLSHIFTU64 iemAImpl_shl_u64;
1441FNIEMAIMPLSHIFTU64 iemAImpl_shr_u64;
1442FNIEMAIMPLSHIFTU64 iemAImpl_sar_u64;
1443/** @} */
1444
1445/** @name Multiplication and division operations.
1446 * @{ */
1447typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU8,(uint16_t *pu16AX, uint8_t u8FactorDivisor, uint32_t *pEFlags));
1448typedef FNIEMAIMPLMULDIVU8 *PFNIEMAIMPLMULDIVU8;
1449FNIEMAIMPLMULDIVU8 iemAImpl_mul_u8, iemAImpl_imul_u8;
1450FNIEMAIMPLMULDIVU8 iemAImpl_div_u8, iemAImpl_idiv_u8;
1451
1452typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU16,(uint16_t *pu16AX, uint16_t *pu16DX, uint16_t u16FactorDivisor, uint32_t *pEFlags));
1453typedef FNIEMAIMPLMULDIVU16 *PFNIEMAIMPLMULDIVU16;
1454FNIEMAIMPLMULDIVU16 iemAImpl_mul_u16, iemAImpl_imul_u16;
1455FNIEMAIMPLMULDIVU16 iemAImpl_div_u16, iemAImpl_idiv_u16;
1456
1457typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU32,(uint32_t *pu32EAX, uint32_t *pu32EDX, uint32_t u32FactorDivisor, uint32_t *pEFlags));
1458typedef FNIEMAIMPLMULDIVU32 *PFNIEMAIMPLMULDIVU32;
1459FNIEMAIMPLMULDIVU32 iemAImpl_mul_u32, iemAImpl_imul_u32;
1460FNIEMAIMPLMULDIVU32 iemAImpl_div_u32, iemAImpl_idiv_u32;
1461
1462typedef IEM_DECL_IMPL_TYPE(int, FNIEMAIMPLMULDIVU64,(uint64_t *pu64RAX, uint64_t *pu64RDX, uint64_t u64FactorDivisor, uint32_t *pEFlags));
1463typedef FNIEMAIMPLMULDIVU64 *PFNIEMAIMPLMULDIVU64;
1464FNIEMAIMPLMULDIVU64 iemAImpl_mul_u64, iemAImpl_imul_u64;
1465FNIEMAIMPLMULDIVU64 iemAImpl_div_u64, iemAImpl_idiv_u64;
1466/** @} */
1467
1468/** @name Byte Swap.
1469 * @{ */
1470IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u16,(uint32_t *pu32Dst)); /* Yes, 32-bit register access. */
1471IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u32,(uint32_t *pu32Dst));
1472IEM_DECL_IMPL_TYPE(void, iemAImpl_bswap_u64,(uint64_t *pu64Dst));
1473/** @} */
1474
1475/** @name Misc.
1476 * @{ */
1477FNIEMAIMPLBINU16 iemAImpl_arpl;
1478/** @} */
1479
1480
1481/** @name FPU operations taking a 32-bit float argument
1482 * @{ */
1483typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1484 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
1485typedef FNIEMAIMPLFPUR32FSW *PFNIEMAIMPLFPUR32FSW;
1486
1487typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1488 PCRTFLOAT80U pr80Val1, PCRTFLOAT32U pr32Val2));
1489typedef FNIEMAIMPLFPUR32 *PFNIEMAIMPLFPUR32;
1490
1491FNIEMAIMPLFPUR32FSW iemAImpl_fcom_r80_by_r32;
1492FNIEMAIMPLFPUR32 iemAImpl_fadd_r80_by_r32;
1493FNIEMAIMPLFPUR32 iemAImpl_fmul_r80_by_r32;
1494FNIEMAIMPLFPUR32 iemAImpl_fsub_r80_by_r32;
1495FNIEMAIMPLFPUR32 iemAImpl_fsubr_r80_by_r32;
1496FNIEMAIMPLFPUR32 iemAImpl_fdiv_r80_by_r32;
1497FNIEMAIMPLFPUR32 iemAImpl_fdivr_r80_by_r32;
1498
1499IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r32_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT32U pr32Val));
1500IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1501 PRTFLOAT32U pr32Val, PCRTFLOAT80U pr80Val));
1502/** @} */
1503
1504/** @name FPU operations taking a 64-bit float argument
1505 * @{ */
1506typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1507 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
1508typedef FNIEMAIMPLFPUR64 *PFNIEMAIMPLFPUR64;
1509
1510FNIEMAIMPLFPUR64 iemAImpl_fadd_r80_by_r64;
1511FNIEMAIMPLFPUR64 iemAImpl_fmul_r80_by_r64;
1512FNIEMAIMPLFPUR64 iemAImpl_fsub_r80_by_r64;
1513FNIEMAIMPLFPUR64 iemAImpl_fsubr_r80_by_r64;
1514FNIEMAIMPLFPUR64 iemAImpl_fdiv_r80_by_r64;
1515FNIEMAIMPLFPUR64 iemAImpl_fdivr_r80_by_r64;
1516
1517IEM_DECL_IMPL_DEF(void, iemAImpl_fcom_r80_by_r64,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1518 PCRTFLOAT80U pr80Val1, PCRTFLOAT64U pr64Val2));
1519IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r64_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT64U pr64Val));
1520IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1521 PRTFLOAT64U pr32Val, PCRTFLOAT80U pr80Val));
1522/** @} */
1523
1524/** @name FPU operations taking a 80-bit float argument
1525 * @{ */
1526typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1527 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1528typedef FNIEMAIMPLFPUR80 *PFNIEMAIMPLFPUR80;
1529FNIEMAIMPLFPUR80 iemAImpl_fadd_r80_by_r80;
1530FNIEMAIMPLFPUR80 iemAImpl_fmul_r80_by_r80;
1531FNIEMAIMPLFPUR80 iemAImpl_fsub_r80_by_r80;
1532FNIEMAIMPLFPUR80 iemAImpl_fsubr_r80_by_r80;
1533FNIEMAIMPLFPUR80 iemAImpl_fdiv_r80_by_r80;
1534FNIEMAIMPLFPUR80 iemAImpl_fdivr_r80_by_r80;
1535FNIEMAIMPLFPUR80 iemAImpl_fprem_r80_by_r80;
1536FNIEMAIMPLFPUR80 iemAImpl_fprem1_r80_by_r80;
1537FNIEMAIMPLFPUR80 iemAImpl_fscale_r80_by_r80;
1538
1539FNIEMAIMPLFPUR80 iemAImpl_fpatan_r80_by_r80;
1540FNIEMAIMPLFPUR80 iemAImpl_fyl2x_r80_by_r80;
1541FNIEMAIMPLFPUR80 iemAImpl_fyl2xp1_r80_by_r80;
1542
1543typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80FSW,(PCX86FXSTATE pFpuState, uint16_t *pFSW,
1544 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1545typedef FNIEMAIMPLFPUR80FSW *PFNIEMAIMPLFPUR80FSW;
1546FNIEMAIMPLFPUR80FSW iemAImpl_fcom_r80_by_r80;
1547FNIEMAIMPLFPUR80FSW iemAImpl_fucom_r80_by_r80;
1548
1549typedef IEM_DECL_IMPL_TYPE(uint32_t, FNIEMAIMPLFPUR80EFL,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1550 PCRTFLOAT80U pr80Val1, PCRTFLOAT80U pr80Val2));
1551typedef FNIEMAIMPLFPUR80EFL *PFNIEMAIMPLFPUR80EFL;
1552FNIEMAIMPLFPUR80EFL iemAImpl_fcomi_r80_by_r80;
1553FNIEMAIMPLFPUR80EFL iemAImpl_fucomi_r80_by_r80;
1554
1555typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARY,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
1556typedef FNIEMAIMPLFPUR80UNARY *PFNIEMAIMPLFPUR80UNARY;
1557FNIEMAIMPLFPUR80UNARY iemAImpl_fabs_r80;
1558FNIEMAIMPLFPUR80UNARY iemAImpl_fchs_r80;
1559FNIEMAIMPLFPUR80UNARY iemAImpl_f2xm1_r80;
1560FNIEMAIMPLFPUR80UNARY iemAImpl_fsqrt_r80;
1561FNIEMAIMPLFPUR80UNARY iemAImpl_frndint_r80;
1562FNIEMAIMPLFPUR80UNARY iemAImpl_fsin_r80;
1563FNIEMAIMPLFPUR80UNARY iemAImpl_fcos_r80;
1564
1565typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYFSW,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw, PCRTFLOAT80U pr80Val));
1566typedef FNIEMAIMPLFPUR80UNARYFSW *PFNIEMAIMPLFPUR80UNARYFSW;
1567FNIEMAIMPLFPUR80UNARYFSW iemAImpl_ftst_r80;
1568FNIEMAIMPLFPUR80UNARYFSW iemAImpl_fxam_r80;
1569
1570typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80LDCONST,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes));
1571typedef FNIEMAIMPLFPUR80LDCONST *PFNIEMAIMPLFPUR80LDCONST;
1572FNIEMAIMPLFPUR80LDCONST iemAImpl_fld1;
1573FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2t;
1574FNIEMAIMPLFPUR80LDCONST iemAImpl_fldl2e;
1575FNIEMAIMPLFPUR80LDCONST iemAImpl_fldpi;
1576FNIEMAIMPLFPUR80LDCONST iemAImpl_fldlg2;
1577FNIEMAIMPLFPUR80LDCONST iemAImpl_fldln2;
1578FNIEMAIMPLFPUR80LDCONST iemAImpl_fldz;
1579
1580typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUR80UNARYTWO,(PCX86FXSTATE pFpuState, PIEMFPURESULTTWO pFpuResTwo,
1581 PCRTFLOAT80U pr80Val));
1582typedef FNIEMAIMPLFPUR80UNARYTWO *PFNIEMAIMPLFPUR80UNARYTWO;
1583FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fptan_r80_r80;
1584FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fxtract_r80_r80;
1585FNIEMAIMPLFPUR80UNARYTWO iemAImpl_fsincos_r80_r80;
1586
1587IEM_DECL_IMPL_DEF(void, iemAImpl_fld_r80_from_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, PCRTFLOAT80U pr80Val));
1588IEM_DECL_IMPL_DEF(void, iemAImpl_fst_r80_to_r80,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1589 PRTFLOAT80U pr80Dst, PCRTFLOAT80U pr80Src));
1590
1591/** @} */
1592
1593/** @name FPU operations taking a 16-bit signed integer argument
1594 * @{ */
1595typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI16,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1596 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
1597typedef FNIEMAIMPLFPUI16 *PFNIEMAIMPLFPUI16;
1598
1599FNIEMAIMPLFPUI16 iemAImpl_fiadd_r80_by_i16;
1600FNIEMAIMPLFPUI16 iemAImpl_fimul_r80_by_i16;
1601FNIEMAIMPLFPUI16 iemAImpl_fisub_r80_by_i16;
1602FNIEMAIMPLFPUI16 iemAImpl_fisubr_r80_by_i16;
1603FNIEMAIMPLFPUI16 iemAImpl_fidiv_r80_by_i16;
1604FNIEMAIMPLFPUI16 iemAImpl_fidivr_r80_by_i16;
1605
1606IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1607 PCRTFLOAT80U pr80Val1, int16_t const *pi16Val2));
1608
1609IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i16_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int16_t const *pi16Val));
1610IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1611 int16_t *pi16Val, PCRTFLOAT80U pr80Val));
1612IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i16,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1613 int16_t *pi16Val, PCRTFLOAT80U pr80Val));
1614/** @} */
1615
1616/** @name FPU operations taking a 32-bit signed integer argument
1617 * @{ */
1618typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI32,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1619 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
1620typedef FNIEMAIMPLFPUI32 *PFNIEMAIMPLFPUI32;
1621
1622FNIEMAIMPLFPUI32 iemAImpl_fiadd_r80_by_i32;
1623FNIEMAIMPLFPUI32 iemAImpl_fimul_r80_by_i32;
1624FNIEMAIMPLFPUI32 iemAImpl_fisub_r80_by_i32;
1625FNIEMAIMPLFPUI32 iemAImpl_fisubr_r80_by_i32;
1626FNIEMAIMPLFPUI32 iemAImpl_fidiv_r80_by_i32;
1627FNIEMAIMPLFPUI32 iemAImpl_fidivr_r80_by_i32;
1628
1629IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1630 PCRTFLOAT80U pr80Val1, int32_t const *pi32Val2));
1631
1632IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i32_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int32_t const *pi32Val));
1633IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1634 int32_t *pi32Val, PCRTFLOAT80U pr80Val));
1635IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i32,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1636 int32_t *pi32Val, PCRTFLOAT80U pr80Val));
1637/** @} */
1638
1639/** @name FPU operations taking a 64-bit signed integer argument
1640 * @{ */
1641typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLFPUI64,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes,
1642 PCRTFLOAT80U pr80Val1, int64_t const *pi64Val2));
1643typedef FNIEMAIMPLFPUI64 *PFNIEMAIMPLFPUI64;
1644
1645FNIEMAIMPLFPUI64 iemAImpl_fiadd_r80_by_i64;
1646FNIEMAIMPLFPUI64 iemAImpl_fimul_r80_by_i64;
1647FNIEMAIMPLFPUI64 iemAImpl_fisub_r80_by_i64;
1648FNIEMAIMPLFPUI64 iemAImpl_fisubr_r80_by_i64;
1649FNIEMAIMPLFPUI64 iemAImpl_fidiv_r80_by_i64;
1650FNIEMAIMPLFPUI64 iemAImpl_fidivr_r80_by_i64;
1651
1652IEM_DECL_IMPL_DEF(void, iemAImpl_ficom_r80_by_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16Fsw,
1653 PCRTFLOAT80U pr80Val1, int64_t const *pi64Val2));
1654
1655IEM_DECL_IMPL_DEF(void, iemAImpl_fild_i64_to_r80,(PCX86FXSTATE pFpuState, PIEMFPURESULT pFpuRes, int64_t const *pi64Val));
1656IEM_DECL_IMPL_DEF(void, iemAImpl_fist_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1657 int64_t *pi64Val, PCRTFLOAT80U pr80Val));
1658IEM_DECL_IMPL_DEF(void, iemAImpl_fistt_r80_to_i64,(PCX86FXSTATE pFpuState, uint16_t *pu16FSW,
1659 int64_t *pi32Val, PCRTFLOAT80U pr80Val));
1660/** @} */
1661
1662
1663/** Temporary type representing a 256-bit vector register. */
1664typedef struct {uint64_t au64[4]; } IEMVMM256;
1665/** Temporary type pointing to a 256-bit vector register. */
1666typedef IEMVMM256 *PIEMVMM256;
1667/** Temporary type pointing to a const 256-bit vector register. */
1668typedef IEMVMM256 *PCIEMVMM256;
1669
1670
1671/** @name Media (SSE/MMX/AVX) operations: full1 + full2 -> full1.
1672 * @{ */
1673typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1674typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF2U64;
1675typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF2U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, PCRTUINT128U pu128Src));
1676typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF2U128;
1677FNIEMAIMPLMEDIAF2U64 iemAImpl_pxor_u64, iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqd_u64;
1678FNIEMAIMPLMEDIAF2U128 iemAImpl_pxor_u128, iemAImpl_pcmpeqb_u128, iemAImpl_pcmpeqw_u128, iemAImpl_pcmpeqd_u128;
1679/** @} */
1680
1681/** @name Media (SSE/MMX/AVX) operations: lowhalf1 + lowhalf1 -> full1.
1682 * @{ */
1683typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint32_t const *pu32Src));
1684typedef FNIEMAIMPLMEDIAF1L1U64 *PFNIEMAIMPLMEDIAF1L1U64;
1685typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1L1U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, uint64_t const *pu64Src));
1686typedef FNIEMAIMPLMEDIAF1L1U128 *PFNIEMAIMPLMEDIAF1L1U128;
1687FNIEMAIMPLMEDIAF1L1U64 iemAImpl_punpcklbw_u64, iemAImpl_punpcklwd_u64, iemAImpl_punpckldq_u64;
1688FNIEMAIMPLMEDIAF1L1U128 iemAImpl_punpcklbw_u128, iemAImpl_punpcklwd_u128, iemAImpl_punpckldq_u128, iemAImpl_punpcklqdq_u128;
1689/** @} */
1690
1691/** @name Media (SSE/MMX/AVX) operations: hihalf1 + hihalf2 -> full1.
1692 * @{ */
1693typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1694typedef FNIEMAIMPLMEDIAF2U64 *PFNIEMAIMPLMEDIAF1H1U64;
1695typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAF1H1U128,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst, PCRTUINT128U pu128Src));
1696typedef FNIEMAIMPLMEDIAF2U128 *PFNIEMAIMPLMEDIAF1H1U128;
1697FNIEMAIMPLMEDIAF1H1U64 iemAImpl_punpckhbw_u64, iemAImpl_punpckhwd_u64, iemAImpl_punpckhdq_u64;
1698FNIEMAIMPLMEDIAF1H1U128 iemAImpl_punpckhbw_u128, iemAImpl_punpckhwd_u128, iemAImpl_punpckhdq_u128, iemAImpl_punpckhqdq_u128;
1699/** @} */
1700
1701/** @name Media (SSE/MMX/AVX) operation: Packed Shuffle Stuff (evil)
1702 * @{ */
1703typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLMEDIAPSHUF,(PCX86FXSTATE pFpuState, PRTUINT128U pu128Dst,
1704 PCRTUINT128U pu128Src, uint8_t bEvil));
1705typedef FNIEMAIMPLMEDIAPSHUF *PFNIEMAIMPLMEDIAPSHUF;
1706FNIEMAIMPLMEDIAPSHUF iemAImpl_pshufhw, iemAImpl_pshuflw, iemAImpl_pshufd;
1707IEM_DECL_IMPL_DEF(void, iemAImpl_pshufw,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src, uint8_t bEvil));
1708/** @} */
1709
1710/** @name Media (SSE/MMX/AVX) operation: Move Byte Mask
1711 * @{ */
1712IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u64,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, uint64_t const *pu64Src));
1713IEM_DECL_IMPL_DEF(void, iemAImpl_pmovmskb_u128,(PCX86FXSTATE pFpuState, uint64_t *pu64Dst, PCRTUINT128U pu128Src));
1714/** @} */
1715
1716/** @name Media (SSE/MMX/AVX) operation: Sort this later
1717 * @{ */
1718IEM_DECL_IMPL_DEF(void, iemAImpl_movsldup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
1719IEM_DECL_IMPL_DEF(void, iemAImpl_movshdup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, PCRTUINT128U puSrc));
1720IEM_DECL_IMPL_DEF(void, iemAImpl_movddup,(PCX86FXSTATE pFpuState, PRTUINT128U puDst, uint64_t uSrc));
1721
1722IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
1723IEM_DECL_IMPL_DEF(void, iemAImpl_vmovsldup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
1724IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rr,(PX86XSAVEAREA pXState, uint8_t iYRegDst, uint8_t iYRegSrc));
1725IEM_DECL_IMPL_DEF(void, iemAImpl_vmovddup_256_rm,(PX86XSAVEAREA pXState, uint8_t iYRegDst, PCRTUINT256U pSrc));
1726
1727/** @} */
1728
1729
1730/** @name Function tables.
1731 * @{
1732 */
1733
1734/**
1735 * Function table for a binary operator providing implementation based on
1736 * operand size.
1737 */
1738typedef struct IEMOPBINSIZES
1739{
1740 PFNIEMAIMPLBINU8 pfnNormalU8, pfnLockedU8;
1741 PFNIEMAIMPLBINU16 pfnNormalU16, pfnLockedU16;
1742 PFNIEMAIMPLBINU32 pfnNormalU32, pfnLockedU32;
1743 PFNIEMAIMPLBINU64 pfnNormalU64, pfnLockedU64;
1744} IEMOPBINSIZES;
1745/** Pointer to a binary operator function table. */
1746typedef IEMOPBINSIZES const *PCIEMOPBINSIZES;
1747
1748
1749/**
1750 * Function table for a unary operator providing implementation based on
1751 * operand size.
1752 */
1753typedef struct IEMOPUNARYSIZES
1754{
1755 PFNIEMAIMPLUNARYU8 pfnNormalU8, pfnLockedU8;
1756 PFNIEMAIMPLUNARYU16 pfnNormalU16, pfnLockedU16;
1757 PFNIEMAIMPLUNARYU32 pfnNormalU32, pfnLockedU32;
1758 PFNIEMAIMPLUNARYU64 pfnNormalU64, pfnLockedU64;
1759} IEMOPUNARYSIZES;
1760/** Pointer to a unary operator function table. */
1761typedef IEMOPUNARYSIZES const *PCIEMOPUNARYSIZES;
1762
1763
1764/**
1765 * Function table for a shift operator providing implementation based on
1766 * operand size.
1767 */
1768typedef struct IEMOPSHIFTSIZES
1769{
1770 PFNIEMAIMPLSHIFTU8 pfnNormalU8;
1771 PFNIEMAIMPLSHIFTU16 pfnNormalU16;
1772 PFNIEMAIMPLSHIFTU32 pfnNormalU32;
1773 PFNIEMAIMPLSHIFTU64 pfnNormalU64;
1774} IEMOPSHIFTSIZES;
1775/** Pointer to a shift operator function table. */
1776typedef IEMOPSHIFTSIZES const *PCIEMOPSHIFTSIZES;
1777
1778
1779/**
1780 * Function table for a multiplication or division operation.
1781 */
1782typedef struct IEMOPMULDIVSIZES
1783{
1784 PFNIEMAIMPLMULDIVU8 pfnU8;
1785 PFNIEMAIMPLMULDIVU16 pfnU16;
1786 PFNIEMAIMPLMULDIVU32 pfnU32;
1787 PFNIEMAIMPLMULDIVU64 pfnU64;
1788} IEMOPMULDIVSIZES;
1789/** Pointer to a multiplication or division operation function table. */
1790typedef IEMOPMULDIVSIZES const *PCIEMOPMULDIVSIZES;
1791
1792
1793/**
1794 * Function table for a double precision shift operator providing implementation
1795 * based on operand size.
1796 */
1797typedef struct IEMOPSHIFTDBLSIZES
1798{
1799 PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
1800 PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
1801 PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
1802} IEMOPSHIFTDBLSIZES;
1803/** Pointer to a double precision shift function table. */
1804typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
1805
1806
1807/**
1808 * Function table for media instruction taking two full sized media registers,
1809 * optionally the 2nd being a memory reference (only modifying the first op.)
1810 */
1811typedef struct IEMOPMEDIAF2
1812{
1813 PFNIEMAIMPLMEDIAF2U64 pfnU64;
1814 PFNIEMAIMPLMEDIAF2U128 pfnU128;
1815} IEMOPMEDIAF2;
1816/** Pointer to a media operation function table for full sized ops. */
1817typedef IEMOPMEDIAF2 const *PCIEMOPMEDIAF2;
1818
1819/**
1820 * Function table for media instruction taking taking one full and one lower
1821 * half media register.
1822 */
1823typedef struct IEMOPMEDIAF1L1
1824{
1825 PFNIEMAIMPLMEDIAF1L1U64 pfnU64;
1826 PFNIEMAIMPLMEDIAF1L1U128 pfnU128;
1827} IEMOPMEDIAF1L1;
1828/** Pointer to a media operation function table for lowhalf+lowhalf -> full. */
1829typedef IEMOPMEDIAF1L1 const *PCIEMOPMEDIAF1L1;
1830
1831/**
1832 * Function table for media instruction taking taking one full and one high half
1833 * media register.
1834 */
1835typedef struct IEMOPMEDIAF1H1
1836{
1837 PFNIEMAIMPLMEDIAF1H1U64 pfnU64;
1838 PFNIEMAIMPLMEDIAF1H1U128 pfnU128;
1839} IEMOPMEDIAF1H1;
1840/** Pointer to a media operation function table for hihalf+hihalf -> full. */
1841typedef IEMOPMEDIAF1H1 const *PCIEMOPMEDIAF1H1;
1842
1843
1844/** @} */
1845
1846
1847/** @name C instruction implementations for anything slightly complicated.
1848 * @{ */
1849
1850/**
1851 * For typedef'ing or declaring a C instruction implementation function taking
1852 * no extra arguments.
1853 *
1854 * @param a_Name The name of the type.
1855 */
1856# define IEM_CIMPL_DECL_TYPE_0(a_Name) \
1857 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr))
1858/**
1859 * For defining a C instruction implementation function taking no extra
1860 * arguments.
1861 *
1862 * @param a_Name The name of the function
1863 */
1864# define IEM_CIMPL_DEF_0(a_Name) \
1865 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr))
1866/**
1867 * For calling a C instruction implementation function taking no extra
1868 * arguments.
1869 *
1870 * This special call macro adds default arguments to the call and allow us to
1871 * change these later.
1872 *
1873 * @param a_fn The name of the function.
1874 */
1875# define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu, cbInstr)
1876
1877/**
1878 * For typedef'ing or declaring a C instruction implementation function taking
1879 * one extra argument.
1880 *
1881 * @param a_Name The name of the type.
1882 * @param a_Type0 The argument type.
1883 * @param a_Arg0 The argument name.
1884 */
1885# define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
1886 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
1887/**
1888 * For defining a C instruction implementation function taking one extra
1889 * argument.
1890 *
1891 * @param a_Name The name of the function
1892 * @param a_Type0 The argument type.
1893 * @param a_Arg0 The argument name.
1894 */
1895# define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
1896 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0))
1897/**
1898 * For calling a C instruction implementation function taking one extra
1899 * argument.
1900 *
1901 * This special call macro adds default arguments to the call and allow us to
1902 * change these later.
1903 *
1904 * @param a_fn The name of the function.
1905 * @param a0 The name of the 1st argument.
1906 */
1907# define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu, cbInstr, (a0))
1908
1909/**
1910 * For typedef'ing or declaring a C instruction implementation function taking
1911 * two extra arguments.
1912 *
1913 * @param a_Name The name of the type.
1914 * @param a_Type0 The type of the 1st argument
1915 * @param a_Arg0 The name of the 1st argument.
1916 * @param a_Type1 The type of the 2nd argument.
1917 * @param a_Arg1 The name of the 2nd argument.
1918 */
1919# define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
1920 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
1921/**
1922 * For defining a C instruction implementation function taking two extra
1923 * arguments.
1924 *
1925 * @param a_Name The name of the function.
1926 * @param a_Type0 The type of the 1st argument
1927 * @param a_Arg0 The name of the 1st argument.
1928 * @param a_Type1 The type of the 2nd argument.
1929 * @param a_Arg1 The name of the 2nd argument.
1930 */
1931# define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
1932 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1))
1933/**
1934 * For calling a C instruction implementation function taking two extra
1935 * arguments.
1936 *
1937 * This special call macro adds default arguments to the call and allow us to
1938 * change these later.
1939 *
1940 * @param a_fn The name of the function.
1941 * @param a0 The name of the 1st argument.
1942 * @param a1 The name of the 2nd argument.
1943 */
1944# define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu, cbInstr, (a0), (a1))
1945
1946/**
1947 * For typedef'ing or declaring a C instruction implementation function taking
1948 * three extra arguments.
1949 *
1950 * @param a_Name The name of the type.
1951 * @param a_Type0 The type of the 1st argument
1952 * @param a_Arg0 The name of the 1st argument.
1953 * @param a_Type1 The type of the 2nd argument.
1954 * @param a_Arg1 The name of the 2nd argument.
1955 * @param a_Type2 The type of the 3rd argument.
1956 * @param a_Arg2 The name of the 3rd argument.
1957 */
1958# define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
1959 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
1960/**
1961 * For defining a C instruction implementation function taking three extra
1962 * arguments.
1963 *
1964 * @param a_Name The name of the function.
1965 * @param a_Type0 The type of the 1st argument
1966 * @param a_Arg0 The name of the 1st argument.
1967 * @param a_Type1 The type of the 2nd argument.
1968 * @param a_Arg1 The name of the 2nd argument.
1969 * @param a_Type2 The type of the 3rd argument.
1970 * @param a_Arg2 The name of the 3rd argument.
1971 */
1972# define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
1973 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
1974/**
1975 * For calling a C instruction implementation function taking three extra
1976 * arguments.
1977 *
1978 * This special call macro adds default arguments to the call and allow us to
1979 * change these later.
1980 *
1981 * @param a_fn The name of the function.
1982 * @param a0 The name of the 1st argument.
1983 * @param a1 The name of the 2nd argument.
1984 * @param a2 The name of the 3rd argument.
1985 */
1986# define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu, cbInstr, (a0), (a1), (a2))
1987
1988
1989/**
1990 * For typedef'ing or declaring a C instruction implementation function taking
1991 * four extra arguments.
1992 *
1993 * @param a_Name The name of the type.
1994 * @param a_Type0 The type of the 1st argument
1995 * @param a_Arg0 The name of the 1st argument.
1996 * @param a_Type1 The type of the 2nd argument.
1997 * @param a_Arg1 The name of the 2nd argument.
1998 * @param a_Type2 The type of the 3rd argument.
1999 * @param a_Arg2 The name of the 3rd argument.
2000 * @param a_Type3 The type of the 4th argument.
2001 * @param a_Arg3 The name of the 4th argument.
2002 */
2003# define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
2004 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
2005/**
2006 * For defining a C instruction implementation function taking four extra
2007 * arguments.
2008 *
2009 * @param a_Name The name of the function.
2010 * @param a_Type0 The type of the 1st argument
2011 * @param a_Arg0 The name of the 1st argument.
2012 * @param a_Type1 The type of the 2nd argument.
2013 * @param a_Arg1 The name of the 2nd argument.
2014 * @param a_Type2 The type of the 3rd argument.
2015 * @param a_Arg2 The name of the 3rd argument.
2016 * @param a_Type3 The type of the 4th argument.
2017 * @param a_Arg3 The name of the 4th argument.
2018 */
2019# define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
2020 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, a_Type0 a_Arg0, a_Type1 a_Arg1, \
2021 a_Type2 a_Arg2, a_Type3 a_Arg3))
2022/**
2023 * For calling a C instruction implementation function taking four extra
2024 * arguments.
2025 *
2026 * This special call macro adds default arguments to the call and allow us to
2027 * change these later.
2028 *
2029 * @param a_fn The name of the function.
2030 * @param a0 The name of the 1st argument.
2031 * @param a1 The name of the 2nd argument.
2032 * @param a2 The name of the 3rd argument.
2033 * @param a3 The name of the 4th argument.
2034 */
2035# define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3))
2036
2037
2038/**
2039 * For typedef'ing or declaring a C instruction implementation function taking
2040 * five extra arguments.
2041 *
2042 * @param a_Name The name of the type.
2043 * @param a_Type0 The type of the 1st argument
2044 * @param a_Arg0 The name of the 1st argument.
2045 * @param a_Type1 The type of the 2nd argument.
2046 * @param a_Arg1 The name of the 2nd argument.
2047 * @param a_Type2 The type of the 3rd argument.
2048 * @param a_Arg2 The name of the 3rd argument.
2049 * @param a_Type3 The type of the 4th argument.
2050 * @param a_Arg3 The name of the 4th argument.
2051 * @param a_Type4 The type of the 5th argument.
2052 * @param a_Arg4 The name of the 5th argument.
2053 */
2054# define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
2055 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \
2056 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
2057 a_Type3 a_Arg3, a_Type4 a_Arg4))
2058/**
2059 * For defining a C instruction implementation function taking five extra
2060 * arguments.
2061 *
2062 * @param a_Name The name of the function.
2063 * @param a_Type0 The type of the 1st argument
2064 * @param a_Arg0 The name of the 1st argument.
2065 * @param a_Type1 The type of the 2nd argument.
2066 * @param a_Arg1 The name of the 2nd argument.
2067 * @param a_Type2 The type of the 3rd argument.
2068 * @param a_Arg2 The name of the 3rd argument.
2069 * @param a_Type3 The type of the 4th argument.
2070 * @param a_Arg3 The name of the 4th argument.
2071 * @param a_Type4 The type of the 5th argument.
2072 * @param a_Arg4 The name of the 5th argument.
2073 */
2074# define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
2075 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPU pVCpu, uint8_t cbInstr, \
2076 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
2077 a_Type3 a_Arg3, a_Type4 a_Arg4))
2078/**
2079 * For calling a C instruction implementation function taking five extra
2080 * arguments.
2081 *
2082 * This special call macro adds default arguments to the call and allow us to
2083 * change these later.
2084 *
2085 * @param a_fn The name of the function.
2086 * @param a0 The name of the 1st argument.
2087 * @param a1 The name of the 2nd argument.
2088 * @param a2 The name of the 3rd argument.
2089 * @param a3 The name of the 4th argument.
2090 * @param a4 The name of the 5th argument.
2091 */
2092# define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu, cbInstr, (a0), (a1), (a2), (a3), (a4))
2093
2094/** @} */
2095
2096
2097/** @} */
2098
2099RT_C_DECLS_END
2100
2101#endif
2102
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette