VirtualBox

Changeset 101484 in vbox for trunk/src/VBox/VMM/include


Timestamp:
Oct 18, 2023 1:32:17 AM (16 months ago)
Author:
vboxsync
Message:

VMM/IEM: Basic register allocator sketches that incorporate simple skipping of guest register value loads. Sketched out variable and argument management. Start telling GDB about our jitted code to help with backtraces. ++ bugref:10371

Location:
trunk/src/VBox/VMM/include
Files:
3 edited

  • trunk/src/VBox/VMM/include/IEMInternal.h

    r101448 r101484  
    547547
    548548/** @name IEM_MC_F_XXX - MC block flags/clues.
     549 * @todo Merge with IEM_CIMPL_F_XXX
    549550 * @{ */
    550551#define IEM_MC_F_ONLY_8086          RT_BIT_32(0)
     
    559560#define IEM_MC_F_64BIT              RT_BIT_32(6)
    560561#define IEM_MC_F_NOT_64BIT          RT_BIT_32(7)
     562/** @} */
     563
     564/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
     565 *
     566 * These clues are mainly for the recompiler, so that it can emit correct code.
     567 *
     568 * They are processed by the python script, which also automatically
     569 * calculates flags for MC blocks based on the statements, extending the use of
     570 * these flags to describe MC block behavior to the recompiler core.  The python
     571 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, mainly for
     572 * error checking purposes.  The script itself emits the necessary fEndTb = true
     573 * and similar statements, as this reduces compile time a tiny bit.
     574 *
     575 * @{ */
     576/** Flag set if direct branch, clear if absolute or indirect. */
     577#define IEM_CIMPL_F_BRANCH_DIRECT        RT_BIT_32(0)
     578/** Flag set if indirect branch, clear if direct or relative.
     579 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
     580 * as well as for return instructions (RET, IRET, RETF). */
     581#define IEM_CIMPL_F_BRANCH_INDIRECT      RT_BIT_32(1)
     582/** Flag set if relative branch, clear if absolute or indirect. */
     583#define IEM_CIMPL_F_BRANCH_RELATIVE      RT_BIT_32(2)
     584/** Flag set if conditional branch, clear if unconditional. */
     585#define IEM_CIMPL_F_BRANCH_CONDITIONAL   RT_BIT_32(3)
     586/** Flag set if it's a far branch (changes CS). */
     587#define IEM_CIMPL_F_BRANCH_FAR           RT_BIT_32(4)
     588/** Convenience: Testing any kind of branch. */
     589#define IEM_CIMPL_F_BRANCH_ANY          (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
     590
     591/** Execution flags may change (IEMCPU::fExec). */
     592#define IEM_CIMPL_F_MODE                RT_BIT_32(5)
     593/** May change significant portions of RFLAGS. */
     594#define IEM_CIMPL_F_RFLAGS              RT_BIT_32(6)
     595/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
     596#define IEM_CIMPL_F_STATUS_FLAGS        RT_BIT_32(7)
     597/** May trigger interrupt shadowing. */
     598#define IEM_CIMPL_F_INHIBIT_SHADOW      RT_BIT_32(8)
     599/** May enable interrupts, so recheck IRQ immediately after executing
     600 *  the instruction. */
     601#define IEM_CIMPL_F_CHECK_IRQ_AFTER     RT_BIT_32(9)
     602/** May disable interrupts, so recheck IRQ immediately before executing the
     603 *  instruction. */
     604#define IEM_CIMPL_F_CHECK_IRQ_BEFORE    RT_BIT_32(10)
     605/** Convenience: Check for IRQ both before and after an instruction. */
     606#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
     607/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
     608#define IEM_CIMPL_F_VMEXIT              RT_BIT_32(11)
     609/** May modify FPU state.
     610 * @todo Not sure if this is useful yet.  */
     611#define IEM_CIMPL_F_FPU                 RT_BIT_32(12)
     612/** REP prefixed instruction which may yield before updating PC.
     613 * @todo Not sure if this is useful, REP functions now return non-zero
     614 *       status if they don't update the PC. */
     615#define IEM_CIMPL_F_REP                 RT_BIT_32(13)
     616/** I/O instruction.
     617 * @todo Not sure if this is useful yet.  */
     618#define IEM_CIMPL_F_IO                  RT_BIT_32(14)
     619/** Force end of TB after the instruction. */
     620#define IEM_CIMPL_F_END_TB              RT_BIT_32(15)
     621/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
     622#define IEM_CIMPL_F_XCPT \
     623    (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
    561624/** @} */
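For illustration, here is a minimal sketch of how a recompiler core might consume the clues above when deciding whether a CIMPL call must terminate the current TB; the helper name and the exact policy are assumptions for the example, not the actual VBox decision logic.

    /* Hedged sketch: one way a recompiler core could act on the clue mask.
       Helper name and policy are illustrative assumptions. */
    static bool iemExampleCImplEndsTb(uint32_t fFlags)
    {
        if (fFlags & IEM_CIMPL_F_END_TB)                        /* explicit request */
            return true;
        if (fFlags & (IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT))   /* fExec may change */
            return true;
        if (fFlags & IEM_CIMPL_F_BRANCH_FAR)                    /* CS change invalidates TB assumptions */
            return true;
        return false;
    }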
    562625
     
    779842typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
    780843
    781 /** Native IEM TB 'function' typedef.
    782  * This will throw/longjmp on occation.  */
     844/**
     845 * Native IEM TB 'function' typedef.
     846 *
     847 * This will throw/longjmp on occasion.
     848 *
     849 * @note    AMD64 doesn't have that many non-volatile registers and does sport
     850 *          32-bit address displacements, so we don't need pCtx.
     851 *
     852 *          On ARM64 pCtx allows us to directly address the whole register
     853 *          context without requiring a separate indexing register holding the
     854 *          offset. This saves an instruction loading the offset for each guest
     855 *          CPU context access, at the cost of a non-volatile register.
     856 *          Fortunately, ARM64 has quite a lot more registers.
     857 */
     858typedef
     859#ifdef RT_ARCH_AMD64
     860int FNIEMTBNATIVE(PVMCPUCC pVCpu)
     861#else
     862int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
     863#endif
    783864#if RT_CPLUSPLUS_PREREQ(201700)
    784 typedef int FNIEMTBNATIVE(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP;
    785 #else
    786 typedef int FNIEMTBNATIVE(PVMCPUCC pVCpu);
    787 #endif
     865    IEM_NOEXCEPT_MAY_LONGJMP
     866#endif
     867    ;
    788868/** Pointer to a native IEM TB entry point function.
    789869 * This will throw/longjmp on occasion.  */
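For clarity, a hedged sketch of dispatching a native TB under the two calling conventions of the typedef above; pfnNative is a hypothetical pointer of type FNIEMTBNATIVE *, not a field name taken from the source.

    /* Hedged sketch: invoking a TB entry point on either architecture. */
    #ifdef RT_ARCH_AMD64
        int rc = pfnNative(pVCpu);
    #else
        int rc = pfnNative(pVCpu, &pVCpu->cpum.GstCtx);  /* pCtx saves an indexing register on ARM64 */
    #endif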
  • trunk/src/VBox/VMM/include/IEMMc.h

    r101387 r101484  
    196196#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg)       a_Type const a_Name = (a_Value)
    197197#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg)   a_Type const a_Name = &(a_Local)
     198/** @note IEMAllInstPython.py duplicates the expansion. */
    198199#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
    199200    uint32_t a_Name; \
     
    19721973#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3)  (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
    19731974
    1974 /** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
    1975  *
    1976  * These clues are mainly for the recompiler, so that it can emit correct code.
    1977  *
    1978  * They are processed by the python script and which also automatically
    1979  * calculates flags for MC blocks based on the statements, extending the use of
    1980  * these flags to describe MC block behavior to the recompiler core.  The python
    1981  * script pass the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
    1982  * error checking purposes.  The script emits the necessary fEndTb = true and
    1983  * similar statements as this reduces compile time a tiny bit.
    1984  *
    1985  * @{ */
    1986 /** Flag set if direct branch, clear if absolute or indirect. */
    1987 #define IEM_CIMPL_F_BRANCH_DIRECT        RT_BIT_32(0)
    1988 /** Flag set if indirect branch, clear if direct or relative.
    1989  * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
    1990  * as well as for return instructions (RET, IRET, RETF). */
    1991 #define IEM_CIMPL_F_BRANCH_INDIRECT      RT_BIT_32(1)
    1992 /** Flag set if relative branch, clear if absolute or indirect. */
    1993 #define IEM_CIMPL_F_BRANCH_RELATIVE      RT_BIT_32(2)
    1994 /** Flag set if conditional branch, clear if unconditional. */
    1995 #define IEM_CIMPL_F_BRANCH_CONDITIONAL   RT_BIT_32(3)
    1996 /** Flag set if it's a far branch (changes CS). */
    1997 #define IEM_CIMPL_F_BRANCH_FAR           RT_BIT_32(4)
    1998 /** Convenience: Testing any kind of branch. */
    1999 #define IEM_CIMPL_F_BRANCH_ANY          (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)
    2000 
    2001 /** Execution flags may change (IEMCPU::fExec). */
    2002 #define IEM_CIMPL_F_MODE                RT_BIT_32(5)
    2003 /** May change significant portions of RFLAGS. */
    2004 #define IEM_CIMPL_F_RFLAGS              RT_BIT_32(6)
    2005 /** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS. */
    2006 #define IEM_CIMPL_F_STATUS_FLAGS        RT_BIT_32(7)
    2007 /** May trigger interrupt shadowing. */
    2008 #define IEM_CIMPL_F_INHIBIT_SHADOW      RT_BIT_32(8)
    2009 /** May enable interrupts, so recheck IRQ immediately afterwards executing
    2010  *  the instruction. */
    2011 #define IEM_CIMPL_F_CHECK_IRQ_AFTER     RT_BIT_32(9)
    2012 /** May disable interrupts, so recheck IRQ immediately before executing the
    2013  *  instruction. */
    2014 #define IEM_CIMPL_F_CHECK_IRQ_BEFORE    RT_BIT_32(10)
    2015 /** Convenience: Check for IRQ both before and after an instruction. */
    2016 #define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
    2017 /** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
    2018 #define IEM_CIMPL_F_VMEXIT              RT_BIT_32(11)
    2019 /** May modify FPU state.
    2020  * @todo Not sure if this is useful yet.  */
    2021 #define IEM_CIMPL_F_FPU                 RT_BIT_32(12)
    2022 /** REP prefixed instruction which may yield before updating PC.
    2023  * @todo Not sure if this is useful, REP functions now return non-zero
    2024  *       status if they don't update the PC. */
    2025 #define IEM_CIMPL_F_REP                 RT_BIT_32(13)
    2026 /** I/O instruction.
    2027  * @todo Not sure if this is useful yet.  */
    2028 #define IEM_CIMPL_F_IO                  RT_BIT_32(14)
    2029 /** Force end of TB after the instruction. */
    2030 #define IEM_CIMPL_F_END_TB              RT_BIT_32(15)
    2031 /** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
    2032 #define IEM_CIMPL_F_XCPT \
    2033     (IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)
    2034 /** @} */
    20351975
    20361976/** @def IEM_MC_CALL_CIMPL_HLP_RET
     
    24432383
    24442384/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
    2445  *  IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
     2385 *  IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ...
     2386 * @note IEMAllInstPython.py duplicates the expansion.  */
    24462387#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
    24472388    IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r101387 r101484  
    4141 *
    4242 * @{  */
    43 /** The size of the area for stack variables and spills and stuff. */
    44 #define IEMNATIVE_FRAME_VAR_SIZE            0x40
     43/** The size of the area for stack variables and spills and stuff.
     44 * @note This limit is duplicated in the python script(s). */
     45#define IEMNATIVE_FRAME_VAR_SIZE            0xc0
    4546#ifdef RT_ARCH_AMD64
    4647/** Number of stack arguments slots for calls made from the frame. */
     
    113114 * @{ */
    114115/** @def IEMNATIVE_REG_FIXED_PVMCPU
    115  * The register number hold in pVCpu pointer.  */
     116 * The number of the register holding the pVCpu pointer.  */
     117/** @def IEMNATIVE_REG_FIXED_PCPUMCTX
     118 * The number of the register holding the &pVCpu->cpum.GstCtx pointer.
     119 * @note This is not available on AMD64, only ARM64. */
    116120/** @def IEMNATIVE_REG_FIXED_TMP0
    117121 * Dedicated temporary register.
    118122 * @todo replace this by a register allocator and content tracker.  */
     123/** @def IEMNATIVE_REG_FIXED_MASK
     124 * Mask of GPRs with fixed assignments, either by us or dictated by the CPU/OS
     125 * architecture. */
    119126#ifdef RT_ARCH_AMD64
    120127# define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
    121128# define IEMNATIVE_REG_FIXED_TMP0           X86_GREG_x11
    122 
    123 #elif defined(RT_ARCH_ARM64)
     129# define IEMNATIVE_REG_FIXED_MASK          (  RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
     130                                            | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
     131                                            | RT_BIT_32(X86_GREG_xSP) \
     132                                            | RT_BIT_32(X86_GREG_xBP) )
     133
     134#elif defined(RT_ARCH_ARM64) || defined(DOXYGEN_RUNNING)
    124135# define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
     136# define IEMNATIVE_REG_FIXED_PCPUMCTX       ARMV8_A64_REG_X27
    125137# define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
     138# define IEMNATIVE_REG_FIXED_MASK           (  RT_BIT_32(ARMV8_A64_REG_SP) \
     139                                             | RT_BIT_32(ARMV8_A64_REG_LR) \
     140                                             | RT_BIT_32(ARMV8_A64_REG_BP) \
     141                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
     142                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX) \
     143                                             | RT_BIT_32(ARMV8_A64_REG_X18) \
     144                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) )
    126145
    127146#else
     
    144163/** @def IEMNATIVE_CALL_ARG3_GREG
    145164 * The general purpose register carrying argument \#3. */
     165/** @def IEMNATIVE_CALL_VOLATILE_GREG_MASK
     166 * Mask of registers the callee will not save and may trash. */
    146167#ifdef RT_ARCH_AMD64
    147168# define IEMNATIVE_CALL_RET_GREG             X86_GREG_xAX
     
    153174#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
    154175#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
     176#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
     177                                             | RT_BIT_32(X86_GREG_xCX) \
     178                                             | RT_BIT_32(X86_GREG_xDX) \
     179                                             | RT_BIT_32(X86_GREG_x8) \
     180                                             | RT_BIT_32(X86_GREG_x9) \
     181                                             | RT_BIT_32(X86_GREG_x10) \
     182                                             | RT_BIT_32(X86_GREG_x11) )
    155183# else
    156184#  define IEMNATIVE_CALL_ARG_GREG_COUNT     6
     
    161189#  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
    162190#  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
     191#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
     192                                             | RT_BIT_32(X86_GREG_xCX) \
     193                                             | RT_BIT_32(X86_GREG_xDX) \
     194                                             | RT_BIT_32(X86_GREG_xDI) \
     195                                             | RT_BIT_32(X86_GREG_xSI) \
     196                                             | RT_BIT_32(X86_GREG_x8) \
     197                                             | RT_BIT_32(X86_GREG_x9) \
     198                                             | RT_BIT_32(X86_GREG_x10) \
     199                                             | RT_BIT_32(X86_GREG_x11) )
    163200# endif
    164201
     
    174211# define IEMNATIVE_CALL_ARG6_GREG           ARMV8_A64_REG_X6
    175212# define IEMNATIVE_CALL_ARG7_GREG           ARMV8_A64_REG_X7
     213# define IEMNATIVE_CALL_VOLATILE_GREG_MASK  (  RT_BIT_32(ARMV8_A64_REG_X0) \
     214                                             | RT_BIT_32(ARMV8_A64_REG_X1) \
     215                                             | RT_BIT_32(ARMV8_A64_REG_X2) \
     216                                             | RT_BIT_32(ARMV8_A64_REG_X3) \
     217                                             | RT_BIT_32(ARMV8_A64_REG_X4) \
     218                                             | RT_BIT_32(ARMV8_A64_REG_X5) \
     219                                             | RT_BIT_32(ARMV8_A64_REG_X6) \
     220                                             | RT_BIT_32(ARMV8_A64_REG_X7) \
     221                                             | RT_BIT_32(ARMV8_A64_REG_X8) \
     222                                             | RT_BIT_32(ARMV8_A64_REG_X9) \
     223                                             | RT_BIT_32(ARMV8_A64_REG_X10) \
     224                                             | RT_BIT_32(ARMV8_A64_REG_X11) \
     225                                             | RT_BIT_32(ARMV8_A64_REG_X12) \
     226                                             | RT_BIT_32(ARMV8_A64_REG_X13) \
     227                                             | RT_BIT_32(ARMV8_A64_REG_X14) \
     228                                             | RT_BIT_32(ARMV8_A64_REG_X15) \
     229                                             | RT_BIT_32(ARMV8_A64_REG_X16) \
     230                                             | RT_BIT_32(ARMV8_A64_REG_X17) )
    176231
    177232#endif
    178233
    179234/** @} */
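As a hedged example of how the volatile mask is meant to be used: after a call is emitted, everything the callee may trash can be flushed in one go (iemNativeRegFreeAndFlushMask is declared further down; the call site itself is an assumption).

    /* Hedged example: drop all caller-saved register contents after a call. */
    iemNativeRegFreeAndFlushMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);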
     235
     236
     237/** @def IEMNATIVE_HST_GREG_COUNT
     238 * Number of host general purpose registers we track. */
     239/** @def IEMNATIVE_HST_GREG_MASK
     240 * Mask corresponding to IEMNATIVE_HST_GREG_COUNT that can be applied to
     241 * inverted register masks and such to get down to a correct set of regs. */
     242#ifdef RT_ARCH_AMD64
     243# define IEMNATIVE_HST_GREG_COUNT           16
     244# define IEMNATIVE_HST_GREG_MASK            UINT32_C(0xffff)
     245
     246#elif defined(RT_ARCH_ARM64)
     247# define IEMNATIVE_HST_GREG_COUNT           32
     248# define IEMNATIVE_HST_GREG_MASK            UINT32_MAX
     249#else
     250# error "Port me!"
     251#endif
     252
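A worked illustration of how the COUNT/MASK pair composes with the other masks when picking a free host register (a sketch, not the allocator code; bmHstRegs is the allocation bitmap of the recompiler state shown further down):

    /* Hedged illustration: host registers that exist, are not fixed, and
       are not currently allocated. */
    uint32_t const fRegsFree = IEMNATIVE_HST_GREG_MASK
                             & ~IEMNATIVE_REG_FIXED_MASK
                             & ~pReNative->bmHstRegs;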
    180253
    181254/** Native code generator label types. */
     
    232305typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
    233306
     307
     308/**
     309 * Guest registers that can be shadowed in GPRs.
     310 */
     311typedef enum IEMNATIVEGSTREG : uint8_t
     312{
     313    kIemNativeGstReg_GprFirst      = 0,
     314    kIemNativeGstReg_GprLast       = 15,
     315    kIemNativeGstReg_Pc,
     316    kIemNativeGstReg_Rflags,
     317    /* gap: 18..23 */
     318    kIemNativeGstReg_SegSelFirst   = 24,
     319    kIemNativeGstReg_SegSelLast    = 29,
     320    kIemNativeGstReg_SegBaseFirst  = 30,
     321    kIemNativeGstReg_SegBaseLast   = 35,
     322    kIemNativeGstReg_SegLimitFirst = 36,
     323    kIemNativeGstReg_SegLimitLast  = 41,
     324    kIemNativeGstReg_End
     325} IEMNATIVEGSTREG;
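The GPR and segment ranges are laid out so an x86 register index can simply be added to the range start; a hedged one-liner showing the idea:

    /* Hedged example: translating an x86 GPR index to its IEMNATIVEGSTREG value. */
    IEMNATIVEGSTREG const enmGstReg = (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xSP);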
     326
     327/**
     328 * Guest registers (classes) that can be referenced.
     329 */
     330typedef enum IEMNATIVEGSTREGREF : uint8_t
     331{
     332    kIemNativeGstRegRef_Invalid = 0,
     333    kIemNativeGstRegRef_Gpr,
     334    kIemNativeGstRegRef_GprHighByte,    /**< AH, CH, DH, BH */
     335    kIemNativeGstRegRef_EFlags,
     336    kIemNativeGstRegRef_MxCsr,
     337    kIemNativeGstRegRef_FpuReg,
     338    kIemNativeGstRegRef_MReg,
     339    kIemNativeGstRegRef_XReg,
     340    kIemNativeGstRegRef_YReg,
     341    kIemNativeGstRegRef_End
     342} IEMNATIVEGSTREGREF;
     343
     344
     345/** Variable kinds. */
     346typedef enum IEMNATIVEVARKIND : uint8_t
     347{
     348    /** Customary invalid zero value. */
     349    kIemNativeVarKind_Invalid = 0,
     350    /** This is either in a register or on the stack. */
     351    kIemNativeVarKind_Stack,
     352    /** Immediate value - loaded into register when needed, or can live on the
     353     *  stack if referenced (in theory). */
     354    kIemNativeVarKind_Immediate,
     355    /** Variable reference - loaded into register when needed, never stack. */
     356    kIemNativeVarKind_VarRef,
     357    /** Guest register reference - loaded into register when needed, never stack. */
     358    kIemNativeVarKind_GstRegRef,
     359    /** End of valid values. */
     360    kIemNativeVarKind_End
     361} IEMNATIVEVARKIND;
     362
     363
     364/** Variable or argument. */
     365typedef struct IEMNATIVEVAR
     366{
     367    /** The kind of variable. */
     368    IEMNATIVEVARKIND    enmKind;
     369    /** The variable size in bytes. */
     370    uint8_t             cbVar;
     371    /** The first stack slot (uint64_t), except for immediate and references
     372     *  where it usually is UINT8_MAX. */
     373    uint8_t             idxStackSlot;
     374    /** The host register allocated for the variable, UINT8_MAX if not. */
     375    uint8_t             idxReg;
     376    /** The argument number if argument, UINT8_MAX if regular variable. */
     377    uint8_t             uArgNo;
     378    /** If referenced, the index of the variable referencing this one, otherwise
     379     *  UINT8_MAX.  A referenced variable must only be placed on the stack and
     380     *  must be either kIemNativeVarKind_Stack or kIemNativeVarKind_Immediate. */
     381    uint8_t             idxReferrerVar;
     382    /** Guest register being shadowed here, kIemNativeGstReg_End(/UINT8_MAX) if not. */
     383    IEMNATIVEGSTREG     enmGstReg;
     384    uint8_t             bAlign;
     385
     386    union
     387    {
     388        /** kIemNativeVarKind_Immediate: The immediate value. */
     389        uint64_t            uValue;
     390        /** kIemNativeVarKind_VarRef: The index of the variable being referenced. */
     391        uint8_t             idxRefVar;
     392        /** kIemNativeVarKind_GstRegRef: The guest register being referenced. */
     393        struct
     394        {
     395            /** The class of register. */
     396            IEMNATIVEGSTREGREF  enmClass;
     397            /** Index within the class. */
     398            uint8_t             idx;
     399        } GstRegRef;
     400    } u;
     401} IEMNATIVEVAR;
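To make the field semantics concrete, here is a hedged sketch of populating an immediate variable; the helper is hypothetical, and the bmVars/aVars fields referenced are those of the IEMRECOMPILERSTATE structure shown below.

    /* Hedged sketch: allocating and initializing an immediate variable. */
    static uint8_t iemExampleVarAllocImm(PIEMRECOMPILERSTATE pReNative, uint8_t cbVar, uint64_t uValue)
    {
        uint8_t const idxVar = (uint8_t)ASMBitFirstSetU32(~pReNative->bmVars) - 1; /* first free slot */
        pReNative->bmVars   |= RT_BIT_32(idxVar);
        IEMNATIVEVAR *pVar   = &pReNative->aVars[idxVar];
        pVar->enmKind        = kIemNativeVarKind_Immediate;
        pVar->cbVar          = cbVar;
        pVar->idxStackSlot   = UINT8_MAX;   /* no stack slot unless referenced */
        pVar->idxReg         = UINT8_MAX;   /* loaded into a register on demand */
        pVar->uArgNo         = UINT8_MAX;   /* regular variable, not an argument */
        pVar->idxReferrerVar = UINT8_MAX;
        pVar->enmGstReg      = kIemNativeGstReg_End;
        pVar->u.uValue       = uValue;
        return idxVar;
    }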
     402
     403/** What is being kept in a host register. */
     404typedef enum IEMNATIVEWHAT : uint8_t
     405{
     406    /** The traditional invalid zero value. */
     407    kIemNativeWhat_Invalid = 0,
     408    /** Mapping a variable (IEMNATIVEHSTREG::idxVar). */
     409    kIemNativeWhat_Var,
     410    /** Temporary register, this is typically freed when a MC completes. */
     411    kIemNativeWhat_Tmp,
     412    /** Call argument w/o a variable mapping.  This is free (via
     413     * IEMNATIVE_CALL_VOLATILE_GREG_MASK) after the call is emitted. */
     414    kIemNativeWhat_Arg,
     415    /** Return status code.
     416     * @todo not sure if we need this... */
     417    kIemNativeWhat_rc,
     418    /** The fixed pVCpu (PVMCPUCC) register.
     419     * @todo consider offsetting this on amd64 to use negative offsets to access
     420     *       more members using 8-byte disp. */
     421    kIemNativeWhat_pVCpuFixed,
     422    /** The fixed pCtx (PCPUMCTX) register.
     423     * @todo consider offsetting this on amd64 to use negative offsets to access
     424     *       more members using 8-byte disp. */
     425    kIemNativeWhat_pCtxFixed,
     426    /** Fixed temporary register. */
     427    kIemNativeWhat_FixedTmp,
     428    /** Register reserved by the CPU or OS architecture. */
     429    kIemNativeWhat_FixedReserved,
     430    /** End of valid values. */
     431    kIemNativeWhat_End
     432} IEMNATIVEWHAT;
     433
     434/**
     435 * Host general register entry.
     436 *
     437 * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstRegs.
     438 *
     439 * @todo Track immediate values in host registers similarly to how we track the
     440 *       guest register shadow copies. For it to be real helpful, though,
     441 *       we probably need to know which will be reused and put them into
     442 *       non-volatile registers, otherwise it's going to be more or less
     443 *       restricted to an instruction or two.
     444 */
     445typedef struct IEMNATIVEHSTREG
     446{
     447    /** Set of guest registers this one shadows.
     448     *
     449     * Using a bitmap here so we can designate the same host register as a copy
     450     * for more than one guest register.  This is expected to be useful in
     451     * situations where one value is copied to several registers in a sequence.
     452     * If the mapping is 1:1, then we'd have to pick which side of a 'MOV SRC,DST'
     453     * sequence this register should follow as the copy, and there will always
     454     * be places where we'd pick the wrong one.
     455     */
     456    uint64_t        fGstRegShadows;
     457    /** What is being kept in this register. */
     458    IEMNATIVEWHAT   enmWhat;
     459    /** Variable index if holding a variable, otherwise UINT8_MAX. */
     460    uint8_t         idxVar;
     461    /** Alignment padding. */
     462    uint8_t         abAlign[6];
     463} IEMNATIVEHSTREG;
     464
     465
    234466/**
    235467 * Native recompiler state.
     
    260492    /** The translation block being recompiled. */
    261493    PCIEMTB                     pTbOrg;
     494
     495    /** Allocation bitmap for aHstRegs. */
     496    uint32_t                    bmHstRegs;
     497
     498    /** Bitmap marking which host register contains guest register shadow copies.
     499     * This is used during register allocation to try preserve copies.  */
     500    uint32_t                    bmHstRegsWithGstShadow;
     501    /** Bitmap marking valid entries in aidxGstRegShadows. */
     502    uint64_t                    bmGstRegShadows;
     503
     504    /** Allocation bitmap for aVars. */
     505    uint32_t                    bmVars;
     506    uint32_t                    u32Align;
     507    union
     508    {
     509        /** Indexes of the argument variables, UINT8_MAX if not valid. */
     510        uint8_t                 aidxArgVars[8];
     511        /** For more efficient resetting. */
     512        uint64_t                u64ArgVars;
     513    };
     514
     515    /** Host register allocation tracking. */
     516    IEMNATIVEHSTREG             aHstRegs[IEMNATIVE_HST_GREG_COUNT];
     517    /** Maps a guest register to a host GPR (index by IEMNATIVEGSTREG).
     518     * Entries are only valid if the corresponding bit in bmGstRegShadows is set.
     519     * (A shadow copy of a guest register can only be held in one host register,
     520     * there are no duplicate copies or ambiguities like that). */
     521    uint8_t                     aidxGstRegShadows[kIemNativeGstReg_End];
     522    /** Variables and arguments. */
     523    IEMNATIVEVAR                aVars[16];
    262524} IEMRECOMPILERSTATE;
    263525/** Pointer to a native recompiler state. */
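Tying the shadow-tracking fields together, a hedged sketch of the lookup that enables the "skipping of guest register value loads" mentioned in the commit message (helper name illustrative; the real allocator logic may differ):

    /* Hedged sketch: find a host GPR already shadowing a guest register. */
    static uint8_t iemExampleFindGstRegShadow(PIEMRECOMPILERSTATE pReNative, IEMNATIVEGSTREG enmGstReg)
    {
        if (pReNative->bmGstRegShadows & RT_BIT_64(enmGstReg))
            return pReNative->aidxGstRegShadows[enmGstReg]; /* host GPR holding the copy */
        return UINT8_MAX;   /* caller must emit a load and record the new shadow */
    }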
     
    293555DECLHIDDEN(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    294556                                                        uint32_t cInstrReq) RT_NOEXCEPT;
     557
     558DECLHIDDEN(uint8_t)         iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
     559                                                 bool fPreferVolatile = true) RT_NOEXCEPT;
     560DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpForGuest(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
     561                                                         IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
     562DECLHIDDEN(uint8_t)         iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT;
     563DECLHIDDEN(uint32_t)        iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT;
     564DECLHIDDEN(uint8_t)         iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
     565DECLHIDDEN(void)            iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
     566DECLHIDDEN(void)            iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
     567DECLHIDDEN(void)            iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
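A hedged usage sketch for the allocator API above; the error handling and the call site are assumptions, not copied from the recompiler:

    /* Hedged sketch: allocate a host register shadowing guest RAX, use it, free it. */
    uint8_t const idxTmpReg = iemNativeRegAllocTmpForGuest(pReNative, &off,
                                                           (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xAX));
    AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX);
    /* ... emit instructions using idxTmpReg; the load is skipped when guest
       RAX is already shadowed in a host register ... */
    iemNativeRegFreeTmp(pReNative, idxTmpReg);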
    295568
    296569DECLHIDDEN(uint32_t)        iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     
    468741
    469742
    470 /**
    471  * Emits a 32-bit GPR load of a VCpu value.
    472  */
    473 DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
    474 {
    475 #ifdef RT_ARCH_AMD64
    476     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    477     AssertReturn(pbCodeBuf, UINT32_MAX);
    478 
    479     /* mov reg32, mem32 */
    480     if (iGpr >= 8)
    481         pbCodeBuf[off++] = X86_OP_REX_R;
    482     pbCodeBuf[off++] = 0x8b;
     743#ifdef RT_ARCH_AMD64
     744/**
     745 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
     746 */
     747DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu)
     748{
    483749    if (offVCpu < 128)
    484750    {
    485         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGpr & 7, IEMNATIVE_REG_FIXED_PVMCPU);
    486         pbCodeBuf[off++] = (uint8_t)offVCpu;
    487     }
    488     else
    489     {
    490         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGpr & 7, IEMNATIVE_REG_FIXED_PVMCPU);
    491         pbCodeBuf[off++] = RT_BYTE1(offVCpu);
    492         pbCodeBuf[off++] = RT_BYTE2(offVCpu);
    493         pbCodeBuf[off++] = RT_BYTE3(offVCpu);
    494         pbCodeBuf[off++] = RT_BYTE4(offVCpu);
    495     }
    496 
    497 #elif RT_ARCH_ARM64
     751        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
     752        pbCodeBuf[off++] = (uint8_t)(int8_t)offVCpu;
     753    }
     754    else
     755    {
     756        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
     757        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offVCpu);
     758        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offVCpu);
     759        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offVCpu);
     760        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offVCpu);
     761    }
     762    return off;
     763}
     764#elif RT_ARCH_ARM64
     765/**
     766 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
     767 */
     768DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
     769                                                       uint32_t offVCpu, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
     770{
    498771    /*
    499772     * There are a couple of ldr variants that takes an immediate offset, so
     
    501774     * help with the addressing.
    502775     */
    503     if (offVCpu < _16K)
     776    if (offVCpu < _4K * cbData && !(offVCpu & (cbData - 1)))
    504777    {
    505778        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
    506779        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    507780        AssertReturn(pu32CodeBuf, UINT32_MAX);
    508         pu32CodeBuf[off++] = UINT32_C(0xb9400000) | (offVCpu << 10) | (IEMNATIVE_REG_FIXED_PVMCPU << 5) | iGpr;
     781        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
     782    }
     783    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
     784    {
     785        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     786        AssertReturn(pu32CodeBuf, UINT32_MAX);
     787        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PCPUMCTX,
     788                                                      (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    509789    }
    510790    else
    511791    {
    512792        /* The offset is too large, so we must load it into a register and use
    513            ldr Wt, [<Xn|SP>, (<Wm>|<Xm>). */
     793           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
    514794        /** @todo reduce offVCpu by >> 3 or >> 2 if it saves instructions? */
    515795        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, offVCpu);
     796
    516797        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    517798        AssertReturn(pu32CodeBuf, UINT32_MAX);
    518         pu32CodeBuf[off++] = UINT32_C(0xb8600800) | ((uint32_t)IEMNATIVE_REG_FIXED_TMP0 << 16)
    519                            | ((uint32_t)IEMNATIVE_REG_FIXED_PVMCPU << 5) | iGpr;
    520     }
     799        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_REG_FIXED_TMP0);
     800    }
     801    return off;
     802}
     803#endif
     804
     805
     806/**
     807 * Emits a 64-bit GPR load of a VCpu value.
     808 */
     809DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     810{
     811#ifdef RT_ARCH_AMD64
     812    /* mov reg64, mem64 */
     813    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     814    AssertReturn(pbCodeBuf, UINT32_MAX);
     815    if (iGpr < 8)
     816        pbCodeBuf[off++] = X86_OP_REX_W;
     817    else
     818        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
     819    pbCodeBuf[off++] = 0x8b;
     820    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     821
     822#elif RT_ARCH_ARM64
     823    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));
     824
     825#else
     826# error "port me"
     827#endif
     828    return off;
     829}
     830
     831
     832/**
     833 * Emits a 32-bit GPR load of a VCpu value.
     834 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
     835 */
     836DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     837{
     838#ifdef RT_ARCH_AMD64
     839    /* mov reg32, mem32 */
     840    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     841    AssertReturn(pbCodeBuf, UINT32_MAX);
     842    if (iGpr >= 8)
     843        pbCodeBuf[off++] = X86_OP_REX_R;
     844    pbCodeBuf[off++] = 0x8b;
     845    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     846
     847#elif RT_ARCH_ARM64
     848    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
     849
     850#else
     851# error "port me"
     852#endif
     853    return off;
     854}
     855
     856
     857/**
     858 * Emits a 16-bit GPR load of a VCpu value.
     859 * @note Bits 16 thru 63 in the GPR will be zero after the operation.
     860 */
     861DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     862{
     863#ifdef RT_ARCH_AMD64
     864    /* movzx reg32, mem16 */
     865    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     866    AssertReturn(pbCodeBuf, UINT32_MAX);
     867    if (iGpr >= 8)
     868        pbCodeBuf[off++] = X86_OP_REX_R;
     869    pbCodeBuf[off++] = 0x0f;
     870    pbCodeBuf[off++] = 0xb7;
     871    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     872
     873#elif RT_ARCH_ARM64
     874    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Half, sizeof(uint16_t));
     875
     876#else
     877# error "port me"
     878#endif
     879    return off;
     880}
     881
     882
     883/**
     884 * Emits an 8-bit GPR load of a VCpu value.
     885 * @note Bits 8 thru 63 in the GPR will be zero after the operation.
     886 */
     887DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     888{
     889#ifdef RT_ARCH_AMD64
     890    /* movzx reg32, mem8 */
     891    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     892    AssertReturn(pbCodeBuf, UINT32_MAX);
     893    if (iGpr >= 8)
     894        pbCodeBuf[off++] = X86_OP_REX_R;
     895    pbCodeBuf[off++] = 0x0f;
     896    pbCodeBuf[off++] = 0xb6;
     897    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     898
     899#elif RT_ARCH_ARM64
     900    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Byte, sizeof(uint8_t));
     901
     902#else
     903# error "port me"
     904#endif
     905    return off;
     906}
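A hedged usage sketch for the per-width loaders above, reading the guest RAX field into host register idxReg (the offset is computed with RT_UOFFSETOF; the call site is illustrative):

    /* Hedged sketch: emit a 64-bit load of pVCpu->cpum.GstCtx.rax into idxReg. */
    off = iemNativeEmitLoadGprFromVCpuU64(pReNative, off, idxReg,
                                          RT_UOFFSETOF(VMCPUCC, cpum.GstCtx.rax));
    AssertReturn(off != UINT32_MAX, UINT32_MAX);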
     907
     908
     909/**
     910 * Emits a store of a GPR value to a 64-bit VCpu field.
     911 */
     912DECLINLINE(uint32_t) iemNativeEmitStoreGprToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     913{
     914#ifdef RT_ARCH_AMD64
     915    /* mov mem64, reg64 */
     916    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     917    AssertReturn(pbCodeBuf, UINT32_MAX);
     918    if (iGpr < 8)
     919        pbCodeBuf[off++] = X86_OP_REX_W;
     920    else
     921        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
     922    pbCodeBuf[off++] = 0x89;
     923    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     924
     925#elif RT_ARCH_ARM64
     926    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Dword, sizeof(uint64_t));
     927
     928#else
     929# error "port me"
     930#endif
     931    return off;
     932}
     933
     934
     935/**
     936 * Emits a store of a GPR value to a 32-bit VCpu field.
     937 */
     938DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     939{
     940#ifdef RT_ARCH_AMD64
     941    /* mov mem32, reg32 */
     942    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     943    AssertReturn(pbCodeBuf, UINT32_MAX);
     944    if (iGpr >= 8)
     945        pbCodeBuf[off++] = X86_OP_REX_R;
     946    pbCodeBuf[off++] = 0x89;
     947    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     948
     949#elif RT_ARCH_ARM64
     950    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t));
     951
     952#else
     953# error "port me"
     954#endif
     955    return off;
     956}
     957
     958
     959/**
     960 * Emits a store of a GPR value to a 16-bit VCpu field.
     961 */
     962DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     963{
     964#ifdef RT_ARCH_AMD64
     965    /* mov mem16, reg16 */
     966    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
     967    AssertReturn(pbCodeBuf, UINT32_MAX);
     968    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
     969    if (iGpr >= 8)
     970        pbCodeBuf[off++] = X86_OP_REX_R;
     971    pbCodeBuf[off++] = 0x89;
     972    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     973
     974#elif RT_ARCH_ARM64
     975    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Half, sizeof(uint16_t));
     976
     977#else
     978# error "port me"
     979#endif
     980    return off;
     981}
     982
     983
     984/**
     985 * Emits a store of a GPR value to an 8-bit VCpu field.
     986 */
     987DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
     988{
     989#ifdef RT_ARCH_AMD64
     990    /* mov mem8, reg8 */
     991    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     992    AssertReturn(pbCodeBuf, UINT32_MAX);
     993    if (iGpr >= 8)
     994        pbCodeBuf[off++] = X86_OP_REX_R;
     995    pbCodeBuf[off++] = 0x88;
     996    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
     997
     998#elif RT_ARCH_ARM64
     999    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
    5211000
    5221001#else
     
    7391218    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    7401219    AssertReturn(pbCodeBuf, UINT32_MAX);
    741     if (iGprDst < 7)
     1220    if (iGprDst < 8)
    7421221        pbCodeBuf[off++] = X86_OP_REX_W;
    7431222    else
     
    7621241#endif
    7631242
     1243
     1244/**
     1245 * Emits a 32-bit GPR addition with an 8-bit signed immediate.
     1246 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
     1247 */
     1248DECLINLINE(uint32_t) iemNativeEmitAddGpr32Imm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8)
     1249{
     1250#if defined(RT_ARCH_AMD64)
     1251    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
     1252    AssertReturn(pbCodeBuf, UINT32_MAX);
     1253    if (iGprDst >= 8)
     1254        pbCodeBuf[off++] = X86_OP_REX_B;
     1255    pbCodeBuf[off++] = 0x83;
     1256    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
     1257    pbCodeBuf[off++] = (uint8_t)iImm8;
     1258
     1259#elif defined(RT_ARCH_ARM64)
     1260    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1261    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1262    if (iImm8 >= 0)
     1263        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8, false /*f64Bit*/);
     1264    else
     1265        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint8_t)-iImm8, false /*f64Bit*/);
     1266
     1267#else
     1268# error "Port me"
     1269#endif
     1270    return off;
     1271}
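A hedged usage sketch for the emitter above: bumping a 32-bit register by a small constant, e.g. advancing an index by an instruction length (the call site and the cbInstr variable are assumptions):

    /* Hedged sketch: add a signed 8-bit constant to a 32-bit host register. */
    off = iemNativeEmitAddGpr32Imm8(pReNative, off, idxReg, (int8_t)cbInstr);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);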
     1272
    7641273/** @} */
    7651274