VirtualBox

Timestamp: Nov 9, 2023 11:38:47 AM (15 months ago)
Author: vboxsync
Message: VMM/IEM: Split out the inlined code emitters from IEMN8veRecompiler.h and into IEMN8veRecompilerEmit.h. bugref:10371
File: 1 copied

Legend:
  (no prefix)  unmodified
  +            added
  -            removed
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h

r102011 → r102022
 /* $Id$ */
 /** @file
- * IEM - Interpreted Execution Manager - Native Recompiler Internals.
+ * IEM - Interpreted Execution Manager - Native Recompiler Inlined Emitters.
  */
 
 /*
- * Copyright (C) 2011-2023 Oracle and/or its affiliates.
+ * Copyright (C) 2023 Oracle and/or its affiliates.
  *
  * This file is part of VirtualBox base platform packages, as
…
  */
 
-#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
-#define VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
+#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h
+#define VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h
 #ifndef RT_WITHOUT_PRAGMA_ONCE
 # pragma once
 #endif
 
-
-/** @defgroup grp_iem_n8ve_re   Native Recompiler Internals.
- * @ingroup grp_iem_int
+#include "IEMN8veRecompiler.h"
+
+
+/** @defgroup grp_iem_n8ve_re_inline    Native Recompiler Inlined Emitters
+ * @ingroup grp_iem_n8ve_re
  * @{
  */
-
-/** @def IEMNATIVE_WITH_TB_DEBUG_INFO
- * Enables generating internal debug info for better TB disassembly dumping. */
-#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
-# define IEMNATIVE_WITH_TB_DEBUG_INFO
-#endif
-
-
-/** @name Stack Frame Layout
- *
- * @{  */
-/** The size of the area for stack variables and spills and stuff.
- * @note This limit is duplicated in the python script(s).  We add 0x40 for
- *       alignment padding. */
-#define IEMNATIVE_FRAME_VAR_SIZE            (0xc0 + 0x40)
-/** Number of 64-bit variable slots (0x100 / 8 = 32). */
-#define IEMNATIVE_FRAME_VAR_SLOTS           (IEMNATIVE_FRAME_VAR_SIZE / 8)
-AssertCompile(IEMNATIVE_FRAME_VAR_SLOTS == 32);
-
-#ifdef RT_ARCH_AMD64
-/** A stack alignment adjustment (between the non-volatile register pushes and
- *  the stack variable area, so the latter is better aligned). */
-# define IEMNATIVE_FRAME_ALIGN_SIZE         8
-
-/** Number of stack argument slots for calls made from the frame. */
-# ifdef RT_OS_WINDOWS
-#  define IEMNATIVE_FRAME_STACK_ARG_COUNT   4
-# else
-#  define IEMNATIVE_FRAME_STACK_ARG_COUNT   2
-# endif
-/** Number of any shadow arguments (spill area) for calls we make. */
-# ifdef RT_OS_WINDOWS
-#  define IEMNATIVE_FRAME_SHADOW_ARG_COUNT  4
-# else
-#  define IEMNATIVE_FRAME_SHADOW_ARG_COUNT  0
-# endif
-
-/** Frame pointer (RBP) relative offset of the last push. */
-# ifdef RT_OS_WINDOWS
-#  define IEMNATIVE_FP_OFF_LAST_PUSH        (7 * -8)
-# else
-#  define IEMNATIVE_FP_OFF_LAST_PUSH        (5 * -8)
-# endif
-/** Frame pointer (RBP) relative offset of the stack variable area (the lowest
- * address for it). */
-# define IEMNATIVE_FP_OFF_STACK_VARS        (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
-/** Frame pointer (RBP) relative offset of the first stack argument for calls. */
-# define IEMNATIVE_FP_OFF_STACK_ARG0        (IEMNATIVE_FP_OFF_STACK_VARS - IEMNATIVE_FRAME_STACK_ARG_COUNT * 8)
-/** Frame pointer (RBP) relative offset of the second stack argument for calls. */
-# define IEMNATIVE_FP_OFF_STACK_ARG1        (IEMNATIVE_FP_OFF_STACK_ARG0 + 8)
-# ifdef RT_OS_WINDOWS
-/** Frame pointer (RBP) relative offset of the third stack argument for calls. */
-#  define IEMNATIVE_FP_OFF_STACK_ARG2       (IEMNATIVE_FP_OFF_STACK_ARG0 + 16)
-/** Frame pointer (RBP) relative offset of the fourth stack argument for calls. */
-#  define IEMNATIVE_FP_OFF_STACK_ARG3       (IEMNATIVE_FP_OFF_STACK_ARG0 + 24)
-# endif
-
-# ifdef RT_OS_WINDOWS
-/** Frame pointer (RBP) relative offset of the first incoming shadow argument. */
-#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG0   (16)
-/** Frame pointer (RBP) relative offset of the second incoming shadow argument. */
-#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG1   (24)
-/** Frame pointer (RBP) relative offset of the third incoming shadow argument. */
-#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG2   (32)
-/** Frame pointer (RBP) relative offset of the fourth incoming shadow argument. */
-#  define IEMNATIVE_FP_OFF_IN_SHADOW_ARG3   (40)
-# endif
-
-#elif RT_ARCH_ARM64
-/** No alignment padding needed for arm64. */
-# define IEMNATIVE_FRAME_ALIGN_SIZE         0
-/** No stack argument slots; the 8 argument registers will suffice. */
-# define IEMNATIVE_FRAME_STACK_ARG_COUNT    0
-/** There is no argument spill area. */
-# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT   0
-
-/** Number of saved registers at the top of our stack frame.
- * This includes the return address and old frame pointer, so x19 thru x30. */
-# define IEMNATIVE_FRAME_SAVE_REG_COUNT     (12)
-/** The size of the register save area (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8). */
-# define IEMNATIVE_FRAME_SAVE_REG_SIZE      (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8)
-
-/** Frame pointer (BP) relative offset of the last push. */
-# define IEMNATIVE_FP_OFF_LAST_PUSH         (7 * -8)
-
-/** Frame pointer (BP) relative offset of the stack variable area (the lowest
- * address for it). */
-# define IEMNATIVE_FP_OFF_STACK_VARS        (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
-
-#else
-# error "port me"
-#endif
-/** @} */
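
To make the layout concrete, the non-Windows AMD64 offsets work out as follows; this is only an illustrative restatement of the macro arithmetic above and is only meaningful for that configuration:

    /* Illustrative: 5 pushes of 8 bytes, 8 bytes of alignment padding, the
     * 0x100 byte (0xc0 + 0x40) variable area, then 2 stack argument slots. */
    AssertCompile(IEMNATIVE_FP_OFF_LAST_PUSH  == -0x28);             /* 5 * -8 */
    AssertCompile(IEMNATIVE_FP_OFF_STACK_VARS == -0x28 - 8 - 0x100); /* -0x130 */
    AssertCompile(IEMNATIVE_FP_OFF_STACK_ARG0 == -0x130 - 2 * 8);    /* -0x140 */
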
-
-
-/** @name Fixed Register Allocation(s)
- * @{ */
-/** @def IEMNATIVE_REG_FIXED_PVMCPU
- * The number of the register holding the pVCpu pointer.  */
-/** @def IEMNATIVE_REG_FIXED_PCPUMCTX
- * The number of the register holding the &pVCpu->cpum.GstCtx pointer.
- * @note This is not available on AMD64, only ARM64. */
-/** @def IEMNATIVE_REG_FIXED_TMP0
- * Dedicated temporary register.
- * @todo replace this by a register allocator and content tracker.  */
-/** @def IEMNATIVE_REG_FIXED_MASK
- * Mask of GPRs with fixed assignments, either by us or dictated by the CPU/OS
- * architecture. */
-#if defined(RT_ARCH_AMD64) && !defined(DOXYGEN_RUNNING)
-# define IEMNATIVE_REG_FIXED_PVMCPU         X86_GREG_xBX
-# define IEMNATIVE_REG_FIXED_TMP0           X86_GREG_x11
-# define IEMNATIVE_REG_FIXED_MASK          (  RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
-                                            | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
-                                            | RT_BIT_32(X86_GREG_xSP) \
-                                            | RT_BIT_32(X86_GREG_xBP) )
-
-#elif defined(RT_ARCH_ARM64) || defined(DOXYGEN_RUNNING)
-# define IEMNATIVE_REG_FIXED_PVMCPU         ARMV8_A64_REG_X28
-# define IEMNATIVE_REG_FIXED_PCPUMCTX       ARMV8_A64_REG_X27
-# define IEMNATIVE_REG_FIXED_TMP0           ARMV8_A64_REG_X15
-# define IEMNATIVE_REG_FIXED_MASK           (  RT_BIT_32(ARMV8_A64_REG_SP) \
-                                             | RT_BIT_32(ARMV8_A64_REG_LR) \
-                                             | RT_BIT_32(ARMV8_A64_REG_BP) \
-                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
-                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X18) \
-                                             | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) )
-
-#else
-# error "port me"
-#endif
-/** @} */
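
A register allocator built on these masks starts from the full host register set and strips the fixed and busy ones. A minimal sketch, not the actual allocator; IEMNATIVE_HST_GREG_MASK and Core.bmHstRegs are defined further down, and ASMBitFirstSetU32 is IPRT's 1-based bit scan:

    /* Sketch only: pick a free, non-fixed host register. */
    uint32_t const fFree  = IEMNATIVE_HST_GREG_MASK
                          & ~IEMNATIVE_REG_FIXED_MASK
                          & ~pReNative->Core.bmHstRegs;
    uint8_t  const idxReg = fFree ? (uint8_t)(ASMBitFirstSetU32(fFree) - 1) : UINT8_MAX;
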
-
-/** @name Call related registers.
- * @{ */
-/** @def IEMNATIVE_CALL_RET_GREG
- * The return value register. */
-/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
- * Number of arguments in registers. */
-/** @def IEMNATIVE_CALL_ARG0_GREG
- * The general purpose register carrying argument \#0. */
-/** @def IEMNATIVE_CALL_ARG1_GREG
- * The general purpose register carrying argument \#1. */
-/** @def IEMNATIVE_CALL_ARG2_GREG
- * The general purpose register carrying argument \#2. */
-/** @def IEMNATIVE_CALL_ARG3_GREG
- * The general purpose register carrying argument \#3. */
-/** @def IEMNATIVE_CALL_VOLATILE_GREG_MASK
- * Mask of registers the callee will not save and may trash. */
-#ifdef RT_ARCH_AMD64
-# define IEMNATIVE_CALL_RET_GREG            X86_GREG_xAX
-
-# ifdef RT_OS_WINDOWS
-#  define IEMNATIVE_CALL_ARG_GREG_COUNT     4
-#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xCX
-#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xDX
-#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_x8
-#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_x9
-#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
-                                             | RT_BIT_32(X86_GREG_xCX) \
-                                             | RT_BIT_32(X86_GREG_xDX) \
-                                             | RT_BIT_32(X86_GREG_x8) \
-                                             | RT_BIT_32(X86_GREG_x9) \
-                                             | RT_BIT_32(X86_GREG_x10) \
-                                             | RT_BIT_32(X86_GREG_x11) )
-# else
-#  define IEMNATIVE_CALL_ARG_GREG_COUNT     6
-#  define IEMNATIVE_CALL_ARG0_GREG          X86_GREG_xDI
-#  define IEMNATIVE_CALL_ARG1_GREG          X86_GREG_xSI
-#  define IEMNATIVE_CALL_ARG2_GREG          X86_GREG_xDX
-#  define IEMNATIVE_CALL_ARG3_GREG          X86_GREG_xCX
-#  define IEMNATIVE_CALL_ARG4_GREG          X86_GREG_x8
-#  define IEMNATIVE_CALL_ARG5_GREG          X86_GREG_x9
-#  define IEMNATIVE_CALL_VOLATILE_GREG_MASK (  RT_BIT_32(X86_GREG_xAX) \
-                                             | RT_BIT_32(X86_GREG_xCX) \
-                                             | RT_BIT_32(X86_GREG_xDX) \
-                                             | RT_BIT_32(X86_GREG_xDI) \
-                                             | RT_BIT_32(X86_GREG_xSI) \
-                                             | RT_BIT_32(X86_GREG_x8) \
-                                             | RT_BIT_32(X86_GREG_x9) \
-                                             | RT_BIT_32(X86_GREG_x10) \
-                                             | RT_BIT_32(X86_GREG_x11) )
-# endif
-
-#elif defined(RT_ARCH_ARM64)
-# define IEMNATIVE_CALL_RET_GREG            ARMV8_A64_REG_X0
-# define IEMNATIVE_CALL_ARG_GREG_COUNT      8
-# define IEMNATIVE_CALL_ARG0_GREG           ARMV8_A64_REG_X0
-# define IEMNATIVE_CALL_ARG1_GREG           ARMV8_A64_REG_X1
-# define IEMNATIVE_CALL_ARG2_GREG           ARMV8_A64_REG_X2
-# define IEMNATIVE_CALL_ARG3_GREG           ARMV8_A64_REG_X3
-# define IEMNATIVE_CALL_ARG4_GREG           ARMV8_A64_REG_X4
-# define IEMNATIVE_CALL_ARG5_GREG           ARMV8_A64_REG_X5
-# define IEMNATIVE_CALL_ARG6_GREG           ARMV8_A64_REG_X6
-# define IEMNATIVE_CALL_ARG7_GREG           ARMV8_A64_REG_X7
-# define IEMNATIVE_CALL_VOLATILE_GREG_MASK  (  RT_BIT_32(ARMV8_A64_REG_X0) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X1) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X2) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X3) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X4) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X5) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X6) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X7) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X8) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X9) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X10) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X11) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X12) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X13) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X14) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X15) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X16) \
-                                             | RT_BIT_32(ARMV8_A64_REG_X17) )
-
-#endif
-
-/** This is the maximum argument count we'll ever need. */
-#define IEMNATIVE_CALL_MAX_ARG_COUNT        7
-/** @} */
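
To set up a helper call, the argument registers are loaded in order and everything volatile is considered trashed afterwards. A hedged sketch; the emitter names, uSomeValue, and iemSomeHelper are illustrative assumptions, not necessarily what this header provides:

    off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG,
                                      IEMNATIVE_REG_FIXED_PVMCPU);
    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, uSomeValue);
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemSomeHelper);
    /* All registers in IEMNATIVE_CALL_VOLATILE_GREG_MASK must now be
     * considered trashed. */
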
-
-
-/** @def IEMNATIVE_HST_GREG_COUNT
- * Number of host general purpose registers we track. */
-/** @def IEMNATIVE_HST_GREG_MASK
- * Mask corresponding to IEMNATIVE_HST_GREG_COUNT that can be applied to
- * inverted register masks and such to get down to a correct set of regs. */
-#ifdef RT_ARCH_AMD64
-# define IEMNATIVE_HST_GREG_COUNT           16
-# define IEMNATIVE_HST_GREG_MASK            UINT32_C(0xffff)
-
-#elif defined(RT_ARCH_ARM64)
-# define IEMNATIVE_HST_GREG_COUNT           32
-# define IEMNATIVE_HST_GREG_MASK            UINT32_MAX
-#else
-# error "Port me!"
-#endif
-
-
-/** Native code generator label types. */
-typedef enum
-{
-    kIemNativeLabelType_Invalid = 0,
-    /* Labels w/o data, only one instance per TB: */
-    kIemNativeLabelType_Return,
-    kIemNativeLabelType_ReturnBreak,
-    kIemNativeLabelType_ReturnWithFlags,
-    kIemNativeLabelType_NonZeroRetOrPassUp,
-    kIemNativeLabelType_RaiseGp0,
-    /* Labels with data, potentially multiple instances per TB: */
-    kIemNativeLabelType_If,
-    kIemNativeLabelType_Else,
-    kIemNativeLabelType_Endif,
-    kIemNativeLabelType_CheckIrq,
-    kIemNativeLabelType_End
-} IEMNATIVELABELTYPE;
-
-/** Native code generator label definition. */
-typedef struct IEMNATIVELABEL
-{
-    /** Code offset if defined, UINT32_MAX if it needs to be generated after/in
-     * the epilog. */
-    uint32_t    off;
-    /** The type of label (IEMNATIVELABELTYPE). */
-    uint16_t    enmType;
-    /** Additional label data, type specific. */
-    uint16_t    uData;
-} IEMNATIVELABEL;
-/** Pointer to a label. */
-typedef IEMNATIVELABEL *PIEMNATIVELABEL;
-
-
-/** Native code generator fixup types.  */
-typedef enum
-{
-    kIemNativeFixupType_Invalid = 0,
-#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
-    /** AMD64 fixup: PC relative 32-bit with addend in offAddend. */
-    kIemNativeFixupType_Rel32,
-#elif defined(RT_ARCH_ARM64)
-    /** ARM64 fixup: PC relative offset at bits 25:0 (B, BL).  */
-    kIemNativeFixupType_RelImm26At0,
-    /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ, B.CC).  */
-    kIemNativeFixupType_RelImm19At5,
-    /** ARM64 fixup: PC relative offset at bits 18:5 (TBZ, TBNZ).  */
-    kIemNativeFixupType_RelImm14At5,
-#endif
-    kIemNativeFixupType_End
-} IEMNATIVEFIXUPTYPE;
-
-/** Native code generator fixup. */
-typedef struct IEMNATIVEFIXUP
-{
-    /** Code offset of the fixup location. */
-    uint32_t    off;
-    /** The IEMNATIVELABEL this is a fixup for. */
-    uint16_t    idxLabel;
-    /** The fixup type (IEMNATIVEFIXUPTYPE). */
-    uint8_t     enmType;
-    /** Addend or other data. */
-    int8_t      offAddend;
-} IEMNATIVEFIXUP;
-/** Pointer to a native code generator fixup. */
-typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
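
Labels and fixups cooperate on forward branches: the branch is emitted with a placeholder displacement plus a fixup entry, which gets patched once iemNativeLabelDefine() supplies the label's offset. A minimal AMD64-flavoured sketch built on the prototypes declared further below; the uint8_t cast assumes byte-sized native instructions on AMD64, and the -4 addend assumes rel32 is relative to the end of the instruction:

    /* Sketch: forward 'jmp rel32' to the per-TB ReturnBreak label. */
    uint32_t const  idxLabel  = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnBreak);
    uint8_t * const pbCodeBuf = (uint8_t *)iemNativeInstrBufEnsure(pReNative, off, 5);
    pbCodeBuf[off++] = 0xe9;                        /* jmp rel32 */
    iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
    pbCodeBuf[off++] = 0; pbCodeBuf[off++] = 0;     /* placeholder displacement, */
    pbCodeBuf[off++] = 0; pbCodeBuf[off++] = 0;     /* patched via the fixup.    */
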
-
-
-/**
- * Guest registers that can be shadowed in GPRs.
- */
-typedef enum IEMNATIVEGSTREG : uint8_t
-{
-    kIemNativeGstReg_GprFirst      = 0,
-    kIemNativeGstReg_GprLast       = kIemNativeGstReg_GprFirst + 15,
-    kIemNativeGstReg_Pc,
-    kIemNativeGstReg_EFlags,            /**< 32-bit, includes internal flags.  */
-    kIemNativeGstReg_SegSelFirst,
-    kIemNativeGstReg_SegSelLast    = kIemNativeGstReg_SegSelFirst + 5,
-    kIemNativeGstReg_SegBaseFirst,
-    kIemNativeGstReg_SegBaseLast   = kIemNativeGstReg_SegBaseFirst + 5,
-    kIemNativeGstReg_SegLimitFirst,
-    kIemNativeGstReg_SegLimitLast  = kIemNativeGstReg_SegLimitFirst + 5,
-    kIemNativeGstReg_End
-} IEMNATIVEGSTREG;
-
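
The First/Last pairs span ranges indexed by the architectural register number, e.g. (illustrative; X86_GREG_xAX and X86_SREG_ES are the usual VBox constants):

    IEMNATIVEGSTREG const enmGstRax    = (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst     + X86_GREG_xAX);
    IEMNATIVEGSTREG const enmGstEsBase = (IEMNATIVEGSTREG)(kIemNativeGstReg_SegBaseFirst + X86_SREG_ES);
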
-/**
- * Intended use statement for iemNativeRegAllocTmpForGuestReg().
- */
-typedef enum IEMNATIVEGSTREGUSE
-{
-    /** The usage is read-only, the register holding the guest register
-     * shadow copy will not be modified by the caller. */
-    kIemNativeGstRegUse_ReadOnly = 0,
-    /** The caller will update the guest register (think: PC += cbInstr).
-     * The guest shadow copy will follow the returned register. */
-    kIemNativeGstRegUse_ForUpdate,
-    /** The caller will use the guest register value as input in a calculation
-     * and the host register will be modified.
-     * This means that the returned host register will not be marked as a shadow
-     * copy of the guest register. */
-    kIemNativeGstRegUse_Calculation
-} IEMNATIVEGSTREGUSE;
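
For instance, advancing the guest PC asks for its shadow in ForUpdate mode so the shadow association follows the modified register. A sketch using the allocator/free prototypes declared below; the add emitter name and cbInstr are assumptions:

    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                             kIemNativeGstReg_Pc,
                                                             kIemNativeGstRegUse_ForUpdate);
    off = iemNativeEmitAddGprImm8(pReNative, off, idxPcReg, cbInstr); /* assumed emitter */
    iemNativeRegFreeTmp(pReNative, idxPcReg);
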
-
-/**
- * Guest registers (classes) that can be referenced.
- */
-typedef enum IEMNATIVEGSTREGREF : uint8_t
-{
-    kIemNativeGstRegRef_Invalid = 0,
-    kIemNativeGstRegRef_Gpr,
-    kIemNativeGstRegRef_GprHighByte,    /**< AH, CH, DH, BH. */
-    kIemNativeGstRegRef_EFlags,
-    kIemNativeGstRegRef_MxCsr,
-    kIemNativeGstRegRef_FpuReg,
-    kIemNativeGstRegRef_MReg,
-    kIemNativeGstRegRef_XReg,
-    //kIemNativeGstRegRef_YReg, - doesn't work.
-    kIemNativeGstRegRef_End
-} IEMNATIVEGSTREGREF;
-
-
-/** Variable kinds. */
-typedef enum IEMNATIVEVARKIND : uint8_t
-{
-    /** Customary invalid zero value. */
-    kIemNativeVarKind_Invalid = 0,
-    /** This is either in a register or on the stack. */
-    kIemNativeVarKind_Stack,
-    /** Immediate value - loaded into register when needed, or can live on the
-     *  stack if referenced (in theory). */
-    kIemNativeVarKind_Immediate,
-    /** Variable reference - loaded into register when needed, never stack. */
-    kIemNativeVarKind_VarRef,
-    /** Guest register reference - loaded into register when needed, never stack. */
-    kIemNativeVarKind_GstRegRef,
-    /** End of valid values. */
-    kIemNativeVarKind_End
-} IEMNATIVEVARKIND;
-
-
-/** Variable or argument. */
-typedef struct IEMNATIVEVAR
-{
-    /** The kind of variable. */
-    IEMNATIVEVARKIND    enmKind;
-    /** The variable size in bytes. */
-    uint8_t             cbVar;
-    /** The first stack slot (uint64_t), except for immediates and references
-     *  where it usually is UINT8_MAX. */
-    uint8_t             idxStackSlot;
-    /** The host register allocated for the variable, UINT8_MAX if not. */
-    uint8_t             idxReg;
-    /** The argument number if argument, UINT8_MAX if regular variable. */
-    uint8_t             uArgNo;
-    /** If referenced, the index of the variable referencing this one, otherwise
-     *  UINT8_MAX.  A referenced variable must only be placed on the stack and
-     *  must be either kIemNativeVarKind_Stack or kIemNativeVarKind_Immediate. */
-    uint8_t             idxReferrerVar;
-    /** Guest register being shadowed here, kIemNativeGstReg_End(/UINT8_MAX) if not. */
-    IEMNATIVEGSTREG     enmGstReg;
-    /** Alignment padding. */
-    uint8_t             bAlign;
-
-    union
-    {
-        /** kIemNativeVarKind_Immediate: The immediate value. */
-        uint64_t            uValue;
-        /** kIemNativeVarKind_VarRef: The index of the variable being referenced. */
-        uint8_t             idxRefVar;
-        /** kIemNativeVarKind_GstRegRef: The guest register being referenced. */
-        struct
-        {
-            /** The class of register. */
-            IEMNATIVEGSTREGREF  enmClass;
-            /** Index within the class. */
-            uint8_t             idx;
-        } GstRegRef;
-    } u;
-} IEMNATIVEVAR;
-
-/** What is being kept in a host register. */
-typedef enum IEMNATIVEWHAT : uint8_t
-{
-    /** The traditional invalid zero value. */
-    kIemNativeWhat_Invalid = 0,
-    /** Mapping a variable (IEMNATIVEHSTREG::idxVar). */
-    kIemNativeWhat_Var,
-    /** Temporary register, this is typically freed when a MC completes. */
-    kIemNativeWhat_Tmp,
-    /** Call argument w/o a variable mapping.  This is free (via
-     * IEMNATIVE_CALL_VOLATILE_GREG_MASK) after the call is emitted. */
-    kIemNativeWhat_Arg,
-    /** Return status code.
-     * @todo not sure if we need this... */
-    kIemNativeWhat_rc,
-    /** The fixed pVCpu (PVMCPUCC) register.
-     * @todo consider offsetting this on amd64 to use negative offsets to access
-     *       more members using 8-byte disp. */
-    kIemNativeWhat_pVCpuFixed,
-    /** The fixed pCtx (PCPUMCTX) register.
-     * @todo consider offsetting this on amd64 to use negative offsets to access
-     *       more members using 8-byte disp. */
-    kIemNativeWhat_pCtxFixed,
-    /** Fixed temporary register. */
-    kIemNativeWhat_FixedTmp,
-    /** Register reserved by the CPU or OS architecture. */
-    kIemNativeWhat_FixedReserved,
-    /** End of valid values. */
-    kIemNativeWhat_End
-} IEMNATIVEWHAT;
-
-/**
- * Host general register entry.
- *
- * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstRegs.
- *
- * @todo Track immediate values in host registers similarly to how we track
- *       the guest register shadow copies.  For it to be really helpful, though,
- *       we probably need to know which will be reused and put them into
- *       non-volatile registers, otherwise it's going to be more or less
- *       restricted to an instruction or two.
- */
-typedef struct IEMNATIVEHSTREG
-{
-    /** Set of guest registers this one shadows.
-     *
-     * Using a bitmap here so we can designate the same host register as a copy
-     * for more than one guest register.  This is expected to be useful in
-     * situations where one value is copied to several registers in a sequence.
-     * If the mapping were 1:1, we'd have to pick one side of a 'MOV SRC,DST'
-     * sequence for the shadow to follow, and there would always be places
-     * where we'd pick the wrong one.
-     */
-    uint64_t        fGstRegShadows;
-    /** What is being kept in this register. */
-    IEMNATIVEWHAT   enmWhat;
-    /** Variable index if holding a variable, otherwise UINT8_MAX. */
-    uint8_t         idxVar;
-    /** Alignment padding. */
-    uint8_t         abAlign[6];
-} IEMNATIVEHSTREG;
-
-
-/**
- * Core state for the native recompiler, that is, things that need careful
- * handling when dealing with branches.
- */
-typedef struct IEMNATIVECORESTATE
-{
-    /** Allocation bitmap for aHstRegs. */
-    uint32_t                    bmHstRegs;
-
-    /** Bitmap marking which host registers contain guest register shadow copies.
-     * This is used during register allocation to try to preserve copies.  */
-    uint32_t                    bmHstRegsWithGstShadow;
-    /** Bitmap marking valid entries in aidxGstRegShadows. */
-    uint64_t                    bmGstRegShadows;
-
-    union
-    {
-        /** Variable index of each argument, UINT8_MAX if not valid. */
-        uint8_t                 aidxArgVars[8];
-        /** For more efficient resetting. */
-        uint64_t                u64ArgVars;
-    };
-
-    /** Allocation bitmap for the stack. */
-    uint32_t                    bmStack;
-    /** Allocation bitmap for aVars. */
-    uint32_t                    bmVars;
-
-    /** Maps a guest register to a host GPR (indexed by IEMNATIVEGSTREG).
-     * Entries are only valid if the corresponding bit in bmGstRegShadows is set.
-     * (A shadow copy of a guest register can only be held in one host register,
-     * there are no duplicate copies or ambiguities like that.) */
-    uint8_t                     aidxGstRegShadows[kIemNativeGstReg_End];
-
-    /** Host register allocation tracking. */
-    IEMNATIVEHSTREG             aHstRegs[IEMNATIVE_HST_GREG_COUNT];
-
-    /** Variables and arguments. */
-    IEMNATIVEVAR                aVars[9];
-} IEMNATIVECORESTATE;
-/** Pointer to core state. */
-typedef IEMNATIVECORESTATE *PIEMNATIVECORESTATE;
-/** Pointer to const core state. */
-typedef IEMNATIVECORESTATE const *PCIEMNATIVECORESTATE;
-
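
Adding a shadow copy means updating the host register entry and the core state bitmaps in lock-step; conceptually it amounts to the following (the real code wraps this in a helper):

    /* Conceptual: mark host register idxHstReg as shadowing enmGstReg. */
    pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows |= RT_BIT_64(enmGstReg);
    pReNative->Core.bmHstRegsWithGstShadow             |= RT_BIT_32(idxHstReg);
    pReNative->Core.bmGstRegShadows                    |= RT_BIT_64(enmGstReg);
    pReNative->Core.aidxGstRegShadows[enmGstReg]        = idxHstReg;
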
-
-/**
- * Conditional stack entry.
- */
-typedef struct IEMNATIVECOND
-{
-    /** Set if we're in the "else" part, clear if we're in the "if" before it. */
-    bool                        fInElse;
-    /** The label for the IEM_MC_ELSE. */
-    uint32_t                    idxLabelElse;
-    /** The label for the IEM_MC_ENDIF. */
-    uint32_t                    idxLabelEndIf;
-    /** The initial state snapshot as the if-block starts executing. */
-    IEMNATIVECORESTATE          InitialState;
-    /** The state snapshot at the end of the if-block. */
-    IEMNATIVECORESTATE          IfFinalState;
-} IEMNATIVECOND;
-/** Pointer to a condition stack entry. */
-typedef IEMNATIVECOND *PIEMNATIVECOND;
-
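
When an IEM_MC_IF is recompiled, an entry is pushed and the core state snapshotted so the if/else branches can later be reconciled. A conceptual sketch only, not the actual implementation:

    PIEMNATIVECOND const pEntry = &pReNative->aCondStack[pReNative->cCondDepth++];
    pEntry->fInElse       = false;
    pEntry->idxLabelElse  = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Else,
                                                 UINT32_MAX /*offWhere*/, pReNative->uCondSeqNo);
    pEntry->idxLabelEndIf = iemNativeLabelCreate(pReNative, kIemNativeLabelType_Endif,
                                                 UINT32_MAX /*offWhere*/, pReNative->uCondSeqNo++);
    pEntry->InitialState  = pReNative->Core;
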
-
-/**
- * Native recompiler state.
- */
-typedef struct IEMRECOMPILERSTATE
-{
-    /** Size of the buffer that pbNativeRecompileBufR3 points to in
-     * IEMNATIVEINSTR units. */
-    uint32_t                    cInstrBufAlloc;
-#ifdef VBOX_STRICT
-    /** Strict: How far the last iemNativeInstrBufEnsure() checked. */
-    uint32_t                    offInstrBufChecked;
-#else
-    uint32_t                    uPadding1; /* We don't keep track of the size here... */
-#endif
-    /** Fixed temporary code buffer for native recompilation. */
-    PIEMNATIVEINSTR             pInstrBuf;
-
-    /** Bitmap of the label types used. */
-    uint64_t                    bmLabelTypes;
-    /** Actual number of labels in paLabels. */
-    uint32_t                    cLabels;
-    /** Max number of entries allowed in paLabels before reallocating it. */
-    uint32_t                    cLabelsAlloc;
-    /** Labels defined while recompiling (referenced by fixups). */
-    PIEMNATIVELABEL             paLabels;
-
-    /** Actual number of fixups in paFixups. */
-    uint32_t                    cFixups;
-    /** Max number of entries allowed in paFixups before reallocating it. */
-    uint32_t                    cFixupsAlloc;
-    /** Buffer used by the recompiler for recording fixups when generating code. */
-    PIEMNATIVEFIXUP             paFixups;
-
-#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
-    /** Number of debug info entries allocated for pDbgInfo. */
-    uint32_t                    cDbgInfoAlloc;
-    uint32_t                    uPadding;
-    /** Debug info. */
-    PIEMTBDBG                   pDbgInfo;
-#endif
-
-    /** The translation block being recompiled. */
-    PCIEMTB                     pTbOrg;
-
-    /** The current condition stack depth (aCondStack). */
-    uint8_t                     cCondDepth;
-    uint8_t                     bPadding2;
-    /** Condition sequence number (for generating unique labels). */
-    uint16_t                    uCondSeqNo;
-    /** Check-IRQ sequence number (for generating unique labels). */
-    uint16_t                    uCheckIrqSeqNo;
-    uint8_t                     bPadding3;
-
-    /** The argument count + hidden regs from the IEM_MC_BEGIN statement. */
-    uint8_t                     cArgs;
-    /** The IEM_CIMPL_F_XXX flags from the IEM_MC_BEGIN statement. */
-    uint32_t                    fCImpl;
-    /** The IEM_MC_F_XXX flags from the IEM_MC_BEGIN statement. */
-    uint32_t                    fMc;
-
-    /** Core state requiring care with branches. */
-    IEMNATIVECORESTATE          Core;
-
-    /** The condition nesting stack. */
-    IEMNATIVECOND               aCondStack[2];
-
-#ifndef IEM_WITH_THROW_CATCH
-    /** The setjmp/longjmp buffer used if we're not using C++ exceptions
-     *  for recompilation error handling. */
-    jmp_buf                     JmpBuf;
-#endif
-} IEMRECOMPILERSTATE;
-/** Pointer to a native recompiler state. */
-typedef IEMRECOMPILERSTATE *PIEMRECOMPILERSTATE;
-
-
-/** @def IEMNATIVE_TRY_SETJMP
- * Wrapper around setjmp / try, hiding all the ugly differences.
- *
- * @note Use with extreme care as this is a fragile macro.
- * @param   a_pReNative The native recompile state.
- * @param   a_rcTarget  The variable that should receive the status code in case
- *                      of a longjmp/throw.
- */
-/** @def IEMNATIVE_CATCH_LONGJMP_BEGIN
- * Start wrapper for catch / setjmp-else.
- *
- * This will set up a scope.
- *
- * @note Use with extreme care as this is a fragile macro.
- * @param   a_pReNative The native recompile state.
- * @param   a_rcTarget  The variable that should receive the status code in case
- *                      of a longjmp/throw.
- */
-/** @def IEMNATIVE_CATCH_LONGJMP_END
- * End wrapper for catch / setjmp-else.
- *
- * This will close the scope set up by IEMNATIVE_CATCH_LONGJMP_BEGIN and clean
- * up the state.
- *
- * @note Use with extreme care as this is a fragile macro.
- * @param   a_pReNative The native recompile state.
- */
-/** @def IEMNATIVE_DO_LONGJMP
- *
- * Wrapper around longjmp / throw.
- *
- * @param   a_pReNative The native recompile state.
- * @param   a_rc        The status code to jump back with / throw.
- */
-#ifdef IEM_WITH_THROW_CATCH
-# define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
-       a_rcTarget = VINF_SUCCESS; \
-       try
-# define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
-       catch (int rcThrown) \
-       { \
-           a_rcTarget = rcThrown
-# define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
-       } \
-       ((void)0)
-# define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc)  throw int(a_rc)
-#else  /* !IEM_WITH_THROW_CATCH */
-# define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
-       if ((a_rcTarget = setjmp((a_pReNative)->JmpBuf)) == 0)
-# define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
-       else \
-       { \
-           ((void)0)
-# define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
-       }
-# define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc)  longjmp((a_pReNative)->JmpBuf, (a_rc))
-#endif /* !IEM_WITH_THROW_CATCH */
-
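
The wrappers are used as a matched pair around code that may bail out via IEMNATIVE_DO_LONGJMP; an illustrative sketch, where the emitting call is a hypothetical stand-in (both macro flavours expand to the same control flow):

    int rc;
    IEMNATIVE_TRY_SETJMP(pReNative, rc)
    {
        off = iemNativeEmitSomething(pReNative, off);   /* hypothetical; may longjmp/throw */
    }
    IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
        LogRel(("native recompilation failed: %Rrc\n", rc));
    IEMNATIVE_CATCH_LONGJMP_END(pReNative);
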
-
-/**
- * Native recompiler worker for a threaded function.
- *
- * @returns New code buffer offset; throws VBox status code in case of a failure.
- * @param   pReNative   The native recompiler state.
- * @param   off         The current code buffer offset.
- * @param   pCallEntry  The threaded call entry.
- *
- * @note    This may throw/longjmp VBox status codes (int) to abort compilation, so no RT_NOEXCEPT!
- */
-typedef uint32_t (VBOXCALL FNIEMNATIVERECOMPFUNC)(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry);
-/** Pointer to a native recompiler worker for a threaded function. */
-typedef FNIEMNATIVERECOMPFUNC *PFNIEMNATIVERECOMPFUNC;
-
-/** Defines a native recompiler worker for a threaded function.
- * @see FNIEMNATIVERECOMPFUNC  */
-#define IEM_DECL_IEMNATIVERECOMPFUNC_DEF(a_Name) \
-    uint32_t VBOXCALL a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
-
-/** Prototypes a native recompiler function for a threaded function.
- * @see FNIEMNATIVERECOMPFUNC  */
-#define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name
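
A concrete worker then looks like the following skeleton (the function name is illustrative):

    /* Skeleton worker: recompiles nothing and returns the offset unchanged. */
    IEM_DECL_IEMNATIVERECOMPFUNC_DEF(iemNativeRecompFunc_Nop)
    {
        RT_NOREF(pReNative, pCallEntry);
        return off;
    }
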
-
-DECL_HIDDEN_THROW(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
-                                                 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0);
-DECL_HIDDEN_THROW(void)     iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere);
-DECL_HIDDEN_THROW(void)     iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
-                                              IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0);
-DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq);
-
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true);
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
-                                                    bool fPreferVolatile = true);
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                            IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse);
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
-                                                                            IEMNATIVEGSTREG enmGstReg);
-
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar);
-DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs);
-DECL_HIDDEN_THROW(uint8_t)  iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);
-DECLHIDDEN(void)            iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
-DECLHIDDEN(void)            iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
-DECLHIDDEN(void)            iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
-DECLHIDDEN(void)            iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
-DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off);
-
-DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,
-                                                                 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg);
-DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr);
-
-extern DECL_HIDDEN_DATA(const char * const) g_apszIemNativeHstRegNames[];
-
-
-/**
- * Ensures that there is sufficient space in the instruction output buffer.
- *
- * This will reallocate the buffer if needed and allowed.
- *
- * @note    Always use IEMNATIVE_ASSERT_INSTR_BUF_ENSURE when done to check the
- *          allocation size.
- *
- * @returns Pointer to the instruction output buffer on success; throws VBox
- *          status code on failure, so no need to check it.
- * @param   pReNative   The native recompile state.
- * @param   off         Current instruction offset.  Works safely for UINT32_MAX
- *                      as well.
- * @param   cInstrReq   Number of instructions about to be added.  It's okay to
- *                      overestimate this a bit.
- */
-DECL_FORCE_INLINE_THROW(PIEMNATIVEINSTR)
-iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
-{
-    uint64_t const offChecked = off + (uint64_t)cInstrReq; /** @todo may reconsider the need for UINT32_MAX safety... */
-    if (RT_LIKELY(offChecked <= pReNative->cInstrBufAlloc))
-    {
-#ifdef VBOX_STRICT
-        pReNative->offInstrBufChecked = offChecked;
-#endif
-        return pReNative->pInstrBuf;
-    }
-    return iemNativeInstrBufEnsureSlow(pReNative, off, cInstrReq);
-}
-
-/**
- * Checks that we didn't exceed the space requested in the last
- * iemNativeInstrBufEnsure() call. */
-#define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
-    AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
-              ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
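
Every inlined emitter in this header follows the same pattern: reserve space, write the instruction(s), then assert that the estimate held. An illustrative AMD64 sketch (the uint8_t cast assumes byte-sized IEMNATIVEINSTR on that architecture):

    uint8_t * const pbCodeBuf = (uint8_t *)iemNativeInstrBufEnsure(pReNative, off, 1);
    pbCodeBuf[off++] = 0x90;                            /* nop */
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
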
 
 /**
…
 /** @} */
 
-#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h */
-
+#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h */
+