VirtualBox

Changeset 101516 in vbox


Timestamp: Oct 20, 2023 1:07:03 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 159593
Message: VMM/IEM: Deal with unconditional relative jumps (sans flag checking). bugref:10371

Location: trunk
Files: 6 edited

Legend: unchanged lines are unmarked; '-' marks removed lines, '+' marks added lines.
  • trunk/include/iprt/armv8.h

r101506 → r101516

@@ -2232 +2232 @@
     /** Add @a iImm7*sizeof(reg) to @a iBaseReg after the store/load,
      * and update the register. */
-    kArm64InstrStLdPairType_kPostIndex = 1,
+    kArm64InstrStLdPairType_PostIndex = 1,
     /** Add @a iImm7*sizeof(reg) to @a iBaseReg before the store/load,
      * but don't update the register. */
-    kArm64InstrStLdPairType_kSigned    = 2,
+    kArm64InstrStLdPairType_Signed    = 2,
     /** Add @a iImm7*sizeof(reg) to @a iBaseReg before the store/load,
      * and update the register. */
-    kArm64InstrStLdPairType_kPreIndex  = 3
+    kArm64InstrStLdPairType_PreIndex  = 3
 } ARM64INSTRSTLDPAIRTYPE;
 

@@ -2533 +2533 @@
 typedef enum
 {
-    kArmv8A64InstrShift_kLsl = 0,
-    kArmv8A64InstrShift_kLsr,
-    kArmv8A64InstrShift_kAsr,
-    kArmv8A64InstrShift_kRor
+    kArmv8A64InstrShift_Lsl = 0,
+    kArmv8A64InstrShift_Lsr,
+    kArmv8A64InstrShift_Asr,
+    kArmv8A64InstrShift_Ror
 } ARMV8A64INSTRSHIFT;
 
     
@@ -2576 +2576 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAnd(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(0, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2585 +2585 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBic(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(0, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2594 +2594 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrOrr(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(1, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2603 +2603 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrOrn(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(1, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2612 +2612 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrEor(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(2, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2621 +2621 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrEon(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                               uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(2, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2630 +2630 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAnds(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(3, false /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);

@@ -2639 +2639 @@
  * @see Armv8A64MkInstrLogicalShiftedReg for parameter details.  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBics(uint32_t iRegResult, uint32_t iReg1, uint32_t iReg2Shifted, bool f64Bit = true,
-                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                                uint32_t offShift6 = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     return Armv8A64MkInstrLogicalShiftedReg(3, true /*fNot*/, iRegResult, iReg1, iReg2Shifted, f64Bit, offShift6, enmShift);
     
@@ -2708 +2708 @@
 {
     return Armv8A64MkInstrLogicalImm(3, iRegResult, iRegSrc, uImm7SizeLen, uImm6Rotations, f64Bit);
+}
+
+
+/**
+ * A64: Encodes a bitfield instruction.
+ *
+ * @returns The encoded instruction.
+ * @param   u2Opc           The bitfield operation to perform.
+ * @param   iRegResult      The output register.
+ * @param   iRegSrc         The 1st register operand.
+ * @param   cImm6Ror        The right rotation count.
+ * @param   uImm6S          The leftmost bit to be moved.
+ * @param   f64Bit          true for 64-bit GPRs, @c false for 32-bit GPRs.
+ * @param   uN1             This must match @a f64Bit for all instructions
+ *                          currently specified.
+ * @see https://dinfuehr.github.io/blog/encoding-of-immediate-values-on-aarch64/
+ *      https://gist.githubusercontent.com/dinfuehr/51a01ac58c0b23e4de9aac313ed6a06a/raw/1892a274aa3238d55f83eec5b3828da2aec5f229/aarch64-logical-immediates.txt
+ */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBitfieldImm(uint32_t u2Opc, uint32_t iRegResult, uint32_t iRegSrc,
+                                                       uint32_t cImm6Ror, uint32_t uImm6S, bool f64Bit, uint32_t uN1)
+{
+    Assert(cImm6Ror < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegResult < 32); Assert(u2Opc < 4);
+    Assert(uImm6S < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegSrc    < 32); Assert(uN1 <= (unsigned)f64Bit);
+    return ((uint32_t)f64Bit   << 31)
+         | (u2Opc              << 29)
+         | UINT32_C(0x13000000)
+         | (uN1                << 22)
+         | (cImm6Ror           << 16)
+         | (uImm6S             << 10)
+         | (iRegSrc            <<  5)
+         | iRegResult;
+}
+
+
+/** A64: Encodes a SBFM instruction immediates.
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrSbfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
+                                                   bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
+{
+    return Armv8A64MkInstrBitfieldImm(0, iRegResult, iRegSrc, cImm6Ror, uImm6S, f64Bit, uN1 == UINT32_MAX ? f64Bit : uN1);
+}
+
+
+/** A64: Encodes a BFM instruction immediates.
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
+                                                  bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
+{
+    return Armv8A64MkInstrBitfieldImm(1, iRegResult, iRegSrc, cImm6Ror, uImm6S, f64Bit, uN1 == UINT32_MAX ? f64Bit : uN1);
+}
+
+
+/** A64: Encodes an UBFM instruction immediates.
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrUbfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
+                                                   bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
+{
+    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, cImm6Ror, uImm6S, f64Bit, uN1 == UINT32_MAX ? f64Bit : uN1);
+}
+
+
+/** A64: Encodes an LSL instruction w/ immediate shift value.
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrLslImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cShift, bool f64Bit = true)
+{
+    uint32_t const cWidth = f64Bit ? 63 : 31;
+    Assert(cShift > 0); Assert(cShift <= cWidth);
+    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, (uint32_t)-cShift & cWidth, cWidth - cShift /*uImm6S*/, false, 0);
+}
+
+
+/** A64: Encodes an LSR instruction w/ immediate shift value.
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrLsrImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cShift, bool f64Bit = true)
+{
+    uint32_t const cWidth = f64Bit ? 63 : 31;
+    Assert(cShift > 0); Assert(cShift <= cWidth);
+    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, cShift, cWidth /*uImm6S*/, f64Bit, f64Bit);
+}
+
+
+/** A64: Encodes an UXTB instruction - zero extend byte (8-bit).
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrUxtb(uint32_t iRegResult, uint32_t iRegSrc, bool f64Bit = false)
+{
+    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, 0, 7, f64Bit, f64Bit);
+}
+
+
+/** A64: Encodes an UXTH instruction - zero extend half word (16-bit).
+ * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrUxth(uint32_t iRegResult, uint32_t iRegSrc, bool f64Bit = false)
+{
+    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, 0, 15, f64Bit, f64Bit);
 }
 
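Note: the LSL/LSR immediate emitters above lean on the standard A64 aliases onto UBFM: lsr Xd, Xn, #s is ubfm Xd, Xn, #s, #63, while lsl Xd, Xn, #s is ubfm Xd, Xn, #(-s & 63), #(63 - s). A minimal sketch of that field arithmetic, using a made-up shift count of 4 (not a value from the changeset):

    // UBFM field arithmetic behind the LSL/LSR immediate emitters (64-bit case).
    uint32_t const cShift  = 4;                           // hypothetical shift count
    uint32_t const cWidth  = 63;                          // f64Bit ? 63 : 31
    uint32_t const immrLsr = cShift;                      // LSR: immr = 4 ...
    uint32_t const immsLsr = cWidth;                      // ... imms = 63
    uint32_t const immrLsl = (uint32_t)-cShift & cWidth;  // LSL: immr = 60
    uint32_t const immsLsl = cWidth - cShift;             // ... imms = 59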
     
@@ -2767 +2861 @@
  * @param   cShift                  The shift count to apply to @a iRegSrc2.
  * @param   enmShift                The shift type to apply to the @a iRegSrc2
- *                                  register. kArmv8A64InstrShift_kRor is
+ *                                  register. kArmv8A64InstrShift_Ror is
  *                                  reserved.
  */
 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAddSubReg(bool fSub, uint32_t iRegResult, uint32_t iRegSrc1, uint32_t iRegSrc2,
                                                      bool f64Bit = true, bool fSetFlags = false, uint32_t cShift = 0,
-                                                     ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_kLsl)
+                                                     ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsl)
 {
     Assert(iRegResult < 32); Assert(iRegSrc1 < 32); Assert(iRegSrc2 < 32);
-    Assert(cShift < (f64Bit ? 64U : 32U)); Assert(enmShift != kArmv8A64InstrShift_kRor);
+    Assert(cShift < (f64Bit ? 64U : 32U)); Assert(enmShift != kArmv8A64InstrShift_Ror);
 
     return ((uint32_t)f64Bit       << 31)
     
@@ -2859 +2953 @@
 }
 
+/** Armv8 Condition codes.    */
+typedef enum ARMV8INSTRCOND
+{
+    kArmv8InstrCond_Eq = 0,                     /**< Equal - Zero set. */
+    kArmv8InstrCond_Cs,                         /**< Carry set (also known as 'HS'). */
+    kArmv8InstrCond_Hs = kArmv8InstrCond_Cs,    /**< Unsigned higher or same. */
+    kArmv8InstrCond_Mi,                         /**< Negative result (minus). */
+    kArmv8InstrCond_Vs,                         /**< Overflow set. */
+    kArmv8InstrCond_Hi,                         /**< Unsigned higher. */
+    kArmv8InstrCond_Ge,                         /**< Signed greater or equal. */
+    kArmv8InstrCond_Le,                         /**< Signed less or equal. */
+
+    kArmv8InstrCond_Ne,                         /**< Not equal - Zero clear. */
+    kArmv8InstrCond_Cc,                         /**< Carry clear (also known as 'LO'). */
+    kArmv8InstrCond_Lo = kArmv8InstrCond_Cc,    /**< Unsigned lower. */
+    kArmv8InstrCond_Pl,                         /**< Positive or zero result (plus). */
+    kArmv8InstrCond_Vc,                         /**< Overflow clear. */
+    kArmv8InstrCond_Ls,                         /**< Unsigned lower or same. */
+    kArmv8InstrCond_Lt,                         /**< Signed less than. */
+    kArmv8InstrCond_Al                          /**< Condition is always true. */
+} ARMV8INSTRCOND;
+
+/**
+ * A64: Encodes conditional branch instruction w/ immediate target.
+ *
+ * @returns The encoded instruction.
+ * @param   enmCond         The branch condition.
+ * @param   iImm19          Signed number of instruction to jump (i.e. *4).
+ */
+DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBCond(ARMV8INSTRCOND enmCond, int32_t iImm19)
+{
+    Assert(enmCond >= 0 && enmCond < 16);
+    return UINT32_C(0x54000000)
+         | (((uint32_t)iImm19 & 0x7ffff) <<  5)
+         | (uint32_t)enmCond;
+}
+
+
 /**
  * A64: Encodes the BRK instruction.
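Note: a minimal usage sketch for the new condition-code enum and branch encoder (the displacement is a made-up example). iImm19 counts instructions, so a byte displacement has to be divided by 4 first:

    /* Encode a b.eq to a target two instructions (8 bytes) ahead. */
    int32_t const  cbDisp = 8;                                       /* hypothetical */
    uint32_t const uInstr = Armv8A64MkInstrBCond(kArmv8InstrCond_Eq, cbDisp / 4);
    /* 0x54000000 | ((2 & 0x7ffff) << 5) | 0  ==  0x54000040 */
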
  • trunk/include/iprt/x86.h

r101430 → r101516

@@ -4975 +4975 @@
 #define X86_OP_PRF_REPZ         UINT8_C(0xf3)
 #define X86_OP_PRF_REPNZ        UINT8_C(0xf2)
+#define X86_OP_REX              UINT8_C(0x40)
 #define X86_OP_REX_B            UINT8_C(0x41)
 #define X86_OP_REX_X            UINT8_C(0x42)
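Note: the new prefix-less REX define is what lets byte-register encodings 4-7 select SPL/BPL/SIL/DIL; without any REX prefix those same encodings mean AH/CH/DH/BH. A hedged sketch with example bytes (not taken from the changeset):

    /* "mov sil, 42h" needs a plain REX; the same bytes without it are "mov dh, 42h". */
    uint8_t const abMovSil[] = { X86_OP_REX, 0xb6, 0x42 };  /* 40 B6 42: mov sil, 42h */
    uint8_t const abMovDh[]  = {             0xb6, 0x42 };  /*    B6 42: mov dh,  42h */
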
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

r101484 → r101516

@@ -8035 +8035 @@
 {
     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
+    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Iz in this group. */
         IEMOP_RAISE_INVALID_OPCODE_RET();
     IEMOP_MNEMONIC(mov_Ev_Iz, "mov Ev,Iz");
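Note: the guard above keys on the reg field of the ModRM byte (layout mod[7:6] reg[5:3] rm[2:0]); opcode group 11 only defines /0 (mov). A quick sketch of the masking with a made-up ModRM byte:

    uint8_t const bRm  = 0x45;  /* hypothetical: mod=01 reg=000 rm=101 */
    uint8_t const iReg = (bRm & X86_MODRM_REG_MASK) >> X86_MODRM_REG_SHIFT;  /* 0 -> mov */
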
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

r101505 → r101516

@@ -75 +75 @@
     'IEM_MC_CALL_CIMPL_5_THREADED':                              (None, True,  False, ),
 
-    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16':                (None, True,  False, ),
-    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32':                (None, True,  False,  ),
-    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64':                (None, True,  False, ),
-    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16':               (None, True,  False, ),
-    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32':               (None, True,  False, ),
-    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64':               (None, True,  False, ),
-    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32':               (None, True,  False, ),
-    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64':               (None, True,  False, ),
+    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16':                (None, True,  True, ),
+    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32':                (None, True,  True,  ),
+    'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64':                (None, True,  True, ),
+    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16':               (None, True,  True, ),
+    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32':               (None, True,  True, ),
+    'IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64':               (None, True,  True, ),
+    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32':               (None, True,  True, ),
+    'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64':               (None, True,  True, ),
 
     'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16_WITH_FLAGS':     (None, True,  False, ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

r101505 → r101516

@@ -1544 +1544 @@
 
 /**
+ * Used by TB code when it wants to raise a \#GP(0).
+ */
+IEM_DECL_IMPL_DEF(int, iemNativeHlpExecRaiseGp0,(PVMCPUCC pVCpu, uint8_t idxInstr))
+{
+    pVCpu->iem.s.cInstructions += idxInstr;
+    iemRaiseGeneralProtectionFault0Jmp(pVCpu);
+    return VINF_IEM_RAISED_XCPT; /* not reached */
+}
+
+
+/**
  * Reinitializes the native recompiler state.
  *
     
@@ -1550 +1561 @@
 static PIEMRECOMPILERSTATE iemNativeReInit(PIEMRECOMPILERSTATE pReNative, PCIEMTB pTb)
 {
-    pReNative->cLabels   = 0;
-    pReNative->cFixups   = 0;
-    pReNative->pTbOrg    = pTb;
+    pReNative->cLabels                = 0;
+    pReNative->bmLabelTypes           = 0;
+    pReNative->cFixups                = 0;
+    pReNative->pTbOrg                 = pTb;
 
     pReNative->bmHstRegs              = IEMNATIVE_REG_FIXED_MASK
     
@@ -1709 +1721 @@
     paLabels[cLabels].uData   = uData;
     pReNative->cLabels = cLabels + 1;
+
+    Assert(enmType >= 0 && enmType < 64);
+    pReNative->bmLabelTypes |= RT_BIT_64(enmType);
     return cLabels;
 }
     
@@ -1721 +1736 @@
                                    uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT
 {
-    PIEMNATIVELABEL paLabels = pReNative->paLabels;
-    uint32_t const  cLabels  = pReNative->cLabels;
-    for (uint32_t i = 0; i < cLabels; i++)
-        if (   paLabels[i].enmType == enmType
-            && paLabels[i].uData   == uData
-            && (   paLabels[i].off == offWhere
-                || offWhere        == UINT32_MAX
-                || paLabels[i].off == UINT32_MAX))
-            return i;
+    Assert(enmType >= 0 && enmType < 64);
+    if (RT_BIT_64(enmType) & pReNative->bmLabelTypes)
+    {
+        PIEMNATIVELABEL paLabels = pReNative->paLabels;
+        uint32_t const  cLabels  = pReNative->cLabels;
+        for (uint32_t i = 0; i < cLabels; i++)
+            if (   paLabels[i].enmType == enmType
+                && paLabels[i].uData   == uData
+                && (   paLabels[i].off == offWhere
+                    || offWhere        == UINT32_MAX
+                    || paLabels[i].off == UINT32_MAX))
+                return i;
+    }
     return UINT32_MAX;
 }
     
@@ -1796 +1815 @@
 
     uint32_t const cbNew = cNew * sizeof(IEMNATIVEINSTR);
+#if RT_ARCH_ARM64
+    AssertReturn(cbNew <= _1M, NULL); /* Limited by the branch instruction range (18+2 bits). */
+#else
     AssertReturn(cbNew <= _2M, NULL);
+#endif
 
     void *pvNew = RTMemRealloc(pReNative->pInstrBuf, cbNew);
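Note: the tighter ARM64 cap follows from the conditional-branch encoding added in armv8.h: a signed 19-bit instruction count scaled by 4 bytes gives ±1 MiB of reach, i.e. 18 bits of positive range plus 2 bits of scaling, as the comment says. A one-line restatement of that arithmetic:

    AssertCompile(RT_BIT_32(18) * 4 == _1M);  /* 2^18 forward instructions * 4 bytes each */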
     
@@ -2085 +2108 @@
     }
     return iemNativeRegMarkAllocated(pReNative, idxReg, kIemNativeWhat_Tmp);
 }
+
+
+/**
+ * Allocates a temporary register for loading an immediate value into.
+ *
+ * This will emit code to load the immediate, unless there happens to be an
+ * unused register with the value already loaded.
+ *
+ * The caller will not modify the returned register, it must be considered
+ * read-only.  Free using iemNativeRegFreeTmpImm.
+ *
+ * @returns The host register number, UINT8_MAX on failure.
+ * @param   pReNative       The native recompile state.
+ * @param   poff            Pointer to the variable with the code buffer position.
+ * @param   uImm            The immediate value that the register must hold upon
+ *                          return.
+ * @param   fPreferVolatile Whether to prefer volatile over non-volatile
+ *                          registers (@c true, default) or the other way around
+ *                          (@c false).
+ *
+ * @note    Reusing immediate values has not been implemented yet.
+ */
+DECLHIDDEN(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
+                                            bool fPreferVolatile /*= true*/) RT_NOEXCEPT
+{
+    uint8_t idxReg = iemNativeRegAllocTmp(pReNative, poff, fPreferVolatile);
+    if (idxReg < RT_ELEMENTS(pReNative->aHstRegs))
+    {
+        uint32_t off = *poff;
+        *poff = off = iemNativeEmitLoadGprImm64(pReNative, off, idxReg, uImm);
+        AssertReturnStmt(off != UINT32_MAX, iemNativeRegFreeTmp(pReNative, idxReg), UINT8_MAX);
+    }
+    return idxReg;
 }
 
     
@@ -2677 +2734 @@
 
 /**
+ * Frees a temporary immediate register.
+ *
+ * It is assumed that the caller has not modified the register, so it still
+ * holds the same value as when it was allocated via iemNativeRegAllocTmpImm().
+ */
+DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT
+{
+    iemNativeRegFreeTmp(pReNative, idxHstReg);
+}
+
+
+/**
  * Called right before emitting a call instruction to move anything important
  * out of call-volatile registers, free and flush the call-volatile registers,
     
@@ -2858 +2927 @@
 
 /**
+ * Flushes any delayed guest register writes.
+ *
+ * This must be called prior to calling CImpl functions and any helpers that use
+ * the guest state (like raising exceptions) and such.
+ *
+ * This optimization has not yet been implemented.  The first target would be
+ * RIP updates, since these are the most common ones.
+ */
+DECLHIDDEN(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT
+{
+    RT_NOREF(pReNative, off);
+    return off;
+}
+
+
+/**
  * Emits a code for checking the return code of a call and rcPassUp, returning
  * from the code if either are non-zero.
     
@@ -2916 +3001 @@
 # error "port me"
 #endif
+    return off;
+}
+
+
+/**
+ * Emits code to check if the content of @a idxAddrReg is a canonical address,
+ * raising a \#GP(0) if it isn't.
+ *
+ * @returns New code buffer offset, UINT32_MAX on failure.
+ * @param   pReNative       The native recompile state.
+ * @param   off             The code buffer offset.
+ * @param   idxAddrReg      The host register with the address to check.
+ * @param   idxInstr        The current instruction.
+ */
+DECLHIDDEN(uint32_t) iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                                 uint8_t idxAddrReg, uint8_t idxInstr)
+{
+    RT_NOREF(idxInstr);
+
+    /*
+     * Make sure we don't have any outstanding guest register writes as we may
+     * raise an #GP(0) and all guest registers must be up to date in CPUMCTX.
+     */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+#ifdef RT_ARCH_AMD64
+    /*
+     * if ((((uint32_t)(a_u64Addr >> 32) + UINT32_C(0x8000)) >> 16) != 0)
+     *     return raisexcpt();
+     * ---- this variant avoids loading a 64-bit immediate, but is an instruction longer.
+     */
+    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+    AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
+
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iTmpReg, idxAddrReg);
+    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 32);
+    off = iemNativeEmitAddGpr32Imm(pReNative, off, iTmpReg, (int32_t)0x8000);
+    off = iemNativeEmitShiftGprRight(pReNative, off, iTmpReg, 16);
+
+# ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+    off = iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+# else
+    uint32_t const offFixup = off;
+    off = iemNativeEmitJzToFixed(pReNative, off, 0);
+    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxInstr);
+    off = iemNativeEmitJmpToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+    iemNativeFixupFixedJump(pReNative, offFixup, off /*offTarget*/);
+# endif
+
+    iemNativeRegFreeTmp(pReNative, iTmpReg);
+
+#elif defined(RT_ARCH_ARM64)
+    /*
+     * if ((((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000)) >> 48) != 0)
+     *     return raisexcpt();
+     * ----
+     *     mov     x1, 0x800000000000
+     *     add     x1, x0, x1
+     *     cmp     xzr, x1, lsr 48
+     * and either:
+     *     b.ne    .Lraisexcpt
+     * or:
+     *     b.eq    .Lnoexcept
+     *     movz    x1, #instruction-number
+     *     b       .Lraisexcpt
+     * .Lnoexcept:
+     */
+    uint8_t const iTmpReg = iemNativeRegAllocTmp(pReNative, &off);
+    AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
+
+    off = iemNativeEmitLoadGprImm64(pReNative, off, iTmpReg, UINT64_C(0x800000000000));
+    off = iemNativeEmitAddTwoGprs(pReNative, off, iTmpReg, idxAddrReg);
+    off = iemNativeEmitCmpArm64(pReNative, off, ARMV8_A64_REG_XZR, idxAddrReg, true /*f64Bit*/, 48 /*cShift*/, kArmv8A64InstrShift_Lsr);
+
+# ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+    off = iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+# else
+    uint32_t const offFixup = off;
+    off = iemNativeEmitJzToFixed(pReNative, off, 0);
+    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxInstr);
+    off = iemNativeEmitJmpToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+    iemNativeFixupFixedJump(pReNative, offFixup, off /*offTarget*/);
+# endif
+
+    iemNativeRegFreeTmp(pReNative, iTmpReg);
+
+#else
+# error "Port me"
+#endif
+    return off;
+}
+
+
+/**
+ * Emits code to check if the content of @a idxAddrReg is within the limit of
+ * idxSegReg, raising a \#GP(0) if it isn't.
+ *
+ * @returns New code buffer offset, UINT32_MAX on failure.
+ * @param   pReNative       The native recompile state.
+ * @param   off             The code buffer offset.
+ * @param   idxAddrReg      The host register (32-bit) with the address to
+ *                          check.
+ * @param   idxSegReg       The segment register (X86_SREG_XXX) to check
+ *                          against.
+ * @param   idxInstr        The current instruction.
+ */
+DECLHIDDEN(uint32_t) iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                                         uint8_t idxAddrReg, uint8_t idxSegReg, uint8_t idxInstr)
+{
+    /*
+     * Make sure we don't have any outstanding guest register writes as we may
+     * raise an #GP(0) and all guest registers must be up to date in CPUMCTX.
+     */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+    /** @todo implement expand down/whatnot checking */
+    AssertReturn(idxSegReg == X86_SREG_CS, UINT32_MAX);
+
+    uint8_t const iTmpLimReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
+                                                               (IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + idxSegReg),
+                                                               kIemNativeGstRegUse_ForUpdate);
+    AssertReturn(iTmpLimReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
+
+    off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, idxAddrReg, iTmpLimReg);
+
+#ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+    off = iemNativeEmitJaToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+    RT_NOREF(idxInstr);
+#else
+    uint32_t const offFixup = off;
+    off = iemNativeEmitJbeToFixed(pReNative, off, 0);
+    off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxInstr);
+    off = iemNativeEmitJmpToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0);
+    iemNativeFixupFixedJump(pReNative, offFixup, off /*offTarget*/);
+#endif
+
+    iemNativeRegFreeTmp(pReNative, iTmpLimReg);
     return off;
 }
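Note: the add-and-shift trick in iemNativeEmitCheckGprCanonicalMaybeRaiseGp0 folds both canonical ranges into a single test: adding 2^47 maps every canonical (48-bit sign-extended) address into [0, 2^48), so bits 63:48 of the sum are zero exactly for canonical inputs. A standalone scalar model of the emitted check, with worked example values:

    #include <cassert>
    #include <cstdint>

    /* True when the address is non-canonical and must raise #GP(0). */
    static bool IsNonCanonical(uint64_t uAddr)
    {
        return ((uAddr + UINT64_C(0x800000000000)) >> 48) != 0;
    }

    int main()
    {
        assert(!IsNonCanonical(UINT64_C(0x00007fffffffffff)));  /* top of lower half */
        assert(!IsNonCanonical(UINT64_C(0xffff800000000000)));  /* bottom of upper half */
        assert( IsNonCanonical(UINT64_C(0x0000800000000000)));  /* first non-canonical */
        assert( IsNonCanonical(UINT64_C(0xffff7fffffffffff)));  /* last non-canonical */
        return 0;
    }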
     
@@ -2998 +3220 @@
  * Emits a call to a threaded worker function.
  */
-static int32_t iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
+static uint32_t iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
 {
     iemNativeRegFlushGuestShadows(pReNative, UINT64_MAX); /** @todo optimize this */
     
@@ -3080 +3302 @@
 
 /**
+ * Emits the code at the RaiseGP0 label.
+ */
+static uint32_t iemNativeEmitRaiseGp0(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+{
+    uint32_t idxLabel = iemNativeFindLabel(pReNative, kIemNativeLabelType_RaiseGp0);
+    if (idxLabel != UINT32_MAX)
+    {
+        Assert(pReNative->paLabels[idxLabel].off == UINT32_MAX);
+        pReNative->paLabels[idxLabel].off = off;
+
+        /* iemNativeHlpExecRaiseGp0(PVMCPUCC pVCpu, uint8_t idxInstr) */
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+#ifndef IEMNATIVE_WITH_INSTRUCTION_COUNTING
+        off = iemNativeEmitLoadGpr8Imm(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, 0);
+#endif
+#ifdef RT_ARCH_AMD64
+        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, (uintptr_t)iemNativeHlpExecRaiseGp0);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+
+        /* call rax */
+        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+        pbCodeBuf[off++] = 0xff;
+        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
+
+#elif defined(RT_ARCH_ARM64)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uintptr_t)iemNativeHlpExecRaiseGp0);
+        AssertReturn(off != UINT32_MAX, UINT32_MAX);
+        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        AssertReturn(pu32CodeBuf, UINT32_MAX);
+        pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
+#else
+# error "Port me"
+#endif
+
+        /* jump back to the return sequence. */
+        off = iemNativeEmitJmpToLabel(pReNative, off, iemNativeFindLabel(pReNative, kIemNativeLabelType_Return));
+
+#ifdef RT_ARCH_AMD64
+        /* int3 poison */
+        pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+        AssertReturn(pbCodeBuf, UINT32_MAX);
+        pbCodeBuf[off++] = 0xcc;
+#endif
+    }
+    return off;
+}
+
+
+/**
  * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
  */
     
@@ -3142 +3412 @@
             pbCodeBuf[off++] = RT_BYTE4(offRel);
         }
-        pbCodeBuf[off++] = 0xcc;                    /*  int3 poison */
-
-#elif RT_ARCH_ARM64
+        pbCodeBuf[off++] = 0xcc;                    /* int3 poison */
+
+#elif defined(RT_ARCH_ARM64)
         /*
          * ARM64:
     
@@ -3225 +3495 @@
     /* ldp x19, x20, [sp #IEMNATIVE_FRAME_VAR_SIZE]! ; Unallocate the variable space and restore x19+x20. */
     AssertCompile(IEMNATIVE_FRAME_VAR_SIZE < 64*8);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kPreIndex,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
                                                  ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
                                                  IEMNATIVE_FRAME_VAR_SIZE / 8);
     /* Restore x21 thru x28 + BP and LR (ret address) (SP remains unchanged in the kSigned variant). */
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(true /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_BP,  ARMV8_A64_REG_LR,  ARMV8_A64_REG_SP, 10);
     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
     
@@ -3332 +3602 @@
     /* stp x19, x20, [sp, #-IEMNATIVE_FRAME_SAVE_REG_SIZE] ; Allocate space for saving registers and place x19+x20 at the bottom. */
     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE < 64*8);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kPreIndex,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_PreIndex,
                                                  ARMV8_A64_REG_X19, ARMV8_A64_REG_X20, ARMV8_A64_REG_SP,
                                                  -IEMNATIVE_FRAME_SAVE_REG_SIZE / 8);
     /* Save x21 thru x28 (SP remains unchanged in the kSigned variant). */
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X21, ARMV8_A64_REG_X22, ARMV8_A64_REG_SP, 2);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X23, ARMV8_A64_REG_X24, ARMV8_A64_REG_SP, 4);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X25, ARMV8_A64_REG_X26, ARMV8_A64_REG_SP, 6);
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_X27, ARMV8_A64_REG_X28, ARMV8_A64_REG_SP, 8);
     /* Save the BP and LR (ret address) registers at the top of the frame. */
-    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_kSigned,
+    pu32CodeBuf[off++] = Armv8A64MkInstrStLdPair(false /*fLoad*/, 2 /*64-bit*/, kArm64InstrStLdPairType_Signed,
                                                  ARMV8_A64_REG_BP,  ARMV8_A64_REG_LR,  ARMV8_A64_REG_SP, 10);
     AssertCompile(IEMNATIVE_FRAME_SAVE_REG_SIZE / 8 == 12);
     
@@ -3365 +3635 @@
     return off;
 }
+
+
+
+/*********************************************************************************************************************************
+*   Emitters for IEM_MC_XXXX                                                                                                     *
+*********************************************************************************************************************************/
 
 
     
@@ -3429 +3705 @@
 }
 
+
 /** Same as iemRegAddToEip32AndFinishingNoFlags. */
 DECLINLINE(uint32_t) iemNativeEmitAddToEip32AndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr)
     
@@ -3466 +3743 @@
 
 
-/*
- * MC definitions for the native recompiler.
- */
+/** Same as iemRegRip64RelativeJumpS8AndFinishNoFlags,
+ *  iemRegRip64RelativeJumpS16AndFinishNoFlags and
+ *  iemRegRip64RelativeJumpS32AndFinishNoFlags. */
+DECLINLINE(uint32_t) iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                                       uint8_t cbInstr, int32_t offDisp, IEMMODE enmEffOpSize,
+                                                                       uint8_t idxInstr)
+{
+    Assert(enmEffOpSize == IEMMODE_64BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    /* We speculatively modify PC and may raise #GP(0), so make sure the right value is in CPUMCTX. */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+    /* Allocate a temporary PC register. */
+    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+    AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);
+
+    /* Perform the addition. */
+    off = iemNativeEmitAddGprImm(pReNative, off, idxPcReg, (int64_t)offDisp + cbInstr);
+
+    if (RT_LIKELY(enmEffOpSize == IEMMODE_64BIT))
+    {
+        /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
+        off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxPcReg, idxInstr);
+    }
+    else
+    {
+        /* Just truncate the result to 16-bit IP. */
+        Assert(enmEffOpSize == IEMMODE_16BIT);
+        off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
+    }
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+    /* Free but don't flush the PC register. */
+    iemNativeRegFreeTmp(pReNative, idxPcReg);
+
+    return off;
+}
+
+
+/** Same as iemRegEip32RelativeJumpS8AndFinishNoFlags,
+ *  iemRegEip32RelativeJumpS16AndFinishNoFlags and
+ *  iemRegEip32RelativeJumpS32AndFinishNoFlags. */
+DECLINLINE(uint32_t) iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                                       uint8_t cbInstr, int32_t offDisp, IEMMODE enmEffOpSize,
+                                                                       uint8_t idxInstr)
+{
+    Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
+
+    /* We speculatively modify PC and may raise #GP(0), so make sure the right value is in CPUMCTX. */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+    /* Allocate a temporary PC register. */
+    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+    AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);
+
+    /* Perform the addition. */
+    off = iemNativeEmitAddGpr32Imm(pReNative, off, idxPcReg, offDisp + cbInstr);
+
+    /* Truncate the result to 16-bit IP if the operand size is 16-bit. */
+    if (enmEffOpSize == IEMMODE_16BIT)
+    {
+        Assert(enmEffOpSize == IEMMODE_16BIT);
+        off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
+    }
+
+    /* Perform limit checking, potentially raising #GP(0) and exit the TB. */
+    off = iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, X86_SREG_CS, idxInstr);
+
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+    /* Free but don't flush the PC register. */
+    iemNativeRegFreeTmp(pReNative, idxPcReg);
+
+    return off;
+}
+
+
+/** Same as iemRegIp16RelativeJumpS8AndFinishNoFlags. */
+DECLINLINE(uint32_t) iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                                      uint8_t cbInstr, int32_t offDisp, uint8_t idxInstr)
+{
+    /* We speculatively modify PC and may raise #GP(0), so make sure the right value is in CPUMCTX. */
+    off = iemNativeRegFlushPendingWrites(pReNative, off);
+
+    /* Allocate a temporary PC register. */
+    uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc, kIemNativeGstRegUse_ForUpdate);
+    AssertReturn(idxPcReg != UINT8_MAX, UINT32_MAX);
+
+    /* Perform the addition, clamp the result, check limit (may #GP(0) + exit TB) and store the result. */
+    off = iemNativeEmitAddGpr32Imm(pReNative, off, idxPcReg, offDisp + cbInstr);
+    off = iemNativeEmitClear16UpGpr(pReNative, off, idxPcReg);
+    off = iemNativeEmitCheckGpr32AgainstSegLimitMaybeRaiseGp0(pReNative, off, idxPcReg, X86_SREG_CS, idxInstr);
+    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxPcReg, RT_UOFFSETOF(VMCPU, cpum.GstCtx.rip));
+
+    /* Free but don't flush the PC register. */
+    iemNativeRegFreeTmp(pReNative, idxPcReg);
+
+    return off;
+}
+
+
+
+/*********************************************************************************************************************************
+*   MC definitions for the native recompiler                                                                                     *
+*********************************************************************************************************************************/
 
 #define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
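Note: in the 16-bit paths above, iemNativeEmitClear16UpGpr implements the x86 IP wrap-around: the sum ip + disp + cbInstr is reduced modulo 64K before the CS limit check. A worked example with made-up values:

    /* A two-byte "jmp -16" at IP 0x0008 wraps: 0x0008 - 0x10 + 2 = 0xfffa. */
    uint16_t const uNewIp = (uint16_t)(0x0008 + (int8_t)-0x10 + 2);  /* == 0xfffa */
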
     
@@ -3489 +3868 @@
     } AssertFailedReturn(UINT32_MAX /* shouldn't be reached! */)
 
+
 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC16(a_cbInstr) \
-    return iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, a_cbInstr)
+    return iemNativeEmitAddToIp16AndFinishingNoFlags(pReNative, off, (a_cbInstr))
 
 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32(a_cbInstr) \
-    return iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, a_cbInstr)
+    return iemNativeEmitAddToEip32AndFinishingNoFlags(pReNative, off, (a_cbInstr))
 
 #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64(a_cbInstr) \
-    return iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, a_cbInstr)
+    return iemNativeEmitAddToRip64AndFinishingNoFlags(pReNative, off, (a_cbInstr))
+
+
+#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16(a_i8, a_cbInstr) \
+    return iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC32(a_i8, a_cbInstr, a_enmEffOpSize) \
+    return iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), (a_enmEffOpSize), pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC64(a_i8, a_cbInstr, a_enmEffOpSize) \
+    return iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int8_t)(a_i8), (a_enmEffOpSize), pCallEntry->idxInstr)
+
+
+#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr) \
+    return iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr) \
+    return iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), IEMMODE_16BIT, pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr) \
+    return iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (int16_t)(a_i16), IEMMODE_16BIT, pCallEntry->idxInstr)
+
+
+#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC16(a_i32, a_cbInstr) \
+    return iemNativeEmitIp16RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32(a_i32, a_cbInstr) \
+    return iemNativeEmitEip32RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), IEMMODE_32BIT, pCallEntry->idxInstr)
+
+#define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64(a_i32, a_cbInstr) \
+    return iemNativeEmitRip64RelativeJumpAndFinishingNoFlags(pReNative, off, (a_cbInstr), (a_i32), IEMMODE_64BIT, pCallEntry->idxInstr)
+
 
 
     
@@ -3584 +3995 @@
     off = iemNativeEmitEpilog(pReNative, off);
     AssertReturn(off != UINT32_MAX, pTb);
+
+    /*
+     * Generate special jump labels.
+     */
+    if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_RaiseGp0))
+    {
+        off = iemNativeEmitRaiseGp0(pReNative, off);
+        AssertReturn(off != UINT32_MAX, pTb);
+    }
 
     /*
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

r101505 → r101516

@@ -258 +258 @@
     kIemNativeLabelType_Return,
     kIemNativeLabelType_NonZeroRetOrPassUp,
+    kIemNativeLabelType_RaiseGp0,
    kIemNativeLabelType_End
 } IEMNATIVELABELTYPE;
     
@@ -476 +477 @@
     PIEMNATIVEINSTR             pInstrBuf;
 
+    /** Bitmap of the label types used. */
+    uint64_t                    bmLabelTypes;
     /** Actual number of labels in paLabels. */
     uint32_t                    cLabels;
     
@@ -558 +561 @@
 DECLHIDDEN(uint8_t)         iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
                                                  bool fPreferVolatile = true) RT_NOEXCEPT;
+DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
+                                                    bool fPreferVolatile = true) RT_NOEXCEPT;
 DECLHIDDEN(uint8_t)         iemNativeRegAllocTmpForGuest(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
                                                          IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
     
@@ -565 +570 @@
 DECLHIDDEN(void)            iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
 DECLHIDDEN(void)            iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
+DECLHIDDEN(void)            iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
 DECLHIDDEN(void)            iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
+DECLHIDDEN(uint32_t)        iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT;
 
 DECLHIDDEN(uint32_t)        iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     
@@ -755 +762 @@
 
 
+/**
+ * Emits loading a constant into a 8-bit GPR
+ * @note The AMD64 version does *NOT* clear any bits in the 8..63 range,
+ *       only the ARM64 version does that.
+ */
+DECLINLINE(uint32_t) iemNativeEmitLoadGpr8Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint8_t uImm8)
+{
+#ifdef RT_ARCH_AMD64
+    /* mov gpr, imm8 */
+    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
+    AssertReturn(pbCodeBuf, UINT32_MAX);
+    if (iGpr >= 8)
+        pbCodeBuf[off++] = X86_OP_REX_B;
+    else if (iGpr >= 4)
+        pbCodeBuf[off++] = X86_OP_REX;
+    pbCodeBuf[off++] = 0xb0 + (iGpr & 7);
+    pbCodeBuf[off++] = RT_BYTE1(uImm8);
+
+#elif RT_ARCH_ARM64
+    /* movz gpr, imm16, lsl #0 */
+    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    AssertReturn(pu32CodeBuf, UINT32_MAX);
+    pu32CodeBuf[off++] = UINT32_C(0xd2800000) | (UINT32_C(0) << 21) | ((uint32_t)uImm8 << 5) | iGpr;
+
+#else
+# error "port me"
+#endif
+    return off;
+}
+
+
 #ifdef RT_ARCH_AMD64
 /**
     
@@ -793 +831 @@
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
         AssertReturn(pu32CodeBuf, UINT32_MAX);
-        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGrp, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
+        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGpr, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
     }
     else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
     
@@ -799 +837 @@
         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
         AssertReturn(pu32CodeBuf, UINT32_MAX);
-        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGrp, IEMNATIVE_REG_FIXED_PCPUMCTX,
+        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGpr, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                       (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
     }
     
    12571295
    12581296/**
     1297 * Emits adding a 64-bit GPR to another, storing the result in the frist.
     1298 * @note The AMD64 version sets flags.
     1299 */
     1300DECLINLINE(uint32_t ) iemNativeEmitAddTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
     1301{
     1302#if defined(RT_ARCH_AMD64)
     1303    /* add Gv,Ev */
     1304    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     1305    AssertReturn(pbCodeBuf, UINT32_MAX);
     1306    pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
     1307                     | (iGprAddend < 8 ? 0 : X86_OP_REX_B);
     1308    pbCodeBuf[off++] = 0x04;
     1309    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprAddend & 7);
     1310
     1311#elif defined(RT_ARCH_ARM64)
     1312    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1313    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1314    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iGprAddend);
     1315
     1316#else
     1317# error "Port me"
     1318#endif
     1319    return off;
     1320}
     1321
     1322
     1323/**
     12591324 * Emits a 64-bit GPR addition with an 8-bit signed immediate.
    12601325 */
     
    13241389    else
    13251390        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint8_t)-iImm8, false /*f64Bit*/);
     1391
     1392#else
     1393# error "Port me"
     1394#endif
     1395    return off;
     1396}
     1397
     1398
     1399/**
      1400 * Emits a 64-bit GPR addition with a 64-bit signed addend.
     1401 */
     1402DECLINLINE(uint32_t ) iemNativeEmitAddGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iAddend)
     1403{
     1404#if defined(RT_ARCH_AMD64)
     1405    if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
     1406        return iemNativeEmitAddGprImm8(pReNative, off, iGprDst, (int8_t)iAddend);
     1407
     1408    if (iAddend <= INT32_MAX && iAddend >= INT32_MIN)
     1409    {
      1410        /* add gpr, imm32 */
     1411        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     1412        AssertReturn(pbCodeBuf, UINT32_MAX);
     1413        pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
     1414        pbCodeBuf[off++] = 0x81;
     1415        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
     1416        pbCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
     1417        pbCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
     1418        pbCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
     1419        pbCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
     1420    }
     1421    else
     1422    {
     1423        /* Best to use a temporary register to deal with this in the simplest way: */
     1424        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
     1425        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
     1426
     1427        /* add dst, tmpreg  */
     1428        uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     1429        AssertReturn(pbCodeBuf, UINT32_MAX);
     1430        pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
     1431                         | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
     1432        pbCodeBuf[off++] = 0x03;
     1433        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iTmpReg & 7);
     1434
     1435        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     1436    }
     1437
     1438#elif defined(RT_ARCH_ARM64)
     1439    if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
     1440    {
     1441        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1442        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1443        if (iAddend >= 0)
     1444            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend);
     1445        else
     1446            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend);
     1447    }
     1448    else
     1449    {
     1450        /* Use temporary register for the immediate. */
     1451        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
     1452        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
     1453
     1454        /* add gprdst, gprdst, tmpreg */
     1455        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1456        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1457        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg);
     1458
     1459        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     1460    }
     1461
     1462#else
     1463# error "Port me"
     1464#endif
     1465    return off;
     1466}
     1467
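The helper thus picks the cheapest of three strategies based on the addend's range. The sketch below summarizes the selection logic (mine; the sign-extended 83 /0 ib form for the 8-bit path is an assumption, since iemNativeEmitAddGprImm8 is not shown in full here):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper illustrating the three strategies used above. */
    static const char *AddGprImmStrategy(int64_t iAddend)
    {
        if (iAddend >= INT8_MIN && iAddend <= INT8_MAX)
            return "add r64, imm8  (presumably REX.W 83 /0 ib, 4 bytes)";
        if (iAddend >= INT32_MIN && iAddend <= INT32_MAX)
            return "add r64, imm32 (REX.W 81 /0 id, 7 bytes)";
        return "load imm64 into a temporary register, then add r64, r64 (REX.W 03 /r)";
    }

    int main(void)
    {
        printf("%s\n", AddGprImmStrategy(16));          /* imm8 path          */
        printf("%s\n", AddGprImmStrategy(0x12345678));  /* imm32 path         */
        printf("%s\n", AddGprImmStrategy(INT64_MAX));   /* temp-register path */
        return 0;
    }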
     1468
     1469/**
      1470 * Emits a 32-bit GPR addition with a 32-bit signed immediate.
     1471 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
     1472 */
     1473DECLINLINE(uint32_t ) iemNativeEmitAddGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iAddend)
     1474{
     1475#if defined(RT_ARCH_AMD64)
     1476    if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
     1477        return iemNativeEmitAddGpr32Imm8(pReNative, off, iGprDst, (int8_t)iAddend);
     1478
      1479    /* add gpr, imm32 */
     1480    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
     1481    AssertReturn(pbCodeBuf, UINT32_MAX);
     1482    if (iGprDst >= 8)
     1483        pbCodeBuf[off++] = X86_OP_REX_B;
     1484    pbCodeBuf[off++] = 0x81;
     1485    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
     1486    pbCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
     1487    pbCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
     1488    pbCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
     1489    pbCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
     1490
     1491#elif defined(RT_ARCH_ARM64)
     1492    if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
     1493    {
     1494        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1495        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1496        if (iAddend >= 0)
     1497            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend, false /*f64Bit*/);
     1498        else
     1499            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend, false /*f64Bit*/);
     1500    }
     1501    else
     1502    {
     1503        /* Use temporary register for the immediate. */
     1504        uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint32_t)iAddend);
     1505        AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->aHstRegs), UINT32_MAX);
     1506
     1507        /* add gprdst, gprdst, tmpreg */
     1508        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1509        AssertReturn(pu32CodeBuf, UINT32_MAX);
     1510        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
     1511
     1512        iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     1513    }
    13261514
    13271515#else
     
    13501538    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    13511539    AssertReturn(pu32CodeBuf, UINT32_MAX);
    1352     /* This produces 0xffff; 0x4f: N=1 imms=001111 (immr=0) => size=64 length=15 */
    1353     pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGrpDst, iGrpDst, 0x4f);
     1540# if 1
     1541    pu32CodeBuf[off++] = Armv8A64MkInstrUxth(iGprDst, iGprDst);
     1542# else
     1543    ///* This produces 0xffff; 0x4f: N=1 imms=001111 (immr=0) => size=64 length=15 */
     1544    //pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 0x4f);
     1545# endif
    13541546#else
    13551547# error "Port me"
     
    13581550}
    13591551
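The switch from the AND-immediate to UXTH is a nice simplification: UXTH is an alias of UBFM Wd, Wn, #0, #15, so none of the tricky N:immr:imms logical-immediate encoding is needed. A compile-time sanity check (my own encoding arithmetic, checked against the ARMv8 ARM):

    /* uxth w1, w1  ==  ubfm w1, w1, #0, #15  =>  0x53003C21 */
    _Static_assert((0x53000000u     /* UBFM, 32-bit variant (sf=0, opc=10) */
                    | ( 0u << 16)   /* immr = 0: no right rotate           */
                    | (15u << 10)   /* imms = 15: keep bits 0..15          */
                    | ( 1u << 5)    /* Rn = w1                             */
                    |   1u)         /* Rd = w1                             */
                   == 0x53003C21u, "uxth w1, w1 encoding");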
     1552
     1553/**
     1554 * Emits code for (unsigned) shifting a GPR a fixed number of bits to the right.
     1555 */
     1556DECLINLINE(uint32_t ) iemNativeEmitShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
     1557{
     1558#if defined(RT_ARCH_AMD64)
     1559    /* shr dst, cShift */
     1560    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
     1561    AssertReturn(pbCodeBuf, UINT32_MAX);
     1562    pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
      1563    pbCodeBuf[off++] = 0xc1; /* group 2 Ev,Ib; 0xc0 would be the Eb,Ib byte form */
     1564    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
     1565    pbCodeBuf[off++] = cShift;
     1566    Assert(cShift > 0 && cShift < 64);
     1567
     1568#elif defined(RT_ARCH_ARM64)
     1569    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1570    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1571    pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift);
     1572#else
     1573# error "Port me"
     1574#endif
     1575    return off;
     1576}
     1577
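A worked byte sequence for the AMD64 path (mine, disassembler-verified): iGprDst=2 and cShift=4 give shr rdx, 4 = 48 C1 EA 04.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t const iGprDst = 2, cShift = 4;
        uint8_t const ab[4] = {
            0x48,                                        /* REX.W (iGprDst < 8)    */
            0xc1,                                        /* group 2: Ev, imm8      */
            (uint8_t)(0xc0 | (5 << 3) | (iGprDst & 7)),  /* mod=11, /5 selects SHR */
            cShift
        };
        assert(ab[2] == 0xea);   /* shr rdx, 4 => 48 C1 EA 04 */
        return 0;
    }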
     1578
     1579#ifdef RT_ARCH_ARM64
     1580/**
     1581 * Emits an ARM64 compare instruction.
     1582 */
     1583DECLINLINE(uint32_t) iemNativeEmitCmpArm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight,
     1584                                           bool f64Bit = true, uint32_t cShift = 0,
     1585                                           ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsr)
     1586{
     1587    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1588    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1589    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR /*iRegResult*/, iGprLeft, iGprRight,
     1590                                                  f64Bit, true /*fSetFlags*/, cShift, enmShift);
     1591    return off;
     1592}
     1593#endif
     1594
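CMP on ARM64 is simply SUBS with the zero register as destination, which is exactly what the helper builds. A compile-time check of the shifted-register SUBS layout (my arithmetic, per the ARMv8 ARM):

    /* cmp x1, x2  ==  subs xzr, x1, x2  =>  0xEB02003F */
    _Static_assert((0xEB000000u        /* SUBS (shifted reg), sf=1 */
                    | ( 2u << 16)      /* Rm = x2                  */
                    | ( 0u << 10)      /* imm6 shift = 0           */
                    | ( 1u << 5)       /* Rn = x1                  */
                    |  31u)            /* Rd = xzr                 */
                   == 0xEB02003Fu, "cmp x1, x2 encoding");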
     1595
     1596/**
      1597 * Emits a compare of two 64-bit GPRs, setting status flags/whatever for use
      1598 * with conditional instructions.
     1599 */
     1600DECLINLINE(uint32_t) iemNativeEmitCmpGprWithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
     1601{
     1602#ifdef RT_ARCH_AMD64
     1603    /* cmp Gv, Ev */
     1604    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     1605    AssertReturn(pbCodeBuf, UINT32_MAX);
     1606    pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
     1607    pbCodeBuf[off++] = 0x3b;
     1608    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
     1609
     1610#elif defined(RT_ARCH_ARM64)
      1611    off = iemNativeEmitCmpArm64(pReNative, off, iGprLeft, iGprRight, true /*f64Bit*/);
     1612
     1613#else
     1614# error "Port me!"
     1615#endif
     1616    return off;
     1617}
     1618
     1619
     1620/**
      1621 * Emits a compare of two 32-bit GPRs, setting status flags/whatever for use
      1622 * with conditional instructions.
     1623 */
     1624DECLINLINE(uint32_t) iemNativeEmitCmpGpr32WithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1625                                                  uint8_t iGprLeft, uint8_t iGprRight)
     1626{
     1627#ifdef RT_ARCH_AMD64
     1628    /* cmp Gv, Ev */
     1629    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
     1630    AssertReturn(pbCodeBuf, UINT32_MAX);
     1631    if (iGprLeft >= 8 || iGprRight >= 8)
     1632        pbCodeBuf[off++] = (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
     1633    pbCodeBuf[off++] = 0x3b;
     1634    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
     1635
     1636#elif defined(RT_ARCH_ARM64)
     1637    off = iemNativeEmitCmpArm64(pReNative, off, iGprLeft, iGprRight, false /*f64Bit*/);
     1638
     1639#else
     1640# error "Port me!"
     1641#endif
     1642    return off;
     1643}
     1644
     1645
     1646/**
      1647 * Emits a JMP rel32 / B imm26 to the given label (ASSUMED requiring fixup).
     1648 */
     1649DECLINLINE(uint32_t) iemNativeEmitJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
     1650{
     1651#ifdef RT_ARCH_AMD64
      1652    /* jmp rel32 */
     1653    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
     1654    AssertReturn(pbCodeBuf, UINT32_MAX);
     1655    pbCodeBuf[off++] = 0xe9;
     1656    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
     1657    pbCodeBuf[off++] = 0xfe;
     1658    pbCodeBuf[off++] = 0xff;
     1659    pbCodeBuf[off++] = 0xff;
     1660    pbCodeBuf[off++] = 0xff;
     1661
     1662#elif defined(RT_ARCH_ARM64)
     1663    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1664    AssertReturn(pu32CodeBuf, UINT32_MAX);
      1665    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm26At0), UINT32_MAX); /* B encodes imm26 at bits 0..25 */
     1666    pu32CodeBuf[off++] = Armv8A64MkInstrB(-1);
     1667
     1668#else
     1669# error "Port me!"
     1670#endif
     1671    return off;
     1672}
     1673
     1674
     1675/**
      1676 * Emits a JMP rel32 / B imm26 to a new undefined label.
     1677 */
     1678DECLINLINE(uint32_t) iemNativeEmitJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1679                                                IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     1680{
     1681    uint32_t const idxLabel = iemNativeMakeLabel(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
     1682    AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
     1683    return iemNativeEmitJmpToLabel(pReNative, off, idxLabel);
     1684}
     1685
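Putting the pieces together, the intended flow is: create a label whose offset is not yet known, emit the branch (which records a fixup), and patch once the label is defined. A sketch in the style of the surrounding code, assuming the IEM recompiler headers; the label type name is hypothetical:

    /* Emit a forward jump whose target is resolved later via the recorded fixup. */
    static uint32_t iemNativeEmitJmpForwardExample(PIEMRECOMPILERSTATE pReNative, uint32_t off)
    {
        uint32_t const idxLabel = iemNativeMakeLabel(pReNative, kIemNativeLabelType_Return /* hypothetical */,
                                                     UINT32_MAX /*offWhere: not yet known*/, 0 /*uData*/);
        AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
        return iemNativeEmitJmpToLabel(pReNative, off, idxLabel); /* placeholder + fixup */
    }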
     1686/** Condition type. */
     1687#ifdef RT_ARCH_AMD64
     1688typedef uint8_t         IEMNATIVEINSTRCOND;
     1689#elif defined(RT_ARCH_ARM64)
     1690typedef ARMV8INSTRCOND  IEMNATIVEINSTRCOND;
     1691#else
     1692# error "Port me!"
     1693#endif
     1694
     1695
     1696/**
     1697 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
     1698 */
     1699DECLINLINE(uint32_t) iemNativeEmitJccToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1700                                             uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
     1701{
     1702#ifdef RT_ARCH_AMD64
     1703    /* jcc rel32 */
     1704    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
     1705    AssertReturn(pbCodeBuf, UINT32_MAX);
     1706    pbCodeBuf[off++] = 0x0f;
     1707    pbCodeBuf[off++] = enmCond | 0x80;
     1708    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
     1709    pbCodeBuf[off++] = 0x00;
     1710    pbCodeBuf[off++] = 0x00;
     1711    pbCodeBuf[off++] = 0x00;
     1712    pbCodeBuf[off++] = 0x00;
     1713
     1714#elif defined(RT_ARCH_ARM64)
     1715    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1716    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1717    AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
     1718    pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, -1);
     1719
     1720#else
     1721# error "Port me!"
     1722#endif
     1723    return off;
     1724}
     1725
     1726
     1727/**
     1728 * Emits a Jcc rel32 / B.cc imm19 to a new label.
     1729 */
     1730DECLINLINE(uint32_t) iemNativeEmitJccToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1731                                                IEMNATIVELABELTYPE enmLabelType, uint16_t uData, IEMNATIVEINSTRCOND enmCond)
     1732{
     1733    uint32_t const idxLabel = iemNativeMakeLabel(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
     1734    AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
     1735    return iemNativeEmitJccToLabel(pReNative, off, idxLabel, enmCond);
     1736}
     1737
     1738
     1739/**
     1740 * Emits a JZ/JE rel32 / B.EQ imm19 to a new label.
     1741 */
     1742DECLINLINE(uint32_t) iemNativeEmitJzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1743                                               IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     1744{
     1745#ifdef RT_ARCH_AMD64
     1746    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, 0x4);
     1747#elif defined(RT_ARCH_ARM64)
     1748    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Eq);
     1749#else
     1750# error "Port me!"
     1751#endif
     1752}
     1753
     1754
     1755/**
     1756 * Emits a JNZ/JNE rel32 / B.NE imm19 to a new label.
     1757 */
     1758DECLINLINE(uint32_t) iemNativeEmitJnzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1759                                                IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     1760{
     1761#ifdef RT_ARCH_AMD64
     1762    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, 0x5);
     1763#elif defined(RT_ARCH_ARM64)
     1764    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ne);
     1765#else
     1766# error "Port me!"
     1767#endif
     1768}
     1769
     1770
     1771/**
     1772 * Emits a JBE/JNA rel32 / B.LS imm19 to a new label.
     1773 */
     1774DECLINLINE(uint32_t) iemNativeEmitJbeToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1775                                                IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     1776{
     1777#ifdef RT_ARCH_AMD64
     1778    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, 0x6);
     1779#elif defined(RT_ARCH_ARM64)
     1780    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ls);
     1781#else
     1782# error "Port me!"
     1783#endif
     1784}
     1785
     1786
     1787/**
     1788 * Emits a JA/JNBE rel32 / B.HI imm19 to a new label.
     1789 */
     1790DECLINLINE(uint32_t) iemNativeEmitJaToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1791                                               IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
     1792{
     1793#ifdef RT_ARCH_AMD64
     1794    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, 0x7);
     1795#elif defined(RT_ARCH_ARM64)
     1796    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Hi);
     1797#else
     1798# error "Port me!"
     1799#endif
     1800}
     1801
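Typical use of these condition-specific helpers pairs them with one of the compare emitters above. A usage sketch only (function and label-type names are hypothetical; assumes the IEM recompiler headers):

    /* Compare two GPRs and branch to a (hypothetical) raise-#GP label on mismatch. */
    static uint32_t iemNativeEmitCheckEqualsExample(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                    uint8_t iGprExpected, uint8_t iGprActual)
    {
        off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprExpected, iGprActual);
        AssertReturn(off != UINT32_MAX, UINT32_MAX);
        return iemNativeEmitJnzToNewLabel(pReNative, off, kIemNativeLabelType_RaiseGp0 /* hypothetical */);
    }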
     1802
     1803/**
     1804 * Emits a Jcc rel32 / B.cc imm19 with a fixed displacement.
      1805 * How @a offTarget is applied is target specific.
     1806 */
     1807DECLINLINE(uint32_t) iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off,
     1808                                             int32_t offTarget, IEMNATIVEINSTRCOND enmCond)
     1809{
     1810#ifdef RT_ARCH_AMD64
     1811    /* jcc rel32 */
     1812    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
     1813    AssertReturn(pbCodeBuf, UINT32_MAX);
     1814    if (offTarget < 128 && offTarget >= -128)
     1815    {
      1816        pbCodeBuf[off++] = enmCond | 0x70;  /* jcc rel8 has no 0x0f prefix; iemNativeFixupFixedJump relies on that */
      1817        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
     1819    }
     1820    else
     1821    {
     1822        pbCodeBuf[off++] = 0x0f;
     1823        pbCodeBuf[off++] = enmCond | 0x80;
     1824        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
     1825        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offTarget);
     1826        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offTarget);
     1827        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offTarget);
     1828    }
     1829
     1830#elif defined(RT_ARCH_ARM64)
     1831    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1832    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1833    pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, offTarget);
     1834
     1835#else
     1836# error "Port me!"
     1837#endif
     1838    return off;
     1839}
     1840
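The condition values passed in are the raw x86 condition nibbles, so ORing with 0x70 or 0x80 forms the rel8 opcode and the second byte of the 0F-prefixed rel32 opcode respectively. A sanity sketch (mine):

    #include <assert.h>

    int main(void)
    {
        assert((0x4 | 0x70) == 0x74 && (0x4 | 0x80) == 0x84);   /* jz/je:   74 ib / 0F 84 id */
        assert((0x5 | 0x70) == 0x75 && (0x5 | 0x80) == 0x85);   /* jnz/jne: 75 ib / 0F 85 id */
        assert((0x6 | 0x70) == 0x76 && (0x6 | 0x80) == 0x86);   /* jbe/jna: 76 ib / 0F 86 id */
        assert((0x7 | 0x70) == 0x77 && (0x7 | 0x80) == 0x87);   /* ja/jnbe: 77 ib / 0F 87 id */
        return 0;
    }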
     1841
     1842/**
     1843 * Emits a JZ/JE rel32 / B.EQ imm19 with a fixed displacement.
      1844 * How @a offTarget is applied is target specific.
     1845 */
     1846DECLINLINE(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     1847{
     1848#ifdef RT_ARCH_AMD64
     1849    return iemNativeEmitJccToFixed(pReNative, off, offTarget, 0x4);
     1850#elif defined(RT_ARCH_ARM64)
     1851    return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Eq);
     1852#else
     1853# error "Port me!"
     1854#endif
     1855}
     1856
     1857
     1858/**
     1859 * Emits a JNZ/JNE rel32 / B.NE imm19 with a fixed displacement.
      1860 * How @a offTarget is applied is target specific.
     1861 */
     1862DECLINLINE(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     1863{
     1864#ifdef RT_ARCH_AMD64
     1865    return iemNativeEmitJccToFixed(pReNative, off, offTarget, 0x5);
     1866#elif defined(RT_ARCH_ARM64)
     1867    return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ne);
     1868#else
     1869# error "Port me!"
     1870#endif
     1871}
     1872
     1873
     1874/**
     1875 * Emits a JBE/JNA rel32 / B.LS imm19 with a fixed displacement.
      1876 * How @a offTarget is applied is target specific.
     1877 */
     1878DECLINLINE(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     1879{
     1880#ifdef RT_ARCH_AMD64
     1881    return iemNativeEmitJccToFixed(pReNative, off, offTarget, 0x6);
     1882#elif defined(RT_ARCH_ARM64)
     1883    return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ls);
     1884#else
     1885# error "Port me!"
     1886#endif
     1887}
     1888
     1889
     1890/**
      1891 * Emits a JA/JNBE rel32 / B.HI imm19 with a fixed displacement.
      1892 * How @a offTarget is applied is target specific.
     1893 */
     1894DECLINLINE(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
     1895{
     1896#ifdef RT_ARCH_AMD64
     1897    return iemNativeEmitJccToFixed(pReNative, off, offTarget, 0x7);
     1898#elif defined(RT_ARCH_ARM64)
     1899    return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Hi);
     1900#else
     1901# error "Port me!"
     1902#endif
     1903}
     1904
     1905
     1906/**
     1907 * Fixes up a conditional jump to a fixed label.
     1908 * @see  iemNativeEmitJnzToFixed, iemNativeEmitJzToFixed, ...
     1909 */
     1910DECLINLINE(void) iemNativeFixupFixedJump(PIEMRECOMPILERSTATE pReNative, uint32_t offFixup, uint32_t offTarget)
     1911{
     1912# if defined(RT_ARCH_AMD64)
     1913    uint8_t * const pbCodeBuf = pReNative->pInstrBuf;
     1914    if (pbCodeBuf[offFixup] != 0x0f)
     1915    {
     1916        Assert((uint8_t)(pbCodeBuf[offFixup] - 0x70) <= 0x10);
     1917        pbCodeBuf[offFixup + 1] = (uint8_t)(offTarget - (offFixup + 2));
     1918        Assert(pbCodeBuf[offFixup + 1] == offTarget - (offFixup + 2));
     1919    }
     1920    else
     1921    {
     1922        Assert((uint8_t)(pbCodeBuf[offFixup + 1] - 0x80) <= 0x10);
     1923        uint32_t const offRel32 = offTarget - (offFixup + 6);
     1924        pbCodeBuf[offFixup + 2] = RT_BYTE1(offRel32);
     1925        pbCodeBuf[offFixup + 3] = RT_BYTE2(offRel32);
     1926        pbCodeBuf[offFixup + 4] = RT_BYTE3(offRel32);
     1927        pbCodeBuf[offFixup + 5] = RT_BYTE4(offRel32);
     1928    }
     1929
     1930# elif defined(RT_ARCH_ARM64)
     1931    uint32_t * const pu32CodeBuf = pReNative->pInstrBuf;
     1932    Assert(RT_ABS((int32_t)(offTarget - offFixup)) < RT_BIT_32(18)); /* off by one for negative jumps, but not relevant here */
     1933    pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & ~((RT_BIT_32(19) - 1U) << 5))
     1934                          | (((offTarget - offFixup) & (RT_BIT_32(19) - 1U)) << 5);
     1935
     1936# endif
     1937}
     1938
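A standalone sketch of the ARM64 patch arithmetic above (assumptions mine: offsets are in instruction units and the imm19 field sits at bits 5..23, two's complement for backward jumps):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t PatchBCondImm19(uint32_t uInstr, uint32_t offFixup, uint32_t offTarget)
    {
        uint32_t const fImm19 = (UINT32_C(1) << 19) - 1;
        return (uInstr & ~(fImm19 << 5))                      /* clear old imm19    */
             | (((offTarget - offFixup) & fImm19) << 5);      /* insert new imm19   */
    }

    int main(void)
    {
        /* b.eq emitted with the -1 placeholder (see iemNativeEmitJccToLabel),
           then retargeted 8 instructions forward: */
        uint32_t const uPatched = PatchBCondImm19(0x54ffffe0u /* b.eq . - 4 */, 100, 108);
        assert(uPatched == (0x54000000u | (8u << 5)));        /* b.eq . + 32 */
        return 0;
    }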
     1939
     1940
    13601941/** @} */
    13611942