VirtualBox

Changeset 101585 in vbox


Ignore:
Timestamp:
Oct 25, 2023 10:09:38 AM (18 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
159666
Message:

VMM/IEM: Native IEM_MC_SUB_GREG_U16, IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET and IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET translation. Covers 16-bit loop instructions. bugref:10371

Location:
trunk
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/include/iprt/armv8.h

    r101544 r101585  
    27292729{
    27302730    Assert(cImm6Ror < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegResult < 32); Assert(u2Opc < 4);
    2731     Assert(uImm6S < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegSrc    < 32); Assert(uN1 <= (unsigned)f64Bit);
     2731    Assert(uImm6S   < (f64Bit ? UINT32_C(0x3f) : UINT32_C(0x1f))); Assert(iRegSrc    < 32); Assert(uN1 <= (unsigned)f64Bit);
    27322732    return ((uint32_t)f64Bit   << 31)
    27332733         | (u2Opc              << 29)
     
    27412741
    27422742
    2743 /** A64: Encodes a SBFM instruction immediates.
     2743/** A64: Encodes a SBFM instruction.
    27442744 * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
    2745 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrSbfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
    2746                                                    bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
     2745DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrSbfm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
     2746                                                bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
    27472747{
    27482748    return Armv8A64MkInstrBitfieldImm(0, iRegResult, iRegSrc, cImm6Ror, uImm6S, f64Bit, uN1 == UINT32_MAX ? f64Bit : uN1);
     
    27502750
    27512751
    2752 /** A64: Encodes a BFM instruction immediates.
     2752/** A64: Encodes a BFM instruction.
    27532753 * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
    2754 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
     2754DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBfm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
    27552755                                                  bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
    27562756{
     
    27592759
    27602760
    2761 /** A64: Encodes an UBFM instruction immediates.
     2761/** A64: Encodes a BFI instruction.
    27622762 * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
    2763 DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrUbfmImm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
    2764                                                    bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
     2763DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrBfi(uint32_t iRegResult, uint32_t iRegSrc,
     2764                                               uint32_t offFirstBit, uint32_t cBitsWidth, bool f64Bit = true)
     2765{
     2766    Assert(cBitsWidth > 0U); Assert(cBitsWidth < (f64Bit ? 64U : 32U)); Assert(offFirstBit < (f64Bit ? 64U : 32U));
     2767    return Armv8A64MkInstrBfm(iRegResult, iRegSrc, (uint32_t)-(int32_t)cBitsWidth & (f64bit ? 0x3f : 0x1f),
     2768                              cBitsWidth - 1, f64Bit);
     2769}
     2770
     2771
     2772/** A64: Encodes an UBFM instruction.
     2773 * @see Armv8A64MkInstrBitfieldImm for parameter details.  */
     2774DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrUbfm(uint32_t iRegResult, uint32_t iRegSrc, uint32_t cImm6Ror, uint32_t uImm6S,
     2775                                                bool f64Bit = true, uint32_t uN1 = UINT32_MAX)
    27652776{
    27662777    return Armv8A64MkInstrBitfieldImm(2, iRegResult, iRegSrc, cImm6Ror, uImm6S, f64Bit, uN1 == UINT32_MAX ? f64Bit : uN1);
     
    28832894
    28842895
/** Extension option for 'extended register' instructions.
 *  The enumerator values are used directly as the 3-bit 'option' field of the
 *  add/sub (extended register) encoding (see Armv8A64MkInstrAddSubRegExtend,
 *  which shifts this value to bits 15:13). */
typedef enum ARMV8A64INSTREXTEND
{
    kArmv8A64InstrExtend_UxtB = 0,  /**< Unsigned extend byte. */
    kArmv8A64InstrExtend_UxtH,      /**< Unsigned extend halfword. */
    kArmv8A64InstrExtend_UxtW,      /**< Unsigned extend word. */
    kArmv8A64InstrExtend_UxtX,      /**< Unsigned extend doubleword (no-op for 64-bit). */
    kArmv8A64InstrExtend_SxtB,      /**< Signed extend byte. */
    kArmv8A64InstrExtend_SxtH,      /**< Signed extend halfword. */
    kArmv8A64InstrExtend_SxtW,      /**< Signed extend word. */
    kArmv8A64InstrExtend_SxtX,      /**< Signed extend doubleword (no-op for 64-bit). */
    /** The default is either UXTW or UXTX depending on whether the instruction
     *  is in 32-bit or 64-bit mode.  Thus, this needs to be resolved according
     *  to the f64Bit value. */
    kArmv8A64InstrExtend_Default
} ARMV8A64INSTREXTEND;
     2912
     2913
     2914/**
     2915 * A64: Encodes either add, adds, sub or subs with extended register encoding.
     2916 *
     2917 * @returns The encoded instruction.
     2918 * @param   fSub                    true for sub and subs, false for add and
     2919 *                                  adds.
     2920 * @param   iRegResult              The register to store the result in.
     2921 *                                  SP is NOT valid, but ZR is.
     2922 * @param   iRegSrc1                The register containing the augend (@a fSub
     2923 *                                  = false) or minuend (@a fSub = true).
     2924 *                                  SP is valid, but ZR is NOT.
     2925 * @param   iRegSrc2                The register containing the addened (@a fSub
     2926 *                                  = false) or subtrahend (@a fSub = true).
     2927 *                                  SP is NOT valid, but ZR is.
     2928 * @param   f64Bit                  true for 64-bit GRPs (default), false for
     2929 *                                  32-bit GPRs.
     2930 * @param   fSetFlags               Whether to set flags (adds / subs) or not
     2931 *                                  (add / sub - default).
     2932 * @param   enmExtend               The type of extension to apply to @a
     2933 *                                  iRegSrc2.
     2934 * @param   cShift                  The left shift count to apply to @a iRegSrc2
     2935 *                                  after enmExtend processing is done.
     2936 *                                  Max shift is 4 for some reason.
     2937 */
     2938DECL_FORCE_INLINE(uint32_t) Armv8A64MkInstrAddSubRegExtend(bool fSub, uint32_t iRegResult, uint32_t iRegSrc1, uint32_t iRegSrc2,
     2939                                                           bool f64Bit = true, bool fSetFlags = false,
     2940                                                           ARMV8A64INSTREXTEND enmExtend = kArmv8A64InstrExtend_Default,
     2941                                                           uint32_t cShift = 0)
     2942{
     2943    if (enmExtend == kArmv8A64InstrExtend_Default)
     2944        enmExtend = f64Bit ? kArmv8A64InstrExtend_UxtW : kArmv8A64InstrExtend_UxtX;
     2945    Assert(iRegResult < 32); Assert(iRegSrc1 < 32); Assert(iRegSrc2 < 32); Assert(cShift <= 4);
     2946
     2947    return ((uint32_t)f64Bit       << 31)
     2948         | ((uint32_t)fSub         << 30)
     2949         | ((uint32_t)fSetFlags    << 29)
     2950         | UINT32_C(0x0b200000)
     2951         | (iRegSrc2               << 16)
     2952         | ((uint32_t)enmExtend    << 13)
     2953         | (cShift                 << 10)
     2954         | (iRegSrc1               <<  5)
     2955         | iRegResult;
     2956}
     2957
     2958
    28852959/**
    28862960 * A64: Encodes a B (unconditional branch w/ imm) instruction.
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py

    r101584 r101585  
    28812881    'IEM_MC_FPU_TO_MMX_MODE':                                    (McBlock.parseMcGeneric,           True,  False, ),
    28822882    'IEM_MC_IF_CX_IS_NZ':                                        (McBlock.parseMcGenericCond,       True,  True,  ),
    2883     'IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET':                    (McBlock.parseMcGenericCond,       True,  False, ),
    2884     'IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET':                        (McBlock.parseMcGenericCond,       True,  False, ),
     2883    'IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET':                    (McBlock.parseMcGenericCond,       True,  True, ),
     2884    'IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET':                        (McBlock.parseMcGenericCond,       True,  True, ),
    28852885    'IEM_MC_IF_ECX_IS_NZ':                                       (McBlock.parseMcGenericCond,       True,  True,  ),
    28862886    'IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET':                   (McBlock.parseMcGenericCond,       True,  False, ),
     
    30753075    'IEM_MC_STORE_YREG_U32_ZX_VLMAX':                            (McBlock.parseMcGeneric,           True,  False, ),
    30763076    'IEM_MC_STORE_YREG_U64_ZX_VLMAX':                            (McBlock.parseMcGeneric,           True,  False, ),
    3077     'IEM_MC_SUB_GREG_U16':                                       (McBlock.parseMcGeneric,           True,  False, ),
     3077    'IEM_MC_SUB_GREG_U16':                                       (McBlock.parseMcGeneric,           True,  True, ),
    30783078    'IEM_MC_SUB_GREG_U32':                                       (McBlock.parseMcGeneric,           True,  False, ),
    30793079    'IEM_MC_SUB_GREG_U64':                                       (McBlock.parseMcGeneric,           True,  False, ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r101584 r101585  
    46064606}
    46074607
#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
    off = iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, true /*fCheckIfSet*/); \
    AssertReturn(off != UINT32_MAX, UINT32_MAX); \
    do {

#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
    off = iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(pReNative, off, a_fBit, false /*fCheckIfSet*/); \
    AssertReturn(off != UINT32_MAX, UINT32_MAX); \
    do {

/** Emits code for IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET and
 *  IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET: take the if-block when CX (the low
 *  16 bits of RCX) is non-zero AND the given EFLAGS bit matches the requested
 *  state; otherwise branch to the else-label.
 *  @note fBitInEfl must be a single-bit mask (asserted below). */
DECLINLINE(uint32_t) iemNativeEmitIfCxIsNotZeroAndTestEflagsBit(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                                 uint32_t fBitInEfl, bool fCheckIfSet)
{
    PIEMNATIVECOND pEntry = iemNativeCondPushIf(pReNative);
    AssertReturn(pEntry, UINT32_MAX);

    /* We have to load both RCX and EFLAGS before we can start branching,
       otherwise we'll end up in the else-block with an inconsistent
       register allocator state.
       Doing EFLAGS first as it's more likely to be loaded, right? */
    uint8_t const idxEflReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_EFlags,
                                                              kIemNativeGstRegUse_ReadOnly);
    AssertReturn(idxEflReg != UINT8_MAX, UINT32_MAX);

    uint8_t const idxGstRcxReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + X86_GREG_xCX),
                                                                 kIemNativeGstRegUse_ReadOnly);
    AssertReturn(idxGstRcxReg != UINT8_MAX, UINT32_MAX);

    /** @todo we could reduce this to a single branch instruction by spending a
     *        temporary register and some setnz stuff.  Not sure if loops are
     *        worth it. */
    /* Check CX: test the low 16 bits of RCX, jump to the else-block if all are zero. */
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, idxGstRcxReg, UINT16_MAX, pEntry->idxLabelElse);

    /* Check the EFlags bit: jump to the else-block when it is in the "wrong" state
       (set when we want it clear, clear when we want it set). */
    unsigned const iBitNo = ASMBitFirstSetU32(fBitInEfl) - 1;
    Assert(RT_BIT_32(iBitNo) == fBitInEfl);
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, idxEflReg, iBitNo, pEntry->idxLabelElse,
                                                     !fCheckIfSet /*fJmpIfSet*/);

    iemNativeRegFreeTmp(pReNative, idxGstRcxReg);
    iemNativeRegFreeTmp(pReNative, idxEflReg);

    iemNativeCondStartIfBlock(pReNative, off);
    return off;
}
     4656
     4657
     4658
     4659/*
     4660 * General purpose register manipulation (add, sub).
     4661 */
     4662
     4663#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) \
     4664    off = iemNativeEmitSubGregU16(pReNative, off, a_iGReg, a_u16Value); \
     4665    AssertReturn(off != UINT32_MAX, UINT32_MAX)
     4666
     4667/** Emits code for IEM_MC_SUB_GREG_U16. */
     4668DECLINLINE(uint32_t) iemNativeEmitSubGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint16_t uSubtrahend)
     4669{
     4670    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
     4671                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg),
     4672                                                                  kIemNativeGstRegUse_ForUpdate);
     4673    AssertReturn(idxGstTmpReg != UINT8_MAX, UINT32_MAX);
     4674
     4675#ifdef RT_ARCH_AMD64
     4676    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
     4677    AssertReturn(pbCodeBuf, UINT32_MAX);
     4678    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
     4679    if (idxGstTmpReg >= 8)
     4680        pbCodeBuf[off++] = X86_OP_REX_B;
     4681    pbCodeBuf[off++] = 0x81;
     4682    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, idxGstTmpReg & 7);
     4683    pbCodeBuf[off++] = RT_BYTE1(uSubtrahend);
     4684    pbCodeBuf[off++] = RT_BYTE2(uSubtrahend);
     4685
     4686#else
     4687    uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uSubtrahend);
     4688    AssertReturn(idxTmpReg != UINT8_MAX, UINT32_MAX);
     4689
     4690    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     4691    AssertReturn(pu32CodeBuf, UINT32_MAX);
     4692
     4693    /* sub w2, w1, w2, uxth - kind of performs a 16-bit subtract. */
     4694    /** @todo could also use sub #imm12 variant here if uSubtrahend is in range,
     4695     *        avoiding an const mov instruction.  We don't really need the UxtH
     4696     *        bit either, since the register value is zero extended */
     4697    pu32CodeBuf[off++] = Armv8A64MkInstrAddSubRegExtend(true /*fSub*/, idxTmpReg, idxGstTmpReg, idxTmpReg2, false /*f64Bit*/,
     4698                                                        false /*fSetFlags*/, kArmv8A64InstrExtend_UxtH);
     4699
     4700    /* bfi w1, w2, 0, 16 - moves bits 15:0 from tmpreg2 to tmpreg. */
     4701    pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxTmpReg, 0, 16);
     4702
     4703    iemNativeRegFreeTmp(pReNative, idxTmpReg);
     4704#endif
     4705
     4706    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     4707
     4708    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg]));
     4709
     4710    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
     4711    return off;
     4712}
     4713
     4714
    46084715
    46094716
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette