VirtualBox

Changeset 102624 in vbox for trunk/src


Timestamp: Dec 16, 2023 3:15:54 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 160779
Message: VMM/IEM: BODY_CONSIDER_CS_LIM_CHECKING. bugref:10371
Location: trunk/src/VBox/VMM
Files: 5 edited

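The changeset wires BODY_CONSIDER_CS_LIM_CHECKING into the native recompiler: after a branch or a crossing onto a new page, the recompiled code checks whether the instruction pointer has come within roughly one guest page plus a 16-byte instruction margin of CS.LIM, and if so breaks out with VINF_IEM_REEXEC_BREAK so a translation block compiled with full CS.LIM checks (IEMTB_F_CS_LIM_CHECKS) can take over. A minimal standalone sketch of that predicate, with the page constants inlined here rather than taken from the VBox headers:

    #include <stdbool.h>
    #include <stdint.h>

    #define GUEST_PAGE_SIZE         0x1000   /* x86 guest page size */
    #define GUEST_PAGE_OFFSET_MASK  0xfff

    /* Returns true when execution is close enough to CS.LIM that the TB must be
       re-translated with full CS.LIM checking (IEMTB_F_CS_LIM_CHECKS). */
    static bool NeedCsLimChecks(uint32_t uCsLimit, uint64_t uCsBase, uint64_t uEip)
    {
        int64_t const offFromLim = (int64_t)uCsLimit - (int64_t)uEip;
        return offFromLim < GUEST_PAGE_SIZE + 16 - (int32_t)(uCsBase & GUEST_PAGE_OFFSET_MASK);
    }

    int main(void)
    {
        /* 32 bytes left before the limit on a page-aligned segment: needs checking. */
        return NeedCsLimChecks(/*uCsLimit=*/0xffff, /*uCsBase=*/0x10000, /*uEip=*/0xffdf) ? 0 : 1;
    }

The threaded variant in IEMAllThrdFuncsBltIn.cpp implements exactly this test inline, while the native variant emits equivalent register arithmetic and branches to a shared per-TB NeedCsLimChecking label when the test fails.
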
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

r102623 → r102624

@@ -1596 +1596 @@
 
 /**
- * Used by TB code when it wants to raise a \#GP(0).
+ * Used by TB code when detecting opcode changes.
  * @see iemThreadeFuncWorkerObsoleteTb
  */
@@ -1606 +1606 @@
        that return path codes via the native code generated for the TB. */
     iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
+    return VINF_IEM_REEXEC_BREAK;
+}
+
+
+/**
+ * Used by TB code when we need to switch to a TB with CS.LIM checking.
+ */
+IEM_DECL_NATIVE_HLP_DEF(int, iemNativeHlpNeedCsLimChecking,(PVMCPUCC pVCpu))
+{
+    Log7(("TB need CS.LIM: %p at %04x:%08RX64; offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n",
+          pVCpu->iem.s.pCurTbR3, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip,
+          (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.rip,
+          pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base));
+    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking);
     return VINF_IEM_REEXEC_BREAK;
 }
@@ -2435 +2449 @@
     pReNative->Core.u64ArgVars             = UINT64_MAX;
 
-    AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 7);
+    AssertCompile(RT_ELEMENTS(pReNative->aidxUniqueLabels) == 8);
     pReNative->aidxUniqueLabels[0]         = UINT32_MAX;
     pReNative->aidxUniqueLabels[1]         = UINT32_MAX;
@@ -2443 +2457 @@
     pReNative->aidxUniqueLabels[5]         = UINT32_MAX;
     pReNative->aidxUniqueLabels[6]         = UINT32_MAX;
+    pReNative->aidxUniqueLabels[7]         = UINT32_MAX;
 
     /* Full host register reinit: */
@@ -4830 +4845 @@
     off = iemNativeEmitCheckCallRetAndPassUp(pReNative, off, pCallEntry->idxInstr);
 
+    return off;
+}
+
+
+/**
+ * Emits the code at the NeedCsLimChecking label.
+ */
+static uint32_t iemNativeEmitNeedCsLimChecking(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
+{
+    uint32_t const idxLabel = iemNativeLabelFind(pReNative, kIemNativeLabelType_NeedCsLimChecking);
+    if (idxLabel != UINT32_MAX)
+    {
+        iemNativeLabelDefine(pReNative, idxLabel, off);
+
+        /* int iemNativeHlpNeedCsLimChecking(PVMCPUCC pVCpu) */
+        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+        off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpNeedCsLimChecking);
+
+        /* jump back to the return sequence. */
+        off = iemNativeEmitJmpToLabel(pReNative, off, idxReturnLabel);
+    }
     return off;
 }
@@ -11244 +11280 @@
 
 /**
+ * Macro that considers whether we need CS.LIM checking after a branch or
+ * crossing over to a new page.
+ */
+#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) \
+    RT_NOREF(cbInstr); \
+    off = iemNativeEmitBltInConsiderLimChecking(pReNative, off)
+
+DECL_FORCE_INLINE(uint32_t)
+iemNativeEmitBltInConsiderLimChecking(PIEMRECOMPILERSTATE pReNative, uint32_t off)
+{
+    /*
+     * This check must match the ones in the iem in iemGetTbFlagsForCurrentPc
+     * exactly:
+     *
+     *  int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip;
+     *  if (offFromLim >= X86_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK))
+     *      return fRet;
+     *  return fRet | IEMTB_F_CS_LIM_CHECKS;
+     *
+     *
+     * We need EIP, CS.LIM and CS.BASE here.
+     */
+
+    /* Calculate the offFromLim first: */
+    uint8_t const  idxRegPc     = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
+                                                                  kIemNativeGstRegUse_ReadOnly);
+    uint8_t const  idxRegCsLim  = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_LIMIT(X86_SREG_CS),
+                                                                  kIemNativeGstRegUse_ReadOnly);
+    uint8_t const  idxRegLeft   = iemNativeRegAllocTmp(pReNative, &off);
+
+#ifdef RT_ARCH_ARM64
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegLeft, idxRegCsLim, idxRegPc);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+#else
+    off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegLeft, idxRegCsLim);
+    off = iemNativeEmitSubTwoGprs(pReNative, off, idxRegLeft, idxRegPc);
+#endif
+
+    iemNativeRegFreeTmp(pReNative, idxRegCsLim);
+    iemNativeRegFreeTmp(pReNative, idxRegPc);
+
+    /* Calculate the threshold level (right side). */
+    uint8_t const  idxRegCsBase = iemNativeRegAllocTmpForGuestReg(pReNative, &off, IEMNATIVEGSTREG_SEG_BASE(X86_SREG_CS),
+                                                                  kIemNativeGstRegUse_ReadOnly);
+    uint8_t const  idxRegRight  = iemNativeRegAllocTmp(pReNative, &off);
+
+#ifdef RT_ARCH_ARM64
+    pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
+    Assert(Armv8A64ConvertImmRImmS2Mask32(11, 0) == GUEST_PAGE_OFFSET_MASK);
+    pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(idxRegRight, idxRegCsBase, 11, 0, false /*f64Bit*/);
+    pu32CodeBuf[off++] = Armv8A64MkInstrNeg(idxRegRight);
+    pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
+    pu32CodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxRegRight, idxRegRight, (X86_PAGE_SIZE + 16) / 2);
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+
+#else
+    off = iemNativeEmitLoadGprImm32(pReNative, off, idxRegRight, GUEST_PAGE_OFFSET_MASK);
+    off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, idxRegRight, idxRegCsBase);
+    off = iemNativeEmitNegGpr(pReNative, off, idxRegRight);
+    off = iemNativeEmitAddGprImm(pReNative, off, idxRegRight, X86_PAGE_SIZE + 16);
+#endif
+
+    iemNativeRegFreeTmp(pReNative, idxRegCsBase);
+
+    /* Compare the two and jump out if we're too close to the limit. */
+    off = iemNativeEmitCmpGprWithGpr(pReNative, off, idxRegLeft, idxRegRight);
+    off = iemNativeEmitJlToNewLabel(pReNative, off, kIemNativeLabelType_NeedCsLimChecking);
+
+    iemNativeRegFreeTmp(pReNative, idxRegRight);
+    iemNativeRegFreeTmp(pReNative, idxRegLeft);
+    return off;
+}
+
+
+
+/**
  * Macro that implements opcode (re-)checking.
  */
 #define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) \
+    RT_NOREF(cbInstr); \
     off = iemNativeEmitBltInCheckOpcodes(pReNative, off, (a_pTb), (a_idxRange), (a_offRange))
 
@@ -11775 +11889 @@
 {
     PCIEMTB const  pTb      = pReNative->pTbOrg;
-    //uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
+    uint32_t const cbInstr  = (uint32_t)pCallEntry->auParams[0];
     uint32_t const idxRange = (uint32_t)pCallEntry->auParams[1];
     uint32_t const offRange = (uint32_t)pCallEntry->auParams[2];
@@ -12523 +12637 @@
                                 case kIemNativeLabelType_ObsoleteTb:
                                     pszName = "ObsoleteTb";
+                                    break;
+                                case kIemNativeLabelType_NeedCsLimChecking:
+                                    pszName = "NeedCsLimChecking";
                                     break;
                                 case kIemNativeLabelType_If:
@@ -12924 +13041 @@
         if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_ObsoleteTb))
             off = iemNativeEmitObsoleteTb(pReNative, off, idxReturnLabel);
+        if (pReNative->bmLabelTypes & RT_BIT_64(kIemNativeLabelType_NeedCsLimChecking))
+            off = iemNativeEmitNeedCsLimChecking(pReNative, off, idxReturnLabel);
     }
     IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rc);
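
Two details of the native implementation above are worth spelling out. The slow path is shared: the per-TB NeedCsLimChecking tail label loads pVCpu into the first call-argument register, calls iemNativeHlpNeedCsLimChecking (which bumps a release statistics counter and logs the state) and rejoins the common return sequence with VINF_IEM_REEXEC_BREAK. And in the ARM64 fast path, the threshold constant X86_PAGE_SIZE + 16 is added in two halves because 4112 does not fit the 12-bit unsigned immediate of the A64 ADD instruction. A tiny standalone check of that arithmetic (plain C, nothing VBox-specific):

    #include <assert.h>

    #define X86_PAGE_SIZE 0x1000

    int main(void)
    {
        assert(X86_PAGE_SIZE + 16 == 4112);         /* exceeds the 4095 maximum of an ADD uimm12 */
        assert((X86_PAGE_SIZE + 16) / 2 == 2056);   /* fits, so the emitter adds it twice */
        assert(2056 + 2056 == X86_PAGE_SIZE + 16);
        return 0;
    }
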
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

r101640 → r102624

@@ -200 +200 @@
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
             return iemRaiseGeneralProtectionFault0(pVCpu); \
+        } \
+    } while(0)
+
+/**
+ * Macro that considers whether we need CS.LIM checking after a branch or
+ * crossing over to a new page.
+ */
+#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
+        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
+        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
+        { /* likely */ } \
+        else \
+        { \
+            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
+                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
+                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
+            RT_NOREF(a_pTb, a_cbInstr); \
+            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
+            return VINF_IEM_REEXEC_BREAK; \
         } \
     } while(0)
@@ -351 +370 @@
     } while(0)
 
-/**
- * Macro that considers whether we need CS.LIM checking after a branch or
- * crossing over to a new page.
- *
- * This may long jump if we're raising a \#PF, \#GP or similar trouble.
- */
-#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
-        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
-        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
-        { /* likely */ } \
-        else \
-        { \
-            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
-                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), offFromLim, \
-                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base, __LINE__)); \
-            RT_NOREF(a_pTb, a_cbInstr); \
-            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
-            return VINF_IEM_REEXEC_BREAK; \
-        } \
-    } while(0)
-
 
 
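
The threaded macro above and the native emitter in IEMAllN8veRecompiler.cpp phrase the same test differently: the macro treats offFromLim >= threshold as the likely path, while the emitted code computes left = CS.LIM - PC and right = X86_PAGE_SIZE + 16 - (CS.BASE & GUEST_PAGE_OFFSET_MASK) and takes the cmp + jl/b.lt branch to NeedCsLimChecking when left < right. A standalone sketch (constants inlined, helper names made up for illustration) showing the two formulations are complementary:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define GUEST_PAGE_SIZE         0x1000   /* same value as X86_PAGE_SIZE for x86 guests */
    #define GUEST_PAGE_OFFSET_MASK  0xfff

    /* Threaded form: the "likely" condition that lets the built-in continue. */
    static bool ThreadedLikely(uint32_t uLim, uint64_t uBase, uint64_t uEip)
    {
        int64_t const offFromLim = (int64_t)uLim - (int64_t)uEip;
        return offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(uBase & GUEST_PAGE_OFFSET_MASK);
    }

    /* Native form: the condition under which the emitted cmp + jl/b.lt is taken. */
    static bool NativeTakesSlowBranch(uint32_t uLim, uint64_t uBase, uint64_t uEip)
    {
        int64_t const left  = (int64_t)uLim - (int64_t)uEip;
        int64_t const right = GUEST_PAGE_SIZE + 16 - (int64_t)(uBase & GUEST_PAGE_OFFSET_MASK);
        return left < right;
    }

    int main(void)
    {
        /* Spot checks around the boundary: the two must always be complementary. */
        for (uint64_t uEip = 0xef00; uEip < 0x10100; uEip += 0x10)
            assert(ThreadedLikely(0xffff, 0x10010, uEip) == !NativeTakesSlowBranch(0xffff, 0x10010, uEip));
        return 0;
    }
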
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

r102623 → r102624

@@ -1968 +1968 @@
         ( 'CheckCsLimAndOpcodes',                               3, True  ),
         ( 'CheckOpcodes',                                       3, True  ),
-        ( 'CheckOpcodesConsiderCsLim',                          3, False ),
+        ( 'CheckOpcodesConsiderCsLim',                          3, True ),
 
         ( 'CheckCsLimAndPcAndOpcodes',                          3, False ),
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

r102603 → r102624

@@ -311 +311 @@
     kIemNativeLabelType_RaiseGp0,
     kIemNativeLabelType_ObsoleteTb,
+    kIemNativeLabelType_NeedCsLimChecking,
     /* Labels with data, potentially multiple instances per TB: */
     kIemNativeLabelType_FirstWithMultipleInstances,
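
The new label type is placed before kIemNativeLabelType_FirstWithMultipleInstances on purpose: label types above that marker are emitted at most once per translation block, which is presumably why the companion change in IEMAllN8veRecompiler.cpp grows the aidxUniqueLabels AssertCompile from 7 to 8 and initialises the extra slot. A simplified, self-contained illustration of that layout (demo names only; the real enum has more members):

    /* Demo enum: entries before the FirstWithMultipleInstances marker are emitted
       at most once per translation block and are tracked in aidxUniqueLabels[]. */
    typedef enum DEMOLABELTYPE
    {
        kDemoLabelType_Invalid = 0,
        kDemoLabelType_RaiseGp0,
        kDemoLabelType_ObsoleteTb,
        kDemoLabelType_NeedCsLimChecking,           /* new in this changeset */
        kDemoLabelType_FirstWithMultipleInstances,  /* per-TB-unique labels end here */
        kDemoLabelType_End
    } DEMOLABELTYPE;
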
  • trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h

r102623 → r102624

@@ -1669 +1669 @@
 *********************************************************************************************************************************/
 
+/**
+ * Emits subtracting a 64-bit GPR from another, storing the result in the first.
+ * @note The AMD64 version sets flags.
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitSubTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSubtrahend)
+{
+#if defined(RT_ARCH_AMD64)
+    /* sub Gv,Ev */
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
+    pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
+                     | (iGprSubtrahend < 8 ? 0 : X86_OP_REX_B);
+    pbCodeBuf[off++] = 0x2b;
+    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSubtrahend & 7);
+
+#elif defined(RT_ARCH_ARM64)
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprSubtrahend);
+
+#else
+# error "Port me"
+#endif
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    return off;
+}
+
 
 #ifdef RT_ARCH_AMD64
@@ -1705 +1731 @@
 
 /**
- * Emits adding a 64-bit GPR to another, storing the result in the frist.
+ * Emits adding a 64-bit GPR to another, storing the result in the first.
  * @note The AMD64 version sets flags.
  */
@@ -1915 +1941 @@
         iemNativeRegFreeTmpImm(pReNative, iTmpReg);
     }
+
+#else
+# error "Port me"
+#endif
+    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
+    return off;
+}
+
+
+/*********************************************************************************************************************************
+*   Unary Operations                                                                                                             *
+*********************************************************************************************************************************/
+
+/**
+ * Emits code for negating (two's complement) a 64-bit GPR.
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeEmitNegGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
+{
+#if defined(RT_ARCH_AMD64)
+    /* neg Ev */
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
+    pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
+    pbCodeBuf[off++] = 0xf7;
+    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 3, iGprDst & 7);
+
+#elif defined(RT_ARCH_ARM64)
+    /* sub dst, xzr, dst */
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    pu32CodeBuf[off++] = Armv8A64MkInstrNeg(iGprDst);
 
 #else
@@ -2808 +2864 @@
 #elif defined(RT_ARCH_ARM64)
     return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Hi);
+#else
+# error "Port me!"
+#endif
+}
+
+
+/**
+ * Emits a JL/JNGE rel32 / B.LT imm19 to the given label.
+ */
+DECL_INLINE_THROW(uint32_t) iemNativeEmitJlToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
+{
+#ifdef RT_ARCH_AMD64
+    return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_l);
+#elif defined(RT_ARCH_ARM64)
+    return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Lt);
+#else
+# error "Port me!"
+#endif
+}
+
+/**
+ * Emits a JL/JNGE rel32 / B.LT imm19 to a new label.
+ */
+DECL_INLINE_THROW(uint32_t) iemNativeEmitJlToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
+                                                      IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
+{
+#ifdef RT_ARCH_AMD64
+    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_l);
+#elif defined(RT_ARCH_ARM64)
+    return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Lt);
 #else
 # error "Port me!"
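
The emitter additions are the small building blocks the consider-CS.LIM check needs: a 64-bit register subtract, a negate, and signed less-than jumps to an existing or a new label. As a worked example of the byte sequence iemNativeEmitSubTwoGprs produces on AMD64, the standalone program below recomputes the three emitted bytes for iGprDst = RAX (0) and iGprSubtrahend = RCX (1); the X86_OP_REX_* and X86_MODRM_MAKE values are inlined here and assumed to match the VBox x86.h definitions:

    #include <assert.h>
    #include <stdint.h>

    #define X86_OP_REX_W  0x48
    #define X86_OP_REX_R  0x44
    #define X86_OP_REX_B  0x41
    #define X86_MOD_REG   3
    #define X86_MODRM_MAKE(a_Mod, a_Reg, a_RegMem) ((uint8_t)(((a_Mod) << 6) | ((a_Reg) << 3) | (a_RegMem)))

    int main(void)
    {
        uint8_t const iGprDst = 0 /* rax */, iGprSubtrahend = 1 /* rcx */;
        uint8_t abInstr[3];

        abInstr[0] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
                   | (iGprSubtrahend < 8 ? 0 : X86_OP_REX_B);
        abInstr[1] = 0x2b;                                                    /* sub Gv,Ev */
        abInstr[2] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSubtrahend & 7);

        /* 48 2b c1 disassembles as: sub rax, rcx */
        assert(abInstr[0] == 0x48 && abInstr[1] == 0x2b && abInstr[2] == 0xc1);
        return 0;
    }
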