VirtualBox

Changeset 102065 in vbox for trunk/src


Timestamp: Nov 10, 2023 4:14:51 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 160174
Message: VMM/IEM: Native translation of IEM_MC_STORE_GREG_U16. Fixed a bunch of variable, register & stack allocator issues. bugref:10371
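
For context, IEM_MC_STORE_GREG_U16 writes a 16-bit value into the low word of a guest general-purpose register while preserving bits 63:16, which is why the emitters below merge into the register rather than overwrite it. A minimal C model of those semantics (a sketch assuming a flat uint64_t register file, not the actual CPUMCTX layout):

    #include <stdint.h>

    /* Illustrative model of IEM_MC_STORE_GREG_U16 semantics: only bits 15:0
       of the guest register are replaced, bits 63:16 stay untouched. */
    static void StoreGregU16(uint64_t *pauGRegs, unsigned iGReg, uint16_t uValue)
    {
        pauGRegs[iGReg] = (pauGRegs[iGReg] & ~(uint64_t)0xffff) | uValue;
    }
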

Location: trunk/src/VBox/VMM
Files: 3 edited

Legend: context lines carry both the old and the new line number; removed lines carry only the old number, added lines only the new one.
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py

    r102012 r102065  
    30613061    'IEM_MC_STORE_FPUREG_R80_SRC_REF':                           (McBlock.parseMcGeneric,           True,  False, ),
    30623062    'IEM_MC_STORE_GREG_I64':                                     (McBlock.parseMcGeneric,           True,  False, ),
    3063     'IEM_MC_STORE_GREG_U16':                                     (McBlock.parseMcGeneric,           True,  False, ),
     3063    'IEM_MC_STORE_GREG_U16':                                     (McBlock.parseMcGeneric,           True,  True, ),
    30643064    'IEM_MC_STORE_GREG_U16_CONST':                               (McBlock.parseMcGeneric,           True,  False, ),
    30653065    'IEM_MC_STORE_GREG_U32':                                     (McBlock.parseMcGeneric,           True,  False, ),
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r102022 r102065  
    140140static uint32_t iemNativeEmitGuestRegValueCheck(PIEMRECOMPILERSTATE pReNative, uint32_t off,
    141141                                                uint8_t idxReg, IEMNATIVEGSTREG enmGstReg);
     142static void iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative);
    142143#endif
    143144#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
     
    145146static void iemNativeDbgInfoAddLabel(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType, uint16_t uData);
    146147#endif
     148DECL_FORCE_INLINE(void) iemNativeRegClearGstRegShadowing(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, uint32_t off);
    147149
    148150
     
    22592261{
    22602262    Assert(!(fRegMask & ~IEMNATIVE_HST_GREG_MASK));
    2261     Assert(!(fRegMask & ~IEMNATIVE_REG_FIXED_MASK));
     2263    Assert(!(fRegMask & IEMNATIVE_REG_FIXED_MASK));
    22622264
    22632265    /*
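
The assertion fix above corrects a mask-polarity bug: !(fRegMask & ~IEMNATIVE_REG_FIXED_MASK) only passes when fRegMask is a subset of the fixed-register mask, the opposite of the intent, whereas !(fRegMask & IEMNATIVE_REG_FIXED_MASK) asserts that the caller's allocation mask contains no fixed (reserved) registers. A small demonstration with hypothetical mask values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t const fFixedMask = 0x00000031;   /* hypothetical fixed/reserved host regs */
        uint32_t const fRegMask   = 0x0000ff00;   /* a perfectly valid allocation mask     */

        /* Old check: "fRegMask must be a subset of the fixed mask".
           0xff00 & ~0x31 != 0, so this would fire on a valid mask: */
        /* assert(!(fRegMask & ~fFixedMask)); */

        /* New check: "fRegMask must not overlap the fixed mask" - the intended rule. */
        assert(!(fRegMask & fFixedMask));
        return 0;
    }
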
     
    23112313                if (pReNative->Core.aVars[idxVar].enmKind == kIemNativeVarKind_Stack)
    23122314                {
    2313                     AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX,
     2315                    AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS,
    23142316                               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_8));
    2315                     *poff = iemNativeEmitStoreGprByBp(pReNative, *poff,
    2316                                                         pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t)
    2317                                                       - IEMNATIVE_FP_OFF_STACK_VARS,
    2318                                                       idxReg);
     2317                    *poff = iemNativeEmitStoreGprByBp(pReNative, *poff, iemNativeVarCalcBpDisp(pReNative, idxVar), idxReg);
    23192318                }
    23202319
     
    23792378            fRegs &= ~pReNative->Core.bmHstRegsWithGstShadow;
    23802379        unsigned const idxRegNew = ASMBitFirstSetU32(fRegs) - 1;
     2380        iemNativeRegClearGstRegShadowing(pReNative, idxRegNew, off);
    23812381
    23822382        uint64_t fGstRegShadows = pReNative->Core.aHstRegs[idxRegOld].fGstRegShadows;
     2383        Log12(("iemNativeRegMoveOrSpillStackVar: moving idxVar=%d from %s to %s (fGstRegShadows=%RX64)\n",
     2384               idxVar,  g_apszIemNativeHstRegNames[idxRegOld], g_apszIemNativeHstRegNames[idxRegNew], fGstRegShadows));
    23832385        pReNative->Core.aHstRegs[idxRegNew].fGstRegShadows = fGstRegShadows;
    23842386        pReNative->Core.aHstRegs[idxRegNew].enmWhat        = kIemNativeWhat_Var;
     
    24052407    else
    24062408    {
    2407         AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
    2408         off = iemNativeEmitStoreGprByBp(pReNative, off,
    2409                                           pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t)
    2410                                         - IEMNATIVE_FP_OFF_STACK_VARS,
    2411                                         idxRegOld);
     2409        uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
     2410        Log12(("iemNativeRegMoveOrSpillStackVar: spilling idxVar=%d/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
     2411               idxVar, idxRegOld, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
     2412        AssertStmt(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_7));
     2413        off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxRegOld);
    24122414
    24132415        pReNative->Core.bmHstRegsWithGstShadow &= ~RT_BIT_32(idxRegOld);
     
    29632965    { /* likely */ }
    29642966    else
     2967    {
     2968        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: fRegsToMove=%#x\n", fRegsToMove));
    29652969        while (fRegsToMove != 0)
    29662970        {
     
    29762980                    Assert(pReNative->Core.bmVars & RT_BIT_32(idxVar));
    29772981                    Assert(pReNative->Core.aVars[idxVar].idxReg == idxReg);
     2982                    Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: idxVar=%d enmKind=%d idxReg=%d\n",
     2983                           idxVar, pReNative->Core.aVars[idxVar].enmKind, pReNative->Core.aVars[idxVar].idxReg));
    29782984                    if (pReNative->Core.aVars[idxVar].enmKind != kIemNativeVarKind_Stack)
    29792985                        pReNative->Core.aVars[idxVar].idxReg = UINT8_MAX;
     
    30023008            AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_2));
    30033009        }
     3010    }
    30043011
    30053012    /*
    30063013     * Do the actual freeing.
    30073014     */
     3015    if (pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
     3016        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegs %#x -> %#x\n", pReNative->Core.bmHstRegs, pReNative->Core.bmHstRegs & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK));
    30083017    pReNative->Core.bmHstRegs &= ~IEMNATIVE_CALL_VOLATILE_GREG_MASK;
    30093018
     
    30133022    if (fHstRegsWithGstShadow)
    30143023    {
     3024        Log12(("iemNativeRegMoveAndFreeAndFlushAtCall: bmHstRegsWithGstShadow %#RX32 -> %#RX32; removed %#RX32\n",
     3025               pReNative->Core.bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow & ~IEMNATIVE_CALL_VOLATILE_GREG_MASK, fHstRegsWithGstShadow));
    30153026        pReNative->Core.bmHstRegsWithGstShadow &= ~fHstRegsWithGstShadow;
    30163027        do
    30173028        {
    30183029            unsigned const idxReg = ASMBitFirstSetU32(fHstRegsWithGstShadow) - 1;
    3019             fHstRegsWithGstShadow = ~RT_BIT_32(idxReg);
    3020 
    3021             Assert(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0);
     3030            fHstRegsWithGstShadow &= ~RT_BIT_32(idxReg);
     3031
     3032            AssertMsg(pReNative->Core.aHstRegs[idxReg].fGstRegShadows != 0, ("idxReg=%#x\n", idxReg));
    30223033            pReNative->Core.bmGstRegShadows &= ~pReNative->Core.aHstRegs[idxReg].fGstRegShadows;
    30233034            pReNative->Core.aHstRegs[idxReg].fGstRegShadows = 0;
     
    30463057    if (fGstRegs)
    30473058    {
     3059        Log12(("iemNativeRegFlushGuestShadows: flushing %#RX64 (%#RX64 -> %#RX64)\n",
     3060               fGstRegs, pReNative->Core.bmGstRegShadows, pReNative->Core.bmGstRegShadows & ~fGstRegs));
    30483061        pReNative->Core.bmGstRegShadows &= ~fGstRegs;
    30493062        if (pReNative->Core.bmGstRegShadows)
     
    31053118
    31063119
     3120#ifdef VBOX_STRICT
     3121/**
     3122 * Does internal register allocator sanity checks.
     3123 */
     3124static void iemNativeRegAssertSanity(PIEMRECOMPILERSTATE pReNative)
     3125{
     3126    /*
     3127     * Iterate host registers building a guest shadowing set.
     3128     */
     3129    uint64_t bmGstRegShadows        = 0;
     3130    uint32_t bmHstRegsWithGstShadow = pReNative->Core.bmHstRegsWithGstShadow;
     3131    AssertMsg(!(bmHstRegsWithGstShadow & IEMNATIVE_REG_FIXED_MASK), ("%#RX32\n", bmHstRegsWithGstShadow));
     3132    while (bmHstRegsWithGstShadow)
     3133    {
     3134        unsigned const idxHstReg = ASMBitFirstSetU32(bmHstRegsWithGstShadow) - 1;
     3135        Assert(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs));
     3136        bmHstRegsWithGstShadow &= ~RT_BIT_32(idxHstReg);
     3137
     3138        uint64_t fThisGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
     3139        AssertMsg(fThisGstRegShadows != 0, ("idxHstReg=%d\n", idxHstReg));
     3140        bmGstRegShadows |= fThisGstRegShadows;
     3141        while (fThisGstRegShadows)
     3142        {
     3143            unsigned const idxGstReg = ASMBitFirstSetU64(fThisGstRegShadows) - 1;
     3144            fThisGstRegShadows &= ~RT_BIT_64(idxGstReg);
     3145            AssertMsg(pReNative->Core.aidxGstRegShadows[idxGstReg] == idxHstReg,
     3146                      ("idxHstReg=%d aidxGstRegShadows[idxGstReg=%d]=%d\n",
     3147                       idxHstReg, idxGstReg, pReNative->Core.aidxGstRegShadows[idxGstReg]));
     3148        }
     3149    }
     3150    AssertMsg(bmGstRegShadows == pReNative->Core.bmGstRegShadows,
     3151              ("%RX64 vs %RX64; diff %RX64\n", bmGstRegShadows, pReNative->Core.bmGstRegShadows,
     3152               bmGstRegShadows ^ pReNative->Core.bmGstRegShadows));
     3153
     3154    /*
     3155     * Now the other way around, checking the guest to host index array.
     3156     */
     3157    bmHstRegsWithGstShadow = 0;
     3158    bmGstRegShadows        = pReNative->Core.bmGstRegShadows;
     3159    while (bmGstRegShadows)
     3160    {
     3161        unsigned const idxGstReg = ASMBitFirstSetU64(bmGstRegShadows) - 1;
     3162        Assert(idxGstReg < RT_ELEMENTS(pReNative->Core.aidxGstRegShadows));
     3163        bmGstRegShadows &= ~RT_BIT_64(idxGstReg);
     3164
     3165        uint8_t const idxHstReg = pReNative->Core.aidxGstRegShadows[idxGstReg];
     3166        AssertMsg(idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs), ("aidxGstRegShadows[%d]=%d\n", idxGstReg, idxHstReg));
     3167        AssertMsg(pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows & RT_BIT_64(idxGstReg),
     3168                  ("idxGstReg=%d idxHstReg=%d fGstRegShadows=%RX64\n",
     3169                   idxGstReg, idxHstReg, pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows));
     3170        bmHstRegsWithGstShadow |= RT_BIT_32(idxHstReg);
     3171    }
     3172    AssertMsg(bmHstRegsWithGstShadow == pReNative->Core.bmHstRegsWithGstShadow,
     3173              ("%RX64 vs %RX64; diff %RX64\n", bmHstRegsWithGstShadow, pReNative->Core.bmHstRegsWithGstShadow,
     3174               bmHstRegsWithGstShadow ^ pReNative->Core.bmHstRegsWithGstShadow));
     3175}
     3176#endif
     3177
     3178
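
The new iemNativeRegAssertSanity() validates the allocator's shadow bookkeeping in both directions: every host register flagged in bmHstRegsWithGstShadow must shadow at least one guest register and own the matching aidxGstRegShadows back-pointers, and every guest register flagged in bmGstRegShadows must point at a host register that actually shadows it. A stripped-down model of that dual bookkeeping and its invariant (types and sizes are illustrative, not the VBox ones):

    #include <assert.h>
    #include <stdint.h>

    #define MY_HST_REGS 16   /* host registers  (32-bit bitmap) */
    #define MY_GST_REGS 64   /* guest registers (64-bit bitmap) */

    typedef struct MYCORE
    {
        uint32_t bmHstRegsWithGstShadow;          /* host regs currently shadowing something */
        uint64_t bmGstRegShadows;                 /* guest regs currently shadowed           */
        uint64_t afGstRegShadows[MY_HST_REGS];    /* per host reg: which guest regs it holds */
        uint8_t  aidxGstRegShadows[MY_GST_REGS];  /* per guest reg: which host reg holds it  */
    } MYCORE;

    /* Record that host register idxHst now shadows guest register idxGst. */
    static void myShadow(MYCORE *p, unsigned idxHst, unsigned idxGst)
    {
        p->bmHstRegsWithGstShadow   |= UINT32_C(1) << idxHst;
        p->bmGstRegShadows          |= UINT64_C(1) << idxGst;
        p->afGstRegShadows[idxHst]  |= UINT64_C(1) << idxGst;
        p->aidxGstRegShadows[idxGst] = (uint8_t)idxHst;
    }

    /* The invariant iemNativeRegAssertSanity walks, in miniature. */
    static void myAssertSanity(MYCORE const *p)
    {
        uint64_t bmGst = 0;
        for (unsigned idxHst = 0; idxHst < MY_HST_REGS; idxHst++)
            if (p->bmHstRegsWithGstShadow & (UINT32_C(1) << idxHst))
            {
                uint64_t const fThis = p->afGstRegShadows[idxHst];
                assert(fThis != 0);                    /* flagged => shadows something */
                bmGst |= fThis;
                for (unsigned idxGst = 0; idxGst < MY_GST_REGS; idxGst++)
                    if (fThis & (UINT64_C(1) << idxGst))
                        assert(p->aidxGstRegShadows[idxGst] == idxHst);  /* back-pointer */
            }
        assert(bmGst == p->bmGstRegShadows);           /* bitmaps agree both ways */
    }

    int main(void)
    {
        MYCORE Core = {0};
        myShadow(&Core, 2 /*host*/, 17 /*guest*/);
        myShadow(&Core, 2 /*host*/, 18 /*guest*/);     /* one host reg may shadow several */
        myAssertSanity(&Core);
        return 0;
    }
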
    31073179/*********************************************************************************************************************************
    31083180*   Code Emitters (larger snippets)                                                                                              *
     
    35733645        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x9, pCallEntry->auParams[1]);
    35743646    if (cParams > 2)
     3647    {
    35753648        off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_x10, pCallEntry->auParams[2]);
    3576     off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
     3649        off = iemNativeEmitStoreGprByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, X86_GREG_x10);
     3650    }
    35773651    off = iemNativeEmitLeaGprByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
    35783652#  endif /* VBOXSTRICTRC_STRICT_ENABLED */
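
The braces added above fix a misleading-indentation bug: the iemNativeEmitStoreGprByBp() call was indented under the if (cParams > 2) but not guarded by it, so the third argument was spilled even when absent. The pattern in miniature (emitLoadArg2/emitSpillArg2 are hypothetical stand-ins for the two emitter calls):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t emitLoadArg2(uint32_t off)  { return off + 1; }  /* stand-in */
    static uint32_t emitSpillArg2(uint32_t off) { return off + 1; }  /* stand-in */

    static uint32_t emitBroken(uint32_t off, unsigned cParams)
    {
        if (cParams > 2)
            off = emitLoadArg2(off);
            off = emitSpillArg2(off);   /* indented, but NOT guarded - always runs */
        return off;
    }

    static uint32_t emitFixed(uint32_t off, unsigned cParams)
    {
        if (cParams > 2)
        {
            off = emitLoadArg2(off);
            off = emitSpillArg2(off);   /* now only when a third parameter exists */
        }
        return off;
    }

    int main(void)
    {
        assert(emitBroken(0, 2) == 1);  /* spill ran despite cParams == 2 */
        assert(emitFixed(0, 2)  == 0);  /* nothing emitted, as intended   */
        return 0;
    }
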
     
    39314005#define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
    39324006    { \
     4007        Assert(pReNative->Core.bmVars     == 0); \
     4008        Assert(pReNative->Core.u64ArgVars == UINT64_MAX); \
     4009        Assert(pReNative->Core.bmStack    == 0); \
    39334010        pReNative->fMc    = (a_fMcFlags); \
    39344011        pReNative->fCImpl = (a_fCImplFlags); \
     
    39384015 * generate code for all the IEM_MC_IF_XXX branches. */
    39394016#define IEM_MC_END() \
     4017        iemNativeVarFreeAll(pReNative); \
    39404018    } return off
    39414019
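
With the new asserts in IEM_MC_BEGIN and the iemNativeVarFreeAll() call in IEM_MC_END, each MC block now starts from and returns to an empty variable/stack bookkeeping state, so a leak in one translated block can no longer corrupt the next. A toy version of that bitmap lifecycle (a sketch; the real state lives in pReNative->Core):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t g_bmVars;               /* one bit per live variable slot */

    static unsigned varAlloc(void)
    {
        unsigned idx = 0;
        while (g_bmVars & (UINT32_C(1) << idx))  /* find first free bit */
        {
            idx++;
            assert(idx < 32);
        }
        g_bmVars |= UINT32_C(1) << idx;
        return idx;
    }

    static void varFreeAll(void) { g_bmVars = 0; }

    static void mcBlock(void)
    {
        assert(g_bmVars == 0);              /* IEM_MC_BEGIN: nothing leaked in */
        unsigned const idx = varAlloc();
        (void)idx;                          /* ...translated MC statements... */
        varFreeAll();                       /* IEM_MC_END: clean slate again  */
        assert(g_bmVars == 0);
    }

    int main(void) { mcBlock(); mcBlock(); return 0; }
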
     
    43994477                    || idxHstReg != pOther->aidxGstRegShadows[idxGstReg])
    44004478                {
    4401                     Log12(("iemNativeEmitEndIf: dropping gst %#RX64 from hst %s\n",
     4479                    Log12(("iemNativeEmitEndIf: dropping gst %s from hst %s\n",
    44024480                           g_aGstShadowInfo[idxGstReg].pszName, g_apszIemNativeHstRegNames[idxHstReg]));
    44034481                    iemNativeRegClearGstRegShadowing(pReNative, idxHstReg, off);
     
    50085086                pReNative->Core.bmStack |= RT_BIT_32(iSlot);
    50095087                pReNative->Core.aVars[idxVar].idxStackSlot = iSlot;
     5088                Log11(("iemNativeVarSetKindToStack: idxVar=%d iSlot=%#x\n", idxVar, iSlot));
    50105089                return;
    50115090            }
     
    50265105                        pReNative->Core.bmStack |= (fBitAllocMask << iSlot);
    50275106                        pReNative->Core.aVars[idxVar].idxStackSlot = iSlot;
     5107                        Log11(("iemNativeVarSetKindToStack: idxVar=%d iSlot=%#x/%#x (cbVar=%#x)\n",
     5108                               idxVar, iSlot, fBitAllocMask, pReNative->Core.aVars[idxVar].cbVar));
    50285109                        return;
    50295110                    }
     
    52015282}
    52025283
     5284
     5285/**
     5286 * Worker that frees the stack slots for variable @a idxVar if any allocated.
     5287 *
     5288 * This is used both by iemNativeVarFreeOneWorker and iemNativeEmitCallCommon.
     5289 */
     5290DECL_FORCE_INLINE(void) iemNativeVarFreeStackSlots(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
     5291{
     5292    uint8_t const idxStackSlot = pReNative->Core.aVars[idxVar].idxStackSlot;
     5293    Assert(idxStackSlot == UINT8_MAX || idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
     5294    if (idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS)
     5295    {
     5296        uint8_t const  cbVar      = pReNative->Core.aVars[idxVar].cbVar;
     5297        uint8_t const  cSlots     = (cbVar + sizeof(uint64_t) - 1) / sizeof(uint64_t);
     5298        uint32_t const fAllocMask = (uint32_t)(RT_BIT_32(cSlots) - 1U);
     5299        Assert(cSlots > 0);
     5300        Assert(((pReNative->Core.bmStack >> idxStackSlot) & fAllocMask) == fAllocMask);
     5301        Log11(("iemNativeVarFreeStackSlots: idxVar=%d iSlot=%#x/%#x (cbVar=%#x)\n", idxVar, idxStackSlot, fAllocMask, cbVar));
     5302        pReNative->Core.bmStack &= ~(fAllocMask << idxStackSlot);
     5303        pReNative->Core.aVars[idxVar].idxStackSlot = UINT8_MAX;
     5304    }
     5305}
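
The slot arithmetic in iemNativeVarFreeStackSlots mirrors the allocation side shown earlier: a variable of cbVar bytes occupies cSlots = (cbVar + 7) / 8 consecutive 8-byte slots, and fAllocMask = RT_BIT_32(cSlots) - 1, shifted by idxStackSlot, selects exactly those bits in bmStack. A worked example with assumed values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t  const cbVar        = 16;   /* e.g. a 128-bit variable */
        uint8_t  const idxStackSlot = 4;    /* assumed slot index      */
        uint8_t  const cSlots       = (uint8_t)((cbVar + sizeof(uint64_t) - 1) / sizeof(uint64_t)); /* -> 2 */
        uint32_t const fAllocMask   = (UINT32_C(1) << cSlots) - 1;                                  /* -> 0x3 */

        uint32_t bmStack = fAllocMask << idxStackSlot;                  /* -> 0x30: slots 4 and 5 in use */
        assert(((bmStack >> idxStackSlot) & fAllocMask) == fAllocMask); /* the Assert in the worker      */

        bmStack &= ~(fAllocMask << idxStackSlot);                       /* free both slots */
        assert(bmStack == 0);
        return 0;
    }
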
     5306
     5307
     5308/**
     5309 * Worker that frees a single variable.
     5310 *
     5311 * ASSUMES that @a idxVar is valid.
     5312 */
     5313DECLINLINE(void) iemNativeVarFreeOneWorker(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
     5314{
     5315    Assert(   pReNative->Core.aVars[idxVar].enmKind > kIemNativeVarKind_Invalid
     5316           && pReNative->Core.aVars[idxVar].enmKind < kIemNativeVarKind_End);
     5317
     5318    /* Free the host register first if any assigned. */
     5319    uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
     5320    if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
     5321    {
     5322        Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == idxVar);
     5323        pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
     5324        pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
     5325    }
     5326
     5327    /* Free argument mapping. */
     5328    uint8_t const uArgNo = pReNative->Core.aVars[idxVar].uArgNo;
     5329    if (idxHstReg < RT_ELEMENTS(pReNative->Core.aidxArgVars))
     5330        pReNative->Core.aidxArgVars[uArgNo] = UINT8_MAX;
     5331
     5332    /* Free the stack slots. */
     5333    iemNativeVarFreeStackSlots(pReNative, idxVar);
     5334
     5335    /* Free the actual variable. */
     5336    pReNative->Core.aVars[idxVar].enmKind = kIemNativeVarKind_Invalid;
     5337    pReNative->Core.bmVars &= ~RT_BIT_32(idxVar);
     5338}
     5339
     5340
     5341/**
     5342 * Worker for iemNativeVarFreeAll that's called when there is anything to do.
     5343 */
     5344DECLINLINE(void) iemNativeVarFreeAllSlow(PIEMRECOMPILERSTATE pReNative, uint32_t bmVars)
     5345{
     5346    while (bmVars != 0)
     5347    {
     5348        uint8_t const idxVar = ASMBitFirstSetU32(bmVars) - 1;
     5349        bmVars &= ~RT_BIT_32(idxVar);
     5350
     5351#if 1 /** @todo optimize by simplifying this later... */
     5352        iemNativeVarFreeOneWorker(pReNative, idxVar);
     5353#else
     5354        /* Only need to free the host register, the rest is done as bulk updates below. */
     5355        uint8_t const idxHstReg = pReNative->Core.aVars[idxVar].idxReg;
     5356        if (idxHstReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
     5357        {
     5358            Assert(pReNative->Core.aHstRegs[idxHstReg].idxVar == idxVar);
     5359            pReNative->Core.aHstRegs[idxHstReg].idxVar = UINT8_MAX;
     5360            pReNative->Core.bmHstRegs &= ~RT_BIT_32(idxHstReg);
     5361        }
     5362#endif
     5363    }
     5364#if 0 /** @todo optimize by simplifying this later... */
     5365    pReNative->Core.bmVars     = 0;
     5366    pReNative->Core.bmStack    = 0;
     5367    pReNative->Core.u64ArgVars = UINT64_MAX;
     5368#endif
     5369}
     5370
     5371
     5372/**
     5373 * This is called by IEM_MC_END() to clean up all variables.
     5374 */
     5375DECL_FORCE_INLINE(void) iemNativeVarFreeAll(PIEMRECOMPILERSTATE pReNative)
     5376{
     5377    uint32_t const bmVars = pReNative->Core.bmVars;
     5378    if (bmVars != 0)
     5379        iemNativeVarFreeAllSlow(pReNative, bmVars);
     5380    Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
     5381    Assert(pReNative->Core.bmStack    == 0);
     5382}
    52035383
    52045384
     
    53965576                            AssertStmt(pReNative->Core.aVars[idxVar].idxStackSlot != UINT8_MAX,
    53975577                                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
    5398                             off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg,
    5399                                                              IEMNATIVE_FP_OFF_STACK_VARS
    5400                                                            + pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t));
     5578                            off = iemNativeEmitLoadGprByBp(pReNative, off, idxArgReg, iemNativeVarCalcBpDisp(pReNative, idxVar));
    54015579                            continue;
    54025580
     
    54125590                                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
    54135591                            off = iemNativeEmitLeaGprByBp(pReNative, off, idxArgReg,
    5414                                                             IEMNATIVE_FP_OFF_STACK_VARS
    5415                                                           + pReNative->Core.aVars[idxOtherVar].idxStackSlot * sizeof(uint64_t));
     5592                                                          iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxOtherVar].idxStackSlot));
    54165593                            continue;
    54175594                        }
     
    54685645                                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_3));
    54695646                        off = iemNativeEmitLoadGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG /* is free */,
    5470                                                          IEMNATIVE_FP_OFF_STACK_VARS
    5471                                                        + pReNative->Core.aVars[idxVar].idxStackSlot * sizeof(uint64_t));
     5647                                                       iemNativeVarCalcBpDisp(pReNative, idxVar));
    54725648                        off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
    54735649                        continue;
     
    54845660                                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
    54855661                        off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG0_GREG,
    5486                                                         IEMNATIVE_FP_OFF_STACK_VARS
    5487                                                       + pReNative->Core.aVars[idxOtherVar].idxStackSlot * sizeof(uint64_t));
     5662                                                      iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxOtherVar].idxStackSlot));
    54885663                        off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, IEMNATIVE_CALL_ARG0_GREG);
    54895664                        continue;
     
    55165691    /** @todo There is a special with IEM_MC_MEM_MAP_U16_RW and friends requiring
    55175692     *        a IEM_MC_MEM_COMMIT_AND_UNMAP_RW after a AIMPL call typically with
    5518      *        an argument value. */
     5693     *        an argument value.  There is also some FPU stuff. */
    55195694    for (uint32_t i = cHiddenArgs; i < cArgs; i++)
    55205695    {
    5521         uint8_t idxVar = pReNative->Core.aidxArgVars[i];
     5696        uint8_t const idxVar = pReNative->Core.aidxArgVars[i];
    55225697        Assert(idxVar < RT_ELEMENTS(pReNative->Core.aVars));
     5698
     5699        /* no need to free registers: */
     5700        AssertMsg(i < IEMNATIVE_CALL_ARG_GREG_COUNT
     5701                  ?    pReNative->Core.aVars[idxVar].idxReg == g_aidxIemNativeCallRegs[i]
     5702                    || pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX
     5703                  : pReNative->Core.aVars[idxVar].idxReg == UINT8_MAX,
     5704                  ("i=%d idxVar=%d idxReg=%d, expected %d\n", i, idxVar, pReNative->Core.aVars[idxVar].idxReg,
     5705                   i < IEMNATIVE_CALL_ARG_GREG_COUNT ? g_aidxIemNativeCallRegs[i] : UINT8_MAX));
     5706
    55235707        pReNative->Core.aidxArgVars[i] = UINT8_MAX;
    55245708        pReNative->Core.bmVars        &= ~RT_BIT_32(idxVar);
     5709        iemNativeVarFreeStackSlots(pReNative, idxVar);
    55255710    }
    55265711    Assert(pReNative->Core.u64ArgVars == UINT64_MAX);
     
    58276012
    58286013
    5829 #if 0
    58306014#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) \
    5831     off = iemNativeEmitStoreGregU16Const(pReNative, off, a_iGReg, a_u16Value)
     6015    off = iemNativeEmitStoreGregU16(pReNative, off, a_iGReg, a_u16Value)
     6016
     6018/** Emits code for the constant-value case of IEM_MC_STORE_GREG_U16. */
     6018DECL_INLINE_THROW(uint32_t)
     6019iemNativeEmitStoreGregU16Const(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint16_t uValue)
     6020{
     6021pReNative->pInstrBuf[off++] = 0xcc;
     6022    Assert(iGReg < 16);
     6023    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
     6024                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg),
     6025                                                                 kIemNativeGstRegUse_ForUpdate);
     6026#ifdef RT_ARCH_AMD64
     6027    /* mov reg16, imm16 */
     6028    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
     6029    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
     6030    if (idxGstTmpReg >= 8)
     6031        pbCodeBuf[off++] = X86_OP_REX_B;
     6032    pbCodeBuf[off++] = 0xb8 + (idxGstTmpReg & 7);
     6033    pbCodeBuf[off++] = RT_BYTE1(uValue);
     6034    pbCodeBuf[off++] = RT_BYTE2(uValue);
     6035
     6036#elif defined(RT_ARCH_ARM64)
     6037    /* movk xdst, #uValue, lsl #0 */
     6038    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     6039    pu32CodeBuf[off++] = Armv8A64MkInstrMovK(idxGstTmpReg, uValue);
     6040
     6041#else
     6042# error "Port me!"
     6043#endif
     6044
     6045    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     6046
     6047    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg & 15]));
     6048    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
     6049    return off;
     6050}
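
As a concrete instance of the AMD64 path in the constant emitter above: storing 0x1234 with the guest register allocated to host register 10 yields the standard mov r16, imm16 encoding, 66 41 BA 34 12 (operand-size prefix, REX.B, B8+2, little-endian immediate). A self-checking sketch of that byte sequence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t        abBuf[8];
        unsigned       off          = 0;
        uint8_t  const idxGstTmpReg = 10;       /* host reg holding the guest GPR */
        uint16_t const uValue       = 0x1234;

        abBuf[off++] = 0x66;                    /* X86_OP_PRF_SIZE_OP: 16-bit operand size  */
        if (idxGstTmpReg >= 8)
            abBuf[off++] = 0x41;                /* X86_OP_REX_B: extend opcode reg field    */
        abBuf[off++] = 0xb8 + (idxGstTmpReg & 7);   /* mov r16, imm16                       */
        abBuf[off++] = (uint8_t)(uValue & 0xff);    /* RT_BYTE1                             */
        abBuf[off++] = (uint8_t)(uValue >> 8);      /* RT_BYTE2                             */

        /* Resulting bytes: 66 41 BA 34 12 = mov r10w, 0x1234 */
        assert(off == 5 && abBuf[2] == 0xba);
        return 0;
    }
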
     6051
    58326052
    58336053/** Emits code for IEM_MC_STORE_GREG_U16. */
     
    58356055iemNativeEmitStoreGregU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGReg, uint8_t idxValueVar)
    58366056{
    5837     Assert(iGReg < 16)
     6057    Assert(iGReg < 16);
     6058    IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxValueVar);
     6059
     6060    /*
     6061     * If it's a constant value (unlikely) we treat this as a
     6062     * IEM_MC_STORE_GREG_U16_CONST statement.
     6063     */
     6064    if (pReNative->Core.aVars[idxValueVar].enmKind == kIemNativeVarKind_Stack)
     6065    { /* likely */ }
     6066    else
     6067    {
     6068        AssertStmt(pReNative->Core.aVars[idxValueVar].enmKind != kIemNativeVarKind_Immediate,
     6069                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
     6070        return iemNativeEmitStoreGregU16Const(pReNative, off, iGReg, (uint16_t)pReNative->Core.aVars[idxValueVar].u.uValue);
     6071    }
     6072
    58386073    uint8_t const idxGstTmpReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off,
    58396074                                                                 (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + iGReg),
    58406075                                                                 kIemNativeGstRegUse_ForUpdate);
    58416076
    5842 
    58436077#ifdef RT_ARCH_AMD64
     6078    /* mov reg16, reg16 or [mem16] */
    58446079    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 12);
    5845 
    5846     /* To the lowest byte of the register: mov r8, imm8 */
    5847     if (iGRegEx < 16)
    5848     {
     6080    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
     6081    if (pReNative->Core.aVars[idxValueVar].idxReg < RT_ELEMENTS(pReNative->Core.aHstRegs))
     6082    {
     6083        if (idxGstTmpReg >= 8 || pReNative->Core.aVars[idxValueVar].idxReg >= 8)
     6084            pbCodeBuf[off++] = (idxGstTmpReg >= 8                              ? X86_OP_REX_R : 0)
     6085                             | (pReNative->Core.aVars[idxValueVar].idxReg >= 8 ? X86_OP_REX_B : 0);
     6086        pbCodeBuf[off++] = 0x8b;
     6087        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxGstTmpReg & 7, pReNative->Core.aVars[idxValueVar].idxReg & 7);
     6088    }
     6089    else
     6090    {
     6091        AssertStmt(pReNative->Core.aVars[idxValueVar].idxStackSlot != UINT8_MAX,
     6092                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_4));
    58496093        if (idxGstTmpReg >= 8)
    5850             pbCodeBuf[off++] = X86_OP_REX_B;
    5851         else if (idxGstTmpReg >= 4)
    5852             pbCodeBuf[off++] = X86_OP_REX;
    5853         pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
    5854         pbCodeBuf[off++] = u8Value;
    5855     }
    5856     /* Otherwise it's to ah, ch, dh or bh: use mov r8, imm8 if we can, otherwise, we rotate. */
    5857     else if (idxGstTmpReg < 4)
    5858     {
    5859         pbCodeBuf[off++] = 0xb4 + idxGstTmpReg;
    5860         pbCodeBuf[off++] = u8Value;
    5861     }
    5862     else
    5863     {
    5864         /* ror reg64, 8 */
    5865         pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
    5866         pbCodeBuf[off++] = 0xc1;
    5867         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, idxGstTmpReg & 7);
    5868         pbCodeBuf[off++] = 8;
    5869 
    5870         /* mov reg8, imm8  */
    5871         if (idxGstTmpReg >= 8)
    5872             pbCodeBuf[off++] = X86_OP_REX_B;
    5873         else if (idxGstTmpReg >= 4)
    5874             pbCodeBuf[off++] = X86_OP_REX;
    5875         pbCodeBuf[off++] = 0xb0 + (idxGstTmpReg & 7);
    5876         pbCodeBuf[off++] = u8Value;
    5877 
    5878         /* rol reg64, 8 */
    5879         pbCodeBuf[off++] = X86_OP_REX_W | (idxGstTmpReg < 8 ? 0 : X86_OP_REX_B);
    5880         pbCodeBuf[off++] = 0xc1;
    5881         pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, idxGstTmpReg & 7);
    5882         pbCodeBuf[off++] = 8;
     6094            pbCodeBuf[off++] = X86_OP_REX_R;
     6095        pbCodeBuf[off++] = 0x8b;
     6096        off = iemNativeEmitGprByBpDisp(pbCodeBuf, off, idxGstTmpReg, iemNativeVarCalcBpDisp(pReNative, idxValueVar), pReNative);
    58836097    }
    58846098
    58856099#elif defined(RT_ARCH_ARM64)
    5886     uint8_t const    idxImmReg   = iemNativeRegAllocTmpImm(pReNative, &off, u8Value);
    5887     uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    5888     if (iGRegEx < 16)
    5889         /* bfi w1, w2, 0, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 7:0. */
    5890         pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 0, 8);
    5891     else
    5892         /* bfi w1, w2, 8, 8 - moves bits 7:0 from idxImmReg to idxGstTmpReg bits 15:8. */
    5893         pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxImmReg, 8, 8);
    5894     iemNativeRegFreeTmp(pReNative, idxImmReg);
     6100    /* bfi w1, w2, 0, 16 - moves bits 15:0 from idxVarReg to idxGstTmpReg bits 15:0. */
     6101    uint8_t const    idxVarReg   = iemNativeVarAllocRegister(pReNative, idxValueVar, &off);
     6102    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     6103    pu32CodeBuf[off++] = Armv8A64MkInstrBfi(idxGstTmpReg, idxVarReg, 0, 16);
    58956104
    58966105#else
     
    59006109    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    59016110
    5902     off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGRegEx & 15]));
    5903 
     6111    off = iemNativeEmitStoreGprToVCpuU64(pReNative, off, idxGstTmpReg, RT_UOFFSETOF_DYN(VMCPU, cpum.GstCtx.aGRegs[iGReg & 15]));
    59046112    iemNativeRegFreeTmp(pReNative, idxGstTmpReg);
    59056113    return off;
    59066114}
    5907 #endif
    59086115
    59096116
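
Likewise for the register variant above: with the guest value in host register 3 (bx) and the variable in host register 12 (r12w), the AMD64 branch emits 66 41 8B DC, i.e. mov bx, r12w, while the ARM64 branch needs only a single BFI to merge bits 15:0 of the source into the destination. A quick check of the REX/ModRM arithmetic used by the emitter:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t const idxGstTmpReg = 3;     /* destination: the guest GPR's host register */
        uint8_t const idxVarReg    = 12;    /* source: the variable's host register       */

        uint8_t const bRex   = (uint8_t)(  (idxGstTmpReg >= 8 ? 0x44 /*X86_OP_REX_R*/ : 0)
                                         | (idxVarReg    >= 8 ? 0x41 /*X86_OP_REX_B*/ : 0));
        uint8_t const bModRm = (uint8_t)(0xc0 /*mod=reg*/ | ((idxGstTmpReg & 7) << 3) | (idxVarReg & 7));

        assert(bRex   == 0x41);             /* only REX.B needed for r12 as r/m */
        assert(bModRm == 0xdc);             /* 66 41 8B DC = mov bx, r12w       */
        return 0;
    }
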
     
    68007007            iemNativeDbgInfoAddThreadedCall(pReNative, (IEMTHREADEDFUNCS)pCallEntry->enmFunction, pfnRecom != NULL);
    68017008#endif
    6802 #if defined(VBOX_STRICT) && 1
     7009#if defined(VBOX_STRICT)
    68037010            off = iemNativeEmitMarker(pReNative, off,
    68047011                                      RT_MAKE_U32((pTb->Thrd.cCalls - cCallsLeft - 1) | (pfnRecom ? 0x8000 : 0),
    68057012                                                  pCallEntry->enmFunction));
    68067013#endif
     7014#if defined(VBOX_STRICT)
     7015            iemNativeRegAssertSanity(pReNative);
     7016#endif
    68077017
    68087018            /*
    68097019             * Actual work.
    68107020             */
     7021            Log2(("%u[%u]: %s%s\n", pTb->Thrd.cCalls - cCallsLeft - 1, pCallEntry->idxInstr,
     7022                  g_apszIemThreadedFunctions[pCallEntry->enmFunction], pfnRecom ? "" : "(todo)"));
    68117023            if (pfnRecom) /** @todo stats on this.   */
    68127024            {
  • trunk/src/VBox/VMM/include/IEMN8veRecompiler.h

    r102022 r102065  
    804804/**
    805805 * Checks that we didn't exceed the space requested in the last
    806  * iemNativeInstrBufEnsure() call. */
     806 * iemNativeInstrBufEnsure() call.
     807 */
    807808#define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
    808809    AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
    809810              ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
    810811
     812/**
     813 * Checks that a variable index is valid.
     814 */
     815#define IEMNATIVE_ASSERT_VAR_IDX(a_pReNative, a_idxVar) \
     816    AssertMsg(   (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
     817              && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar)), ("%s=%d\n", #a_idxVar, a_idxVar))
     818
     819/**
     820 * Calculates the stack address of a variable as a [r]BP displacement value.
     821 */
     822DECL_FORCE_INLINE(int32_t)
     823iemNativeStackCalcBpDisp(uint8_t idxStackSlot)
     824{
     825    Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
     826    return idxStackSlot * sizeof(uint64_t) + IEMNATIVE_FP_OFF_STACK_VARS;
     827}
     828
     829/**
     830 * Calculates the stack address of a variable as a [r]BP displacement value.
     831 */
     832DECL_FORCE_INLINE(int32_t)
     833iemNativeVarCalcBpDisp(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar)
     834{
     835    return iemNativeStackCalcBpDisp(pReNative->Core.aVars[idxVar].idxStackSlot);
     836}
     837
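
To put numbers on the new helpers: the diff shows the old spill sites computed idxStackSlot * sizeof(uint64_t) - IEMNATIVE_FP_OFF_STACK_VARS while the load sites added the offset; iemNativeStackCalcBpDisp() makes the additive form the single source of truth. A sketch with a hypothetical frame offset (the real IEMNATIVE_FP_OFF_STACK_VARS is defined elsewhere in this header and is presumably a negative BP displacement):

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for IEMNATIVE_FP_OFF_STACK_VARS. */
    #define MY_FP_OFF_STACK_VARS (-0x60)

    static int32_t myStackCalcBpDisp(uint8_t idxStackSlot)
    {
        return idxStackSlot * (int32_t)sizeof(uint64_t) + MY_FP_OFF_STACK_VARS;
    }

    int main(void)
    {
        assert(myStackCalcBpDisp(0) == -0x60);   /* first slot: [xBP - 0x60] */
        assert(myStackCalcBpDisp(2) == -0x50);   /* third slot: [xBP - 0x50] */
        return 0;
    }
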
    811838/** @} */
    812839