VirtualBox

Changeset 106427 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Oct 17, 2024 10:59:12 AM (6 weeks ago)
Author:
vboxsync
Message:

VMM/IEM: Reduced the parameters for iemNativeEmitRetn. bugref:10720

Location:
trunk/src/VBox/VMM
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.h

    r106191 r106427  
    387387#define IEM_MC_IND_CALL_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewRIP, a_cbInstr)     do { IEM_LIVENESS_PC64_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
    388388
    389 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_i16, a_cbInstr)                              do { IEM_LIVENESS_PC16_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
    390 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_i16, a_cbInstr, a_enmEffOpSize)              do { IEM_LIVENESS_PC32_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
    391 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_i16, a_cbInstr, a_enmEffOpSize)              do { IEM_LIVENESS_PC64_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
    392 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_i16, a_cbInstr)                   do { IEM_LIVENESS_PC16_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
    393 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_i16, a_cbInstr, a_enmEffOpSize)  do { IEM_LIVENESS_PC32_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
    394 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i16, a_cbInstr, a_enmEffOpSize)  do { IEM_LIVENESS_PC64_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
     389#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr)                            do { IEM_LIVENESS_PC16_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
     390#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize)            do { IEM_LIVENESS_PC32_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
     391#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize)            do { IEM_LIVENESS_PC64_JMP_NO_FLAGS();   IEM_LIVENESS_STACK(); } while (0)
     392#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbPopArgs, a_cbInstr)                 do { IEM_LIVENESS_PC16_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
     393#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) do { IEM_LIVENESS_PC32_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
     394#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) do { IEM_LIVENESS_PC64_JMP_WITH_FLAGS(); IEM_LIVENESS_STACK(); } while (0)
    395395
    396396/* Effective address stuff is rather complicated... */
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h

    r106423 r106427  
    22772277
    22782278/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets. */
    2279 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_u16Pop, a_cbInstr) \
    2280     off = iemNativeEmitRetn(pReNative, off, (a_cbInstr), (a_u16Pop), false /*f64Bit*/, IEMMODE_16BIT, pCallEntry->idxInstr)
     2279#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr) \
     2280    off = iemNativeEmitRetn<IEMMODE_16BIT, false>(pReNative, off, (a_cbInstr), (a_cbPopArgs), pCallEntry->idxInstr)
    22812281
    22822282/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets. */
    2283 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    2284     off = iemNativeEmitRetn(pReNative, off, (a_cbInstr), (a_u16Pop), false /*f64Bit*/, (a_enmEffOpSize), pCallEntry->idxInstr)
     2283#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     2284    Assert((a_enmEffOpSize) == IEMMODE_32BIT || (a_enmEffOpSize) == IEMMODE_16BIT); \
     2285    off = (a_enmEffOpSize) == IEMMODE_32BIT \
     2286        ? iemNativeEmitRetn<IEMMODE_32BIT, false>(pReNative, off, (a_cbInstr), (a_cbPopArgs), pCallEntry->idxInstr) \
     2287        : iemNativeEmitRetn<IEMMODE_16BIT, false>(pReNative, off, (a_cbInstr), (a_cbPopArgs), pCallEntry->idxInstr)
    22852288
    22862289/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code. */
    2287 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    2288     off = iemNativeEmitRetn(pReNative, off, (a_cbInstr), (a_u16Pop), true /*f64Bit*/, (a_enmEffOpSize), pCallEntry->idxInstr)
     2290#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     2291    Assert((a_enmEffOpSize) == IEMMODE_64BIT || (a_enmEffOpSize) == IEMMODE_16BIT); \
     2292    off = (a_enmEffOpSize) == IEMMODE_64BIT \
     2293        ? iemNativeEmitRetn<IEMMODE_64BIT, true>(pReNative, off, (a_cbInstr), (a_cbPopArgs), pCallEntry->idxInstr) \
     2294        : iemNativeEmitRetn<IEMMODE_16BIT, true>(pReNative, off, (a_cbInstr), (a_cbPopArgs), pCallEntry->idxInstr)
    22892295
    22902296/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets that checks and
    22912297 *  clears flags. */
    2292 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16Pop, a_cbInstr) \
    2293     IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_u16Pop, a_cbInstr); \
     2298#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbPopArgs, a_cbInstr) \
     2299    IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr); \
    22942300    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)
    22952301
    22962302/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets that checks and
    22972303 *  clears flags. */
    2298 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    2299     IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_u16Pop, a_cbInstr, a_enmEffOpSize); \
     2304#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     2305    IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize); \
    23002306    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)
    23012307
    23022308/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code that checks and
    23032309 *  clears flags. */
    2304 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    2305     IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_u16Pop, a_cbInstr, a_enmEffOpSize); \
     2310#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     2311    IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize); \
    23062312    off = iemNativeEmitFinishInstructionFlagsCheck(pReNative, off)
    23072313
    23082314/** IEM_MC[|_FLAT32|_FLAT64]_RETN_AND_FINISH */
    2309 DECL_INLINE_THROW(uint32_t)
    2310 iemNativeEmitRetn(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint16_t cbPop, bool f64Bit,
    2311                   IEMMODE enmEffOpSize, uint8_t idxInstr)
     2315template<IEMMODE const a_enmEffOpSize, bool const a_f64Bit>
     2316DECL_INLINE_THROW(uint32_t)
     2317iemNativeEmitRetn(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cbInstr, uint16_t cbPopArgs, uint8_t idxInstr)
    23122318{
    23132319    RT_NOREF(cbInstr);
     2320    AssertCompile(a_enmEffOpSize == IEMMODE_64BIT || a_enmEffOpSize == IEMMODE_32BIT || a_enmEffOpSize == IEMMODE_16BIT);
    23142321
    23152322#ifdef VBOX_STRICT
     
    23302337     * For FLAT modes we'll do this in TlbDone as we'll be using the incoming RSP
    23312338     * directly as the effective stack pointer.
     2339     *
    23322340     * (Code structure is very similar to that of PUSH)
    23332341     *
     
    23362344     *       aren't commonly used (or useful) and thus not in need of optimizing.
    23372345     *
    2338      * Note! For non flat modes the guest RSP is not allocated for update but rather for calculation
    2339      *       as the shadowed register would remain modified even if the return address throws a \#GP(0)
    2340      *       due to being outside the CS limit causing a wrong stack pointer value in the guest (see
    2341      *       the near return testcase in bs3-cpu-basic-2). If no exception is thrown the shadowing is transfered
    2342      *       to the new register returned by iemNativeRegAllocTmpForGuestReg() at the end.
     2346     * Note! For non-flat modes the guest RSP is not allocated for update but
     2347     *       rather for calculation as the shadowed register would remain modified
     2348     *       even if the return address throws a #GP(0) due to being outside the
     2349     *       CS limit causing a wrong stack pointer value in the guest (see the
     2350     *       near return testcase in bs3-cpu-basic-2). If no exception is thrown
     2351     *       the shadowing is transfered to the new register returned by
     2352     *       iemNativeRegAllocTmpForGuestReg() at the end.
    23432353     */
    2344     uint8_t   const cbMem           =   enmEffOpSize == IEMMODE_64BIT
     2354    RT_CONSTEXPR
     2355    uint8_t   const cbMem           =   a_enmEffOpSize == IEMMODE_64BIT
    23452356                                      ? sizeof(uint64_t)
    2346                                       : enmEffOpSize == IEMMODE_32BIT
     2357                                      : a_enmEffOpSize == IEMMODE_32BIT
    23472358                                      ? sizeof(uint32_t)
    23482359                                      : sizeof(uint16_t);
    2349     bool      const fFlat           = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) && enmEffOpSize != IEMMODE_16BIT; /* see note */
     2360/** @todo the basic flatness should be detected by the threaded compiler step
     2361 *        like for the other macros... */
     2362    bool      const fFlat           = IEM_F_MODE_X86_IS_FLAT(pReNative->fExec) && a_enmEffOpSize != IEMMODE_16BIT; /* see note */
    23502363    uintptr_t const pfnFunction     = fFlat
    2351                                       ?   enmEffOpSize == IEMMODE_64BIT
     2364                                      ?   a_enmEffOpSize == IEMMODE_64BIT
    23522365                                        ? (uintptr_t)iemNativeHlpStackFlatFetchU64
    23532366                                        : (uintptr_t)iemNativeHlpStackFlatFetchU32
    2354                                       :   enmEffOpSize == IEMMODE_32BIT
     2367                                      :   a_enmEffOpSize == IEMMODE_32BIT
    23552368                                        ? (uintptr_t)iemNativeHlpStackFetchU32
    23562369                                        : (uintptr_t)iemNativeHlpStackFetchU16;
     
    23802393        iemNativeRegFreeTmp(pReNative, idxRegSsAttr);
    23812394        offFixupJumpToUseOtherBitSp = off;
    2382         if (enmEffOpSize == IEMMODE_32BIT)
     2395        if RT_CONSTEXPR_IF(a_enmEffOpSize == IEMMODE_32BIT)
    23832396        {
    23842397            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_e); /* jump if zero */
    2385             off = iemNativeEmitStackPopForRetnUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPop);
     2398            off = iemNativeEmitStackPopForRetnUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPopArgs);
    23862399        }
    23872400        else
    23882401        {
    2389             Assert(enmEffOpSize == IEMMODE_16BIT);
     2402            Assert(a_enmEffOpSize == IEMMODE_16BIT);
    23902403            off = iemNativeEmitJccToFixedEx(pCodeBuf, off, off /*8-bit suffices*/, kIemNativeInstrCond_ne); /* jump if not zero */
    2391             off = iemNativeEmitStackPopForRetnUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPop,
     2404            off = iemNativeEmitStackPopForRetnUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPopArgs,
    23922405                                                      idxRegMemResult);
    23932406        }
     
    24262439        iemNativeFixupFixedJump(pReNative, offFixupJumpToUseOtherBitSp, off);
    24272440        if ((pReNative->fExec & IEM_F_MODE_CPUMODE_MASK) == IEMMODE_32BIT)
    2428             off = iemNativeEmitStackPopForRetnUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPop,
     2441            off = iemNativeEmitStackPopForRetnUse16Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPopArgs,
    24292442                                                      idxRegMemResult);
    24302443        else
    2431             off = iemNativeEmitStackPopForRetnUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPop);
     2444            off = iemNativeEmitStackPopForRetnUse32Sp(pCodeBuf, off, idxRegRsp, idxRegEffSp, cbMem, cbPopArgs);
    24322445        off = iemNativeEmitJmpToFixedEx(pCodeBuf, off, offLabelSpUpdateEnd);
    24332446        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
     
    25252538
    25262539    /* Check limit before committing RIP and RSP (may #GP(0) + exit TB). */
    2527     if (!f64Bit)
     2540    if RT_CONSTEXPR_IF(!a_f64Bit)
    25282541/** @todo we can skip this test in FLAT 32-bit mode. */
    25292542        off = iemNativeEmitCheckGpr32AgainstCsSegLimitMaybeRaiseGp0(pReNative, off, idxRegMemResult, idxInstr);
    25302543    /* Check that the address is canonical, raising #GP(0) + exit TB if it isn't. */
    2531     else if (enmEffOpSize == IEMMODE_64BIT)
     2544    else if RT_CONSTEXPR_IF(a_enmEffOpSize == IEMMODE_64BIT)
    25322545        off = iemNativeEmitCheckGprCanonicalMaybeRaiseGp0(pReNative, off, idxRegMemResult, idxInstr);
    25332546
     
    25352548    if (idxRegEffSp == idxRegRsp)
    25362549    {
    2537         if (enmEffOpSize == IEMMODE_64BIT)
    2538             off = iemNativeEmitAddGprImm(pReNative, off, idxRegRsp, sizeof(uint64_t) + cbPop);
     2550        if RT_CONSTEXPR_IF(a_enmEffOpSize == IEMMODE_64BIT)
     2551            off = iemNativeEmitAddGprImm(pReNative, off, idxRegRsp, sizeof(uint64_t) + cbPopArgs);
    25392552        else
    25402553        {
    2541             Assert(enmEffOpSize == IEMMODE_32BIT);
    2542             off = iemNativeEmitAddGpr32Imm(pReNative, off, idxRegRsp, sizeof(uint32_t) + cbPop);
     2554            Assert(a_enmEffOpSize == IEMMODE_32BIT);
     2555            off = iemNativeEmitAddGpr32Imm(pReNative, off, idxRegRsp, sizeof(uint32_t) + cbPopArgs);
    25432556        }
    25442557    }
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp

    r106061 r106427  
    523523
    524524/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets. */
    525 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_u16Pop, a_cbInstr) \
    526     return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), IEMMODE_16BIT)
     525#define IEM_MC_RETN_AND_FINISH_THREADED_PC16(a_cbPopArgs, a_cbInstr) \
     526    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)
    527527
    528528/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets. */
    529 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    530     return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))
     529#define IEM_MC_RETN_AND_FINISH_THREADED_PC32(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     530    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
    531531
    532532/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code. */
    533 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    534     return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))
     533#define IEM_MC_RETN_AND_FINISH_THREADED_PC64(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     534    return iemRegRipNearReturnAndFinishNoFlags((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
    535535
    536536/** Variant of IEM_MC_RETN_AND_FINISH for pre-386 targets that checks and
    537537 *  clears flags. */
    538 #define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16Pop, a_cbInstr) \
    539     return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), IEMMODE_16BIT)
     538#define IEM_MC_RETN_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_cbPopArgs, a_cbInstr) \
     539    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), IEMMODE_16BIT)
    540540
    541541/** Variant of IEM_MC_RETN_AND_FINISH for 386+ targets that checks and
    542542 *  clears flags. */
    543 #define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    544     return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))
     543#define IEM_MC_RETN_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     544    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
    545545
    546546/** Variant of IEM_MC_RETN_AND_FINISH for use in 64-bit code that checks and
    547547 *  clears flags. */
    548 #define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16Pop, a_cbInstr, a_enmEffOpSize) \
    549     return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_u16Pop), (a_enmEffOpSize))
     548#define IEM_MC_RETN_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_cbPopArgs, a_cbInstr, a_enmEffOpSize) \
     549    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), a_cbInstr, (a_cbPopArgs), (a_enmEffOpSize))
    550550
    551551#undef IEM_MC_RETN_AND_FINISH
  • trunk/src/VBox/VMM/include/IEMMc.h

    r106179 r106427  
    105105/** Fetches the near return address from the stack, sets RIP and RSP (may trigger
    106106 * \#GP or \#SS), finishes the instruction and returns. */
    107 #define IEM_MC_RETN_AND_FINISH(a_u16Pop) \
    108     return iemRegRipNearReturnAndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_u16Pop), pVCpu->iem.s.enmEffOpSize)
     107#define IEM_MC_RETN_AND_FINISH(a_cbPopArgs) \
     108    return iemRegRipNearReturnAndFinishClearingRF((pVCpu), IEM_GET_INSTR_LEN(pVCpu), (a_cbPopArgs), pVCpu->iem.s.enmEffOpSize)
    109109
    110110
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    r106179 r106427  
    617617#define IEM_MC_IND_CALL_U32_AND_FINISH(a_u32NewIP)      do { (void)fMcBegin; CHK_TYPE(uint32_t, a_u32NewIP); return VINF_SUCCESS; } while (0)
    618618#define IEM_MC_IND_CALL_U64_AND_FINISH(a_u64NewIP)      do { (void)fMcBegin; CHK_TYPE(uint64_t, a_u64NewIP); return VINF_SUCCESS; } while (0)
    619 #define IEM_MC_RETN_AND_FINISH(a_u16Pop)                do { (void)fMcBegin; return VINF_SUCCESS; } while (0)
     619#define IEM_MC_RETN_AND_FINISH(a_cbPopArgs)             do { (void)fMcBegin; return VINF_SUCCESS; } while (0)
    620620#define IEM_MC_RAISE_DIVIDE_ERROR_IF_LOCAL_IS_ZERO(a_uVar) do { (void)fMcBegin; CHK_VAR(a_uVar); if (a_uVar == 0) return VERR_TRPM_ACTIVE_TRAP; } while (0)
    621621#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE()       do { (void)fMcBegin; } while (0)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette