VirtualBox

Changeset 100734 in vbox


Timestamp:
Jul 29, 2023 2:04:22 AM (16 months ago)
Author:
vboxsync
Message:

VMM/IEM: Generate TBs for invalid instruction encodings as well. This involved special-casing recompiler call generation for C instruction implementation functions that don't take any extra arguments, so that we can catch all the deeply hidden IEMOP_RAISE_INVALID_OPCODE_RET invocations and similar. Also had to clean up the hacky decoding of effective-address-related opcode bytes for undefined opcodes, introducing IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) to hide the ugliness. bugref:10369
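
To illustrate the decode cleanup described above, here is a minimal sketch (an editor's illustration with a hypothetical decoder name; the real functions are in the IEMAllInstCommon.cpp.h hunks below) of what an undefined-opcode decoder looks like with the new macro:

    /* Sketch: decoding an undefined opcode that still carries a ModR/M byte.
       The effective-address bytes (SIB/displacement) must be consumed so the
       instruction length comes out right, but the address itself is discarded. */
    FNIEMOP_DEF_1(iemOp_InvalidWithRMExample, uint8_t, bRm)   /* hypothetical name */
    {
        IEMOP_MNEMONIC(InvalidWithRMExample, "InvalidWithRMExample");
        if (IEM_IS_MODRM_MEM_MODE(bRm))
            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);  /* replaces the old open-coded iemOpHlpCalcRmEffAddr block */
        IEMOP_HLP_DONE_DECODING();
        IEMOP_RAISE_INVALID_OPCODE_RET();            /* defers to a zero-argument C impl, now recompiled too */
    }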

Location:
trunk/src/VBox/VMM
Files:
10 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstCommon.cpp.h

    (r100733 → r100734)

 {
     IEMOP_MNEMONIC(InvalidWithRMNeedDecode, "InvalidWithRMNeedDecode");
-    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
-    {
-#ifndef TST_IEM_CHECK_MC
-        if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
-    }
+    if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL && IEM_IS_MODRM_MEM_MODE(bRm))
+        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
     IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
…
 {
     IEMOP_MNEMONIC(InvalidWithRMAllNeeded, "InvalidWithRMAllNeeded");
-#ifndef TST_IEM_CHECK_MC
     if (IEM_IS_MODRM_MEM_MODE(bRm))
-    {
-        RTGCPTR      GCPtrEff;
-        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-    }
-#endif
+        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
     IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
…
     if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     {
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
-        uint8_t bImm8;  IEM_OPCODE_GET_NEXT_U8(&bImm8);  RT_NOREF(bRm);
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
+        uint8_t bImm8;  IEM_OPCODE_GET_NEXT_U8(&bImm8);  RT_NOREF(bImm8);
     }
     IEMOP_HLP_DONE_DECODING();
…
 {
     IEMOP_MNEMONIC(InvalidWithRMAllNeedImm8, "InvalidWithRMAllNeedImm8");
-#ifndef TST_IEM_CHECK_MC
     if (IEM_IS_MODRM_MEM_MODE(bRm))
-    {
-        RTGCPTR      GCPtrEff;
-        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-    }
-#endif
-    uint8_t bImm8;  IEM_OPCODE_GET_NEXT_U8(&bImm8);  RT_NOREF(bRm);
+        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
+    uint8_t bImm8;  IEM_OPCODE_GET_NEXT_U8(&bImm8);  RT_NOREF(bImm8);
     IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
…
     {
         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
     }
     IEMOP_HLP_DONE_DECODING();
…
     IEMOP_MNEMONIC(InvalidAllNeedRM, "InvalidAllNeedRM");
     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
     if (IEM_IS_MODRM_MEM_MODE(bRm))
-    {
-        RTGCPTR      GCPtrEff;
-        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-    }
-#endif
+        IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
     IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
…
     {
         uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
         uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
     }
…
         uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
         uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
     }
     IEMOP_HLP_DONE_DECODING();
…
         uint8_t b3rd; IEM_OPCODE_GET_NEXT_U8(&b3rd); RT_NOREF(b3rd);
         uint8_t bRm;  IEM_OPCODE_GET_NEXT_U8(&bRm);  RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 1, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
         uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm); RT_NOREF(bImm);
-        IEMOP_HLP_DONE_DECODING();
     }
-    IEMOP_RAISE_INVALID_OPCODE_RET();
-}
-
+    IEMOP_HLP_DONE_DECODING();
+    IEMOP_RAISE_INVALID_OPCODE_RET();
+}
+
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py

    (r100733 → r100734)

         self.oReHashDefine3 = re.compile('(?s)\A\s*([A-Za-z_][A-Za-z0-9_]*)[^(]\s*(.*)\Z');        ##< Simple, no arguments.
         self.oReHashUndef   = re.compile('^\s*#\s*undef\s+(.*)$');
-        self.oReMcBeginEnd  = re.compile(r'\bIEM_MC_(BEGIN|END|DEFER_TO_CIMPL_[0-5]_RET)\s*\(');
+        self.oReMcBeginEnd  = re.compile(r'\bIEM_MC_(BEGIN|END|DEFER_TO_CIMPL_[1-5]_RET)\s*\('); ##> Not DEFER_TO_CIMPL_0_RET!
         self.fDebug         = True;
         self.fDebugMc       = False;
…
     def workerIemMcDeferToCImplXRet(self, sCode, offBeginStatementInCodeStr, offBeginStatementInLine, cParams):
         """
-        Process a IEM_MC_DEFER_TO_CIMPL_[0-5]_RET macro invocation.
+        Process a IEM_MC_DEFER_TO_CIMPL_[1-5]_RET macro invocation.
         """
         sStmt = 'IEM_MC_DEFER_TO_CIMPL_%d_RET' % (cParams,);
…
         oMatch = self.oReHashDefine2.match(sRest);
         if oMatch:
-            asArgs = [sParam.strip() for sParam in oMatch.group(2).split(',')];
+            sAllArgs = oMatch.group(2).strip();
+            asArgs = [sParam.strip() for sParam in sAllArgs.split(',')] if sAllArgs else None;
             sBody  = oMatch.group(3);
         else:
…
         # Is this of any interest to us?  We do NOT support MC blocks wihtin
         # nested macro expansion, just to avoid lots of extra work.
+        #
+        # Note! IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX and other macros someone making
+        #       use of IEMOP_RAISE_INVALID_LOCK_PREFIX_RET() will be ignored here and
+        #       dealt with by overriding IEMOP_RAISE_INVALID_LOCK_PREFIX_RET and its
+        #       siblings in the recompiler.  This is a lot simpler than nested macro
+        #       expansion and lots of heuristics for locating all the relevant macros.
+        #       Also, this way we don't produce lots of unnecessary threaded functions.
         #
         if sBody.find("IEM_MC_BEGIN") < 0:
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstTwoByte0f.cpp.h

    (r100733 → r100734)

     {
         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
         if (IEM_IS_MODRM_MEM_MODE(bRm))
-        {
-            RTGCPTR      GCPtrEff;
-            VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-            if (rcStrict != VINF_SUCCESS)
-                return rcStrict;
-        }
-#endif
-        IEMOP_HLP_DONE_DECODING();
-    }
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
+    }
+    IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
 }
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstVexMap1.cpp.h

    (r100733 → r100734)

 FNIEMOP_DEF(iemOp_vud0)
 {
+/** @todo testcase: vud0 */
     IEMOP_MNEMONIC(vud0, "vud0");
     if (pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
     {
         uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); RT_NOREF(bRm);
-#ifndef TST_IEM_CHECK_MC
-        RTGCPTR      GCPtrEff;
-        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff);
-        if (rcStrict != VINF_SUCCESS)
-            return rcStrict;
-#endif
-        IEMOP_HLP_DONE_DECODING();
-    }
+        if (IEM_IS_MODRM_MEM_MODE(bRm))
+            IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(bRm);
+    }
+    IEMOP_HLP_DONE_DECODING();
     IEMOP_RAISE_INVALID_OPCODE_RET();
 }
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncsBltIn.cpp

    (r100732 → r100734)

 }

+
+/**
+ * Built-in function that calls a C-implemention function taking zero arguments.
+ */
+IEM_DECL_IMPL_DEF(VBOXSTRICTRC, iemThreadedFunc_BltIn_DeferToCImpl0,
+                  (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2))
+{
+    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
+    uint8_t const      cbInstr  = (uint8_t)uParam1;
+    RT_NOREF(uParam2);
+    return pfnCImpl(pVCpu, cbInstr);
+}


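
For orientation (an editor's sketch, not part of the changeset): the three uint64_t parameters are the generic threaded-call argument slots. The recompiler packs the C-impl function pointer into uParam0 and the instruction length into uParam1 (see the IEM_MC2_EMIT_CALL_2 line in IEMAllThrdRecompiler.cpp below), so executing one such call from a TB amounts to roughly:

    /* Sketch only; iemCImplRaiseInvalidOpcode is assumed here as a representative
       zero-argument C impl, and cbInstr is whatever length was recorded when the
       TB was compiled. */
    VBOXSTRICTRC rcStrict = iemThreadedFunc_BltIn_DeferToCImpl0(pVCpu,
                                                                (uintptr_t)iemCImplRaiseInvalidOpcode, /* uParam0 */
                                                                cbInstr,                               /* uParam1 */
                                                                0 /* uParam2, unused */);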
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py

    (r100733 → r100734)

     ## The value indicates whether it terminates the TB or not. The goal is to
     ## improve the recompiler so all but END_TB will be False.
+    ##
+    ## @note iemThreadedRecompilerMcDeferToCImpl0 duplicates info found here.
     kdCImplFlags = {
         'IEM_CIMPL_F_MODE':                 True,
…
         # Emit fEndTb = true or fTbBranched = true if any of the CIMPL flags
         # indicates we should do so.
+        # Note! iemThreadedRecompilerMcDeferToCImpl0 duplicates work done here.
         asEndTbFlags      = [];
         asTbBranchedFlags = [];
…
                 asTbBranchedFlags.append(sFlag);
         if asTbBranchedFlags:
-            aoStmts.extend([
-                iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
-                                 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
-                                 cchIndent = cchIndent), # Using the inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
-                #iai.McCppGeneric('pVCpu->iem.s.fTbBranched = %s;'
-                #                 % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
-                #                 cchIndent = cchIndent),
-                #iai.McCppGeneric('pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;', cchIndent = cchIndent),
-                #iai.McCppGeneric('pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;', cchIndent = cchIndent),
-            ]);
+            aoStmts.append(iai.McCppGeneric('iemThreadedSetBranched(pVCpu, %s);'
+                                            % ((' | '.join(asTbBranchedFlags)).replace('IEM_CIMPL_F_BRANCH', 'IEMBRANCHED_F'),),
+                                            cchIndent = cchIndent)); # Inline fn saves ~2 seconds for gcc 13/dbg (1m13s vs 1m15s).
         if asEndTbFlags:
             aoStmts.append(iai.McCppGeneric('pVCpu->iem.s.fEndTb = true; /* %s */' % (','.join(asEndTbFlags),),
…
             '     * Predefined',
             '     */',
+            '    kIemThreadedFunc_DeferToCImpl0,',
             '    kIemThreadedFunc_CheckMode,',
             '    kIemThreadedFunc_CheckCsLim,',
…
                    + '     * Predefined.\n'
                    + '     */'
+                   + '    iemThreadedFunc_BltIn_DeferToCImpl0,\n'
                    + '    iemThreadedFunc_BltIn_CheckMode,\n'
                    + '    iemThreadedFunc_BltIn_CheckCsLim,\n'
…
                    + '     * Predefined.\n'
                    + '     */'
+                   + '    "BltIn_DeferToCImpl0",\n'
                    + '    "BltIn_CheckMode",\n'
                    + '    "BltIn_CheckCsLim",\n'
  • trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp

    (r100733 → r100734)

     ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
 #endif
+
+/*
+ * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
+ */
+#undef IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
+#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
+        uint64_t uEffAddrInfo; \
+        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0, &uEffAddrInfo); \
+    } while (0)

 /*
…
  */
 #undef IEM_MC_DEFER_TO_CIMPL_0_RET
-#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_pfnCImpl)
-
-typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
-typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
-
-DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, PFNIEMCIMPL0 pfnCImpl)
-{
+#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
+    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_pfnCImpl)
+
+DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, PFNIEMCIMPL0 pfnCImpl)
+{
+    Log8(("CImpl0: %04x:%08RX64 LB %#x: %#x %p\n",
+          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, pfnCImpl));
+
+    IEM_MC2_BEGIN_EMIT_CALLS();
+    IEM_MC2_EMIT_CALL_2(kIemThreadedFunc_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu));
+    IEM_MC2_END_EMIT_CALLS(fFlags);
+
+    /* We have to repeat work normally done by kdCImplFlags and
+       ThreadedFunctionVariation.emitThreadedCallStmts here. */
+    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_REP))
+        pVCpu->iem.s.fEndTb = true;
+
+    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
+    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
+    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
+    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
+    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);
+    if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
+        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);
+
     return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
 }
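
Putting the pieces together (again only an editor's sketch; the exact flag value and C-impl name are assumptions based on the surrounding IEM code rather than quotes from this changeset), the expansion chain for an invalid encoding in the recompiler build now looks roughly like this:

    IEMOP_RAISE_INVALID_OPCODE_RET();
    /* ...expands, a few macro layers down, to something like... */
    IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode);
    /* ...which the override above turns into... */
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, IEM_CIMPL_F_XCPT, iemCImplRaiseInvalidOpcode);

so a kIemThreadedFunc_DeferToCImpl0 call gets emitted into the TB (and the TB is terminated via fEndTb when the flags call for it) instead of the invalid encoding being skipped by the recompiler.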
  • trunk/src/VBox/VMM/include/IEMInline.h

    (r100591 → r100734)

 # endif

+/**
+ * For fetching the opcode bytes for an ModR/M effective address, but throw
+ * away the result.
+ *
+ * This is used when decoding undefined opcodes and such where we want to avoid
+ * unnecessary MC blocks.
+ *
+ * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
+ *       used instead.  At least for now...
+ */
+# ifndef IEM_WITH_SETJMP
+#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
+        RTGCPTR      GCPtrEff; \
+        VBOXSTRICTRC rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 0, &GCPtrEff); \
+        if (rcStrict != VINF_SUCCESS) \
+            return rcStrict; \
+    } while (0)
+# else
+#  define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
+        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); \
+    } while (0)
+# endif
+
 #endif /* !IEM_WITH_OPAQUE_DECODER_STATE */

  • trunk/src/VBox/VMM/include/IEMInternal.h

    (r100731 → r100734)

 # define IEM_CIMPL_CALL_0(a_fn)            a_fn(pVCpu, cbInstr)

+/** Type for a C instruction implementation function taking no extra
+ *  arguments. */
+typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
+/** Function pointer type for a C instruction implementation function taking
+ *  no extra arguments. */
+typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
+
 /**
  * For typedef'ing or declaring a C instruction implementation function taking
…
  * Macro for calling iemCImplRaiseDivideError().
  *
- * This enables us to add/remove arguments and force different levels of
- * inlining as we wish.
+ * This is for things that will _always_ decode to an \#DE, taking the
+ * recompiler into consideration and everything.
  *
  * @return  Strict VBox status code.
…
  * Macro for calling iemCImplRaiseInvalidLockPrefix().
  *
- * This enables us to add/remove arguments and force different levels of
- * inlining as we wish.
+ * This is for things that will _always_ decode to an \#UD, taking the
+ * recompiler into consideration and everything.
  *
  * @return  Strict VBox status code.
…
 void            iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb);

+IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_DeferToCImpl0,
+                    (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
+
 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, iemThreadedFunc_BltIn_CheckMode,
                     (PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2));
  • trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp

    (r100733 → r100734)

 #define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64)              do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
 #define IEM_OPCODE_GET_NEXT_U64(a_pu64)                     do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm)            do { RT_NOREF(a_bRm); } while (0)
 #define IEMOP_HLP_MIN_186()                                 do { } while (0)
 #define IEMOP_HLP_MIN_286()                                 do { } while (0)