VirtualBox

Changeset 99299 in vbox


Timestamp:
Apr 6, 2023 12:06:25 AM
Author:
vboxsync
Message:

VMM/IEM: More work on processing MC blocks and generating threaded functions from them. IEMThreadedFunctions.cpp compiles now. Did some PC update optimizations when doing addressing variations. bugref:10369

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py

    r99296 → r99299

    @@ -4575,6 +4575,4 @@
     
             #
    -        # Complete and discard the current block.
    -        #
             # HACK ALERT! For blocks originating from macro expansion the start and
             #             end line will be the same, but the line has multiple

    @@ -4601,4 +4599,36 @@
                 asLines = [sLine + '\n' for sLine in sRawLine.split('\n')];
     
    +        #
    +        # Strip anything following the IEM_MC_END(); statement in the final line,
    +        # so that we don't carry on any trailing 'break' after macro expansions
    +        # like for iemOp_movsb_Xb_Yb.
    +        #
    +        while asLines[-1].strip() == '':
    +            asLines.pop();
    +        sFinal      = asLines[-1];
    +        offFinalEnd = sFinal.find('IEM_MC_END');
    +        if offFinalEnd < 0: self.raiseError('bogus IEM_MC_END: Not in final line: %s' % (sFinal,));
    +        offFinalEnd += len('IEM_MC_END');
    +
    +        while sFinal[offFinalEnd].isspace():
    +            offFinalEnd += 1;
    +        if sFinal[offFinalEnd] != '(': self.raiseError('bogus IEM_MC_END: Expected "(" at %s: %s' % (offFinalEnd, sFinal,));
    +        offFinalEnd += 1;
    +
    +        while sFinal[offFinalEnd].isspace():
    +            offFinalEnd += 1;
    +        if sFinal[offFinalEnd] != ')': self.raiseError('bogus IEM_MC_END: Expected ")" at %s: %s' % (offFinalEnd, sFinal,));
    +        offFinalEnd += 1;
    +
    +        while sFinal[offFinalEnd].isspace():
    +            offFinalEnd += 1;
    +        if sFinal[offFinalEnd] != ';': self.raiseError('bogus IEM_MC_END: Expected ";" at %s: %s' % (offFinalEnd, sFinal,));
    +        offFinalEnd += 1;
    +
    +        asLines[-1] = sFinal[: offFinalEnd];
    +
    +        #
    +        # Complete and discard the current block.
    +        #
             self.oCurMcBlock.complete(self.iLine, offEndStatementInLine, asLines);
             self.oCurMcBlock = None;
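
    The change above ends each MC block with a small scanner that walks past
    'IEM_MC_END', its parentheses and the terminating semicolon, dropping any
    trailing text left over from macro expansion. A minimal standalone sketch of
    the same idea, with a whitespace-tolerant regex standing in for the
    character-by-character scan (the helper name is made up for illustration):

        import re

        def trim_after_mc_end(as_lines):
            # Drop empty trailing lines first, like the parser does.
            while as_lines and as_lines[-1].strip() == '':
                as_lines.pop()
            # Match 'IEM_MC_END ( ) ;' with optional whitespace between tokens.
            m = re.search(r'IEM_MC_END\s*\(\s*\)\s*;', as_lines[-1])
            if not m:
                raise ValueError('bogus IEM_MC_END: not in final line: %s' % (as_lines[-1],))
            # Keep everything up to and including the ';', discard e.g. 'break;'.
            as_lines[-1] = as_lines[-1][:m.end()]
            return as_lines

        print(trim_after_mc_end(['IEM_MC_BEGIN(0, 0);\n', 'IEM_MC_END(); break;']))
        # -> ['IEM_MC_BEGIN(0, 0);\n', 'IEM_MC_END();']
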
  • trunk/src/VBox/VMM/VMMAll/IEMAllThreadedPython.py

    r99298 → r99299

    @@ -132,4 +132,6 @@
         ## @note Effective operand size is generally handled in the decoder, at present
         ##       we only do variations on addressing and memory accessing.
    +    ## @todo Blocks without addressing should have 64-bit and 32-bit PC update
    +    ##       variations to reduce code size (see iemRegAddToRip).
         ## @{
         ksVariation_Default     = '';               ##< No variations.

    @@ -370,5 +372,5 @@
                                 self.dParamRefs['u32Disp'][0].sNewName, self.dParamRefs['cbInstr'][0].sNewName,
                             ];
    -                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED ...
    +                # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED and maybe *_LM64/_NOT64 ...
                     elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
                                             'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):

    @@ -377,4 +379,8 @@
                             oNewStmt.asParams.append(self.dParamRefs['pVCpu->iem.s.enmEffOpSize'][0].sNewName);
                         oNewStmt.sName += '_THREADED';
    +                    if self.sVariation in (self.ksVariation_Addr64, self.ksVariation_Addr64_32):
    +                        oNewStmt.sName += '_LM64';
    +                    elif self.sVariation != self.ksVariation_Default:
    +                        oNewStmt.sName += '_NOT64';
     
                     # ... and IEM_MC_*_GREG_U8 into *_THREADED w/ reworked index taking REX into account
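
    The renaming logic added here is the PC update optimization from the commit
    message: RIP-advancing statements get a '_THREADED' suffix, plus '_LM64' for
    the 64-bit addressing variations and '_NOT64' for the remaining non-default
    ones, so each variant can use a cheaper PC update. A simplified sketch of
    that rule (the constants below are illustrative stand-ins for the script's
    real ksVariation_* values):

        KS_DEFAULT   = ''
        KS_ADDR16    = '_Addr16'
        KS_ADDR32    = '_Addr32'
        KS_ADDR64    = '_Addr64'
        KS_ADDR64_32 = '_Addr6432'

        def threaded_name(s_name, s_variation):
            s_name += '_THREADED'
            if s_variation in (KS_ADDR64, KS_ADDR64_32):
                s_name += '_LM64'       # block only ever runs in 64-bit (long) mode
            elif s_variation != KS_DEFAULT:
                s_name += '_NOT64'      # block never runs in 64-bit mode
            return s_name

        print(threaded_name('IEM_MC_ADVANCE_RIP_AND_FINISH', KS_ADDR64))
        # -> IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_LM64
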
  • trunk/src/VBox/VMM/VMMAll/IEMThreadedFunctions.cpp

    r99298 → r99299

    @@ -80,60 +80,82 @@
     #define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED(a_cbInstr) \
         return iemRegAddToRipAndFinishingClearingRF(pVCpu, a_cbInstr)
    -#undef IEM_MC_ADVANCE_RIP_AND_FINISH
    +#undef  IEM_MC_ADVANCE_RIP_AND_FINISH
    +
    +/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
    + *  and only used when we're in 64-bit code. */
    +#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_LM64(a_cbInstr) \
    +    return iemRegAddToRip64AndFinishingClearingRF(pVCpu, a_cbInstr)
    +#undef  IEM_MC_ADVANCE_RIP_AND_FINISH
    +
    +/** Variant of IEM_MC_ADVANCE_RIP_AND_FINISH with instruction length as param
    + *  and never used in 64-bit code. */
    +#define IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_NOT64(a_cbInstr) \
    +    return iemRegAddToEip32AndFinishingClearingRF(pVCpu, a_cbInstr)
    +#undef  IEM_MC_ADVANCE_RIP_AND_FINISH
     
     /** Variant of IEM_MC_REL_JMP_S8_AND_FINISH with instruction length as param. */
     #define IEM_MC_REL_JMP_S8_AND_FINISH_THREADED(a_i8, a_cbInstr, a_enmEffOpSize) \
         return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, a_cbInstr, (a_i8), a_enmEffOpSize)
    -#undef IEM_MC_REL_JMP_S8_AND_FINISH
    +#undef  IEM_MC_REL_JMP_S8_AND_FINISH
     
     /** Variant of IEM_MC_REL_JMP_S16_AND_FINISH with instruction length as param. */
     #define IEM_MC_REL_JMP_S16_AND_FINISH_THREADED(a_i16, a_cbInstr) \
         return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, a_cbInstr, (a_i16))
    -#undef IEM_MC_REL_JMP_S16_AND_FINISH
    +#undef  IEM_MC_REL_JMP_S16_AND_FINISH
     
     /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as param. */
     #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED(a_i32, a_cbInstr, a_enmEffOpSize) \
         return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, a_cbInstr, (a_i32), a_enmEffOpSize)
    -#undef IEM_MC_REL_JMP_S32_AND_FINISH
    +#undef  IEM_MC_REL_JMP_S32_AND_FINISH
     
     /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    -# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
    +#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR16(a_GCPtrEff, a_bRm, a_u16Disp) \
         (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr16(pVCpu, a_bRm, a_u16Disp)
    +#undef  IEM_MC_CALC_RM_EFF_ADDR
     
     /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    -# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    +#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
         (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
    +#undef  IEM_MC_CALC_RM_EFF_ADDR
     
     /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    -# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
    +#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR32FLAT(a_GCPtrEff, a_bRm, a_bSib, a_u32Disp) \
         (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr32(pVCpu, a_bRm, a_bSib, a_u32Disp)
    +#undef  IEM_MC_CALC_RM_EFF_ADDR
     
     /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    -# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    +#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR64(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
         (a_GCPtrEff) = iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
    +#undef  IEM_MC_CALC_RM_EFF_ADDR
     
     /** Variant of IEM_MC_CALC_RM_EFF_ADDR with additional parameters. */
    -# define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
    +#define IEM_MC_CALC_RM_EFF_ADDR_THREADED_ADDR6432(a_GCPtrEff, a_bRmEx, a_bSib, a_u32Disp, a_cbImm) \
         (a_GCPtrEff) = (uint32_t)iemOpHlpCalcRmEffAddrThreadedAddr64(pVCpu, a_bRmEx, a_bSib, a_u32Disp, a_cbImm)
    +#undef  IEM_MC_CALC_RM_EFF_ADDR
     
     /** Variant of IEM_MC_CALL_CIMPL_1 with explicit instruction length parameter. */
    -# define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_pfnCImpl, a0) \
    +#define IEM_MC_CALL_CIMPL_1_THREADED(a_cbInstr, a_pfnCImpl, a0) \
         return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0)
    +#undef  IEM_MC_CALL_CIMPL_1
     
     /** Variant of IEM_MC_CALL_CIMPL_2 with explicit instruction length parameter. */
    -# define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_pfnCImpl, a0, a1) \
    +#define IEM_MC_CALL_CIMPL_2_THREADED(a_cbInstr, a_pfnCImpl, a0, a1) \
         return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1)
    +#undef  IEM_MC_CALL_CIMPL_2
     
     /** Variant of IEM_MC_CALL_CIMPL_3 with explicit instruction length parameter. */
    -# define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2) \
    +#define IEM_MC_CALL_CIMPL_3_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2) \
         return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2)
    +#undef  IEM_MC_CALL_CIMPL_3
     
     /** Variant of IEM_MC_CALL_CIMPL_4 with explicit instruction length parameter. */
    -# define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3) \
    +#define IEM_MC_CALL_CIMPL_4_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3) \
         return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3)
    +#undef  IEM_MC_CALL_CIMPL_4
     
     /** Variant of IEM_MC_CALL_CIMPL_5 with explicit instruction length parameter. */
    -# define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3, a4) \
    +#define IEM_MC_CALL_CIMPL_5_THREADED(a_cbInstr, a_pfnCImpl, a0, a1, a2, a3, a4) \
         return (a_pfnCImpl)(pVCpu, (a_cbInstr), a0, a1, a2, a3, a4)
    +#undef  IEM_MC_CALL_CIMPL_5
     
     /** Variant of IEM_MC_FETCH_GREG_U8 with extended (20) register index. */

    @@ -160,40 +182,50 @@
     #define IEM_MC_FETCH_GREG_U8_SX_U32_THREADED(a_u32Dst, a_iGRegEx) \
         (a_u32Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
    +#undef IEM_MC_FETCH_GREG_U8_SX_U32
     
     /** Variant of IEM_MC_FETCH_GREG_U8_SX_U64 with extended (20) register index. */
     #define IEM_MC_FETCH_GREG_U8_SX_U64_THREADED(a_u64Dst, a_iGRegEx) \
         (a_u64Dst) = (int8_t)iemGRegFetchU8Ex(pVCpu, (a_iGRegEx))
    +#undef IEM_MC_FETCH_GREG_U8_SX_U64
     
     /** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
     #define IEM_MC_STORE_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
    +#undef IEM_MC_STORE_GREG_U8
     
     /** Variant of IEM_MC_STORE_GREG_U8 with extended (20) register index. */
     #define IEM_MC_STORE_GREG_U8_CONST_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) = (a_u8Value)
    +#undef IEM_MC_STORE_GREG_U8
     
     /** Variant of IEM_MC_REF_GREG_U8 with extended (20) register index. */
     #define IEM_MC_REF_GREG_U8_THREADED(a_pu8Dst, a_iGRegEx) \
         (a_pu8Dst) = iemGRegRefU8Ex(pVCpu, (a_iGRegEx))
    +#undef IEM_MC_REF_GREG_U8
     
     /** Variant of IEM_MC_ADD_GREG_U8 with extended (20) register index. */
     #define IEM_MC_ADD_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) += (a_u8Value)
    +#undef IEM_MC_ADD_GREG_U8
     
     /** Variant of IEM_MC_SUB_GREG_U8 with extended (20) register index. */
     #define IEM_MC_SUB_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) -= (a_u8Value)
    +#undef IEM_MC_SUB_GREG_U8
     
     /** Variant of IEM_MC_ADD_GREG_U8_TO_LOCAL with extended (20) register index. */
     #define IEM_MC_ADD_GREG_U8_TO_LOCAL_THREADED(a_u8Value, a_iGRegEx) \
    -    do { (a_u8Value)  += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
    +    do { (a_u8Value) += iemGRegFetchU8Ex(pVCpu, (a_iGRegEx)); } while (0)
    +#undef IEM_MC_ADD_GREG_U8_TO_LOCAL
     
     /** Variant of IEM_MC_AND_GREG_U8 with extended (20) register index. */
     #define IEM_MC_AND_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) &= (a_u8Value)
    +#undef IEM_MC_AND_GREG_U8
     
     /** Variant of IEM_MC_OR_GREG_U8 with extended (20) register index. */
     #define IEM_MC_OR_GREG_U8_THREADED(a_iGRegEx, a_u8Value) \
         *iemGRegRefU8Ex(pVCpu, (a_iGRegEx)) |= (a_u8Value)
    +#undef IEM_MC_OR_GREG_U8
     
     /**

    @@ -278,4 +310,7 @@
         /* Get the register (or SIB) value. */
         uint32_t u32EffAddr;
    +#ifdef _MSC_VER
    +    u32EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
    +#endif
         switch (bRm & X86_MODRM_RM_MASK)
         {

    @@ -372,6 +407,10 @@
     {
         /* Get the register (or SIB) value. */
    +#ifdef _MSC_VER
    +        u64EffAddr = 0; /* MSC uninitialized variable analysis is too simple, it seems. */
    +#endif
         switch (bRmEx & (X86_MODRM_RM_MASK | 0x8)) /* bRmEx[bit 3] = REX.B */
         {
    +            default:
             case  0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
             case  1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
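
    Among the effective-address variants above, ADDR6432 is the notable case: it
    reuses the 64-bit helper and truncates the result via the (uint32_t) cast,
    which is what a 32-bit address-size override in long mode requires. A tiny
    sketch of that truncation (a plain Python integer standing in for the C
    types; the function name is illustrative):

        def calc_eff_addr_6432(u64_eff_addr):
            # Model the C-level (uint32_t) truncation of the 64-bit result.
            return u64_eff_addr & 0xffffffff

        print(hex(calc_eff_addr_6432(0x100001234)))   # -> 0x1234
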
  • trunk/src/VBox/VMM/include/IEMInline.h

    r99298 → r99299

    @@ -1771,4 +1771,23 @@
     
     /**
    + * Updates the EIP/IP to point to the next instruction - only for 32-bit and
    + * 16-bit code.
    + *
    + * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    + * @param   cbInstr             The number of bytes to add.
    + */
    +DECL_FORCE_INLINE(void) iemRegAddToEip32(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
    +{
    +    /* See comment in iemRegAddToRip. */
    +    uint32_t const uEipPrev = pVCpu->cpum.GstCtx.eip;
    +    uint32_t const uEipNext = uEipPrev + cbInstr;
    +    if (IEM_GET_TARGET_CPU(pVCpu) >= IEMTARGETCPU_386)
    +        pVCpu->cpum.GstCtx.rip = (uint32_t)uEipNext;
    +    else
    +        pVCpu->cpum.GstCtx.rip = (uint16_t)uEipNext;
    +}
    +
    +
    +/**
      * Called by iemRegAddToRipAndFinishingClearingRF and others when any of the
      * following EFLAGS bits are set:

    @@ -1871,7 +1890,39 @@
      * @param   cbInstr             The number of bytes to add.
      */
    -DECLINLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
    +DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRipAndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
     {
         iemRegAddToRip(pVCpu, cbInstr);
    +    return iemRegFinishClearingRF(pVCpu);
    +}
    +
    +
    +/**
    + * Updates the RIP to point to the next instruction and clears EFLAGS.RF
    + * and CPUMCTX_INHIBIT_SHADOW.
    + *
    + * Only called from 64-bit code.
    + *
    + * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    + * @param   cbInstr             The number of bytes to add.
    + */
    +DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToRip64AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
    +{
    +    pVCpu->cpum.GstCtx.rip = pVCpu->cpum.GstCtx.rip + cbInstr;
    +    return iemRegFinishClearingRF(pVCpu);
    +}
    +
    +
    +/**
    + * Updates the EIP to point to the next instruction and clears EFLAGS.RF and
    + * CPUMCTX_INHIBIT_SHADOW.
    + *
    + * This is never called from 64-bit code.
    + *
    + * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
    + * @param   cbInstr             The number of bytes to add.
    + */
    +DECL_FORCE_INLINE(VBOXSTRICTRC) iemRegAddToEip32AndFinishingClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr) RT_NOEXCEPT
    +{
    +    iemRegAddToEip32(pVCpu, cbInstr);
         return iemRegFinishClearingRF(pVCpu);
     }
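
    These new inline functions split the generic iemRegAddToRip path into a
    64-bit-only RIP add and a 16/32-bit EIP add that preserves the pre-386
    16-bit IP wrap-around. A small sketch of the arithmetic involved (Python
    masks model the C integer truncation; the function names are illustrative):

        def add_to_rip64(rip, cb_instr):
            # 64-bit code: plain uint64_t addition to RIP.
            return (rip + cb_instr) & 0xffffffffffffffff

        def add_to_eip32(eip, cb_instr, target_is_386_plus=True):
            # 16/32-bit code: uint32_t addition, but pre-386 target CPUs
            # truncate to 16 bits, i.e. IP wraps (see iemRegAddToEip32).
            next_eip = (eip + cb_instr) & 0xffffffff
            return next_eip if target_is_386_plus else next_eip & 0xffff

        print(hex(add_to_eip32(0xffff, 1, target_is_386_plus=False)))   # -> 0x0
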