VirtualBox

Changeset 101304 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Sep 29, 2023 1:02:02 AM (15 months ago)
Author:
vboxsync
Message:

VMM/IEM: Emit native code for pure defer-CImpl instructions. Tested on linux.amd64 only and this doesn't cover cases with zero parameters. bugref:10371

Location:
trunk/src/VBox/VMM/VMMAll
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp

    r100787 r101304  
    98899889 *
    98909890 * @param   iStReg              The other stack register.
    9891  * @param   pfnAImpl            The assembly comparison implementation.
     9891 * @param   fUCmp               true for FUCOMI[P], false for FCOMI[P].
    98929892 * @param   uPopAndFpuOpcode    Bits 15-0: The FPU opcode.
    98939893 *                              Bit  31: Whether we should pop the stack when
    98949894 *                              done or not.
    98959895 */
    9896 IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
     9896IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
    98979897{
    98989898    Assert(iStReg < 8);
     
    99189918    if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
    99199919    {
    9920         uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
     9920        uint32_t u32Eflags;
     9921        if (!fUCmp)
     9922            u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
     9923        else
     9924            u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
    99219925
    99229926        pFpuCtx->FSW &= ~X86_FSW_C1;
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h

    r100858 r101304  
    1053810538    IEMOP_MNEMONIC(fucomi_st0_stN, "fucomi st0,stN");
    1053910539    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    10540                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fucomi_r80_by_r80,
     10540                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), true /*fUCmp*/,
    1054110541                                0 /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1054210542}
     
    1054810548    IEMOP_MNEMONIC(fcomi_st0_stN, "fcomi st0,stN");
    1054910549    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    10550                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     10550                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1055110551                                false /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1055210552}
     
    1146711467    IEMOP_MNEMONIC(fucomip_st0_stN, "fucomip st0,stN");
    1146811468    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    11469                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     11469                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1147011470                                RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1147111471}
     
    1147711477    IEMOP_MNEMONIC(fcomip_st0_stN, "fcomip st0,stN");
    1147811478    IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
    11479                                 iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
     11479                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
    1148011480                                RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
    1148111481}
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py

    r101275 r101304  
    3737__version__ = "$Revision$"
    3838
    39 # Standard python imports.
     39# Standard python imports:
    4040#import sys;
    4141
    42 #import IEMAllInstPython as iai;
     42# Our python imports:
     43import IEMAllInstPython as iai;
    4344
    4445
     
    4748    Class that deals with transforming a threaded function variation into a
    4849    native recompiler function.
     50
     51    This base class doesn't do any transforming and just renders the same
     52    code as for the threaded function.
    4953    """
    5054
     
    5862        (for the selected host architecture).
    5963        """
    60         return False;
     64        return True;
    6165
    6266    def renderCode(self, cchIndent):
     
    6468        Returns the native recompiler function body for this threaded variant.
    6569        """
    66         return ' ' * cchIndent + '    AssertFailed();';
     70        aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
     71        return iai.McStmt.renderCodeForList(aoStmts, cchIndent);
    6772
    6873
     
    7883    """
    7984
    80     _ = oVariation;
    81     _ = sHostArch;
     85    #
     86    # Analyze the statements.
     87    #
     88    aoStmts = oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
     89
     90    # The simplest case are the IEM_MC_DEFER_TO_CIMPL_*_RET_THREADED ones, just pass them thru:
     91    if (    len(aoStmts) == 1
     92        and aoStmts[0].sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
     93        and aoStmts[0].sName.endswith('_RET_THREADED')
     94        and sHostArch in ('amd64',)):
     95        return NativeRecompFunctionVariation(oVariation, sHostArch);
    8296
    8397    return None;
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r101275 r101304  
    14051405
    14061406/**
     1407 * Emits a call to a CImpl function or something similar.
     1408 */
     1409static int32_t iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1410                                      uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
     1411                                      uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
     1412{
     1413#ifdef VBOX_STRICT
     1414    off = iemNativeEmitMarker(pReNative, off);
     1415    AssertReturn(off != UINT32_MAX, UINT32_MAX);
     1416#endif
     1417
     1418    /*
     1419     * Load the parameters.
     1420     */
     1421#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
     1422    /* Special-case code for the hidden VBOXSTRICTRC pointer. */
     1423    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1424    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
     1425    if (cAddParams > 0)
     1426        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
     1427    if (cAddParams > 1)
     1428        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
     1429    if (cAddParams > 2)
     1430        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
     1431    off = iemNativeEmitLeaGrpByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
     1432
     1433#else
     1434    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
     1435    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
     1436    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
     1437    if (cAddParams > 0)
     1438        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
     1439    if (cAddParams > 1)
     1440        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
     1441    if (cAddParams > 2)
     1442# if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
     1443        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
     1444# else
     1445        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
     1446# endif
     1447#endif
     1448    AssertReturn(off != UINT32_MAX, off);
     1449
     1450    /*
     1451     * Make the call.
     1452     */
     1453#ifdef RT_ARCH_AMD64
     1454    off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, pfnCImpl);
     1455
     1456    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
     1457    AssertReturn(pbCodeBuf, UINT32_MAX);
     1458    pbCodeBuf[off++] = 0xff;                    /* call rax */
     1459    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
     1460
     1461# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
     1462    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
     1463# endif
     1464
     1465#elif defined(RT_ARCH_ARM64)
     1466    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, pfnCImpl);
     1467
     1468    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
     1469    AssertReturn(pu32CodeBuf, UINT32_MAX);
     1470    pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
     1471
     1472#else
     1473# error "Port me!"
     1474#endif
     1475
     1476    /*
     1477     * Check the status code.
     1478     */
     1479    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
     1480}
     1481
     1482
     1483/**
    14071484 * Emits a call to a threaded worker function.
    14081485 */
     
    15241601
    15251602/**
    1526  * Emits a standard epilog.
     1603 * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
    15271604 */
    15281605static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
     
    18081885
    18091886
     1887DECLINLINE(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1888                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)
     1889{
     1890    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 1, uArg0, 0, 0);
     1891}
     1892
     1893
     1894DECLINLINE(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1895                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)
     1896{
     1897    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0);
     1898}
     1899
     1900
     1901DECLINLINE(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
     1902                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1, uint64_t uArg2)
     1903{
     1904    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2);
     1905}
     1906
     1907
     1908/*
     1909 * MC definitions for the native recompiler.
     1910 */
     1911
     1912#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
     1913    return iemNativeEmitCImplCall0(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr) /** @todo not used ... */
     1914
     1915#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
     1916    return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0)
     1917
     1918#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
     1919    return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1)
     1920
     1921#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
     1922    return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2)
     1923
     1924
     1925
    18101926/*
    18111927 * Include g_apfnIemNativeRecompileFunctions and associated functions.
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette