Changeset 101304 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Sep 29, 2023 1:02:02 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
--- IEMAllCImpl.cpp  (r100787)
+++ IEMAllCImpl.cpp  (r101304)
@@ -9889,10 +9889,10 @@
  *
  * @param   iStReg              The other stack register.
- * @param   pfnAImpl            The assembly comparison implementation.
+ * @param   fUCmp               true for FUCOMI[P], false for FCOMI[P].
  * @param   uPopAndFpuOpcode    Bits 15-0: The FPU opcode.
  *                              Bit 31: Whether we should pop the stack when
  *                              done or not.
  */
-IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
+IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
 {
     Assert(iStReg < 8);
@@ -9918,5 +9918,9 @@
     if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
     {
-        uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
+        uint32_t u32Eflags;
+        if (!fUCmp)
+            u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
+        else
+            u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
 
         pFpuCtx->FSW &= ~X86_FSW_C1;
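Why a bool instead of a function pointer? The native recompiler introduced below passes deferred-CImpl arguments as plain integer immediates (see the iemNativeEmitLoadGprImm64 calls in IEMAllN8veRecompiler.cpp), so a bool presumably travels through the call tables more easily than a host function pointer, and the worker selection moves into the CImpl function itself. The packed uPopAndFpuOpcode parameter follows the same idea. A standalone sketch (not VirtualBox code) of how the callee can split it, assuming the bit layout the doc comment above describes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical example value: pop bit (31) set, FPU opcode in bits 15:0. */
        uint32_t const uPopAndFpuOpcode = (UINT32_C(1) << 31) | UINT32_C(0x05E9);
        uint16_t const uFpuOpcode       = (uint16_t)(uPopAndFpuOpcode & UINT32_C(0xffff)); /* bits 15:0 */
        int      const fPop             = (int)((uPopAndFpuOpcode >> 31) & 1);             /* bit 31 */
        printf("fpu opcode=%#06x pop=%d\n", (unsigned)uFpuOpcode, fPop);
        return 0;
    }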
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
--- IEMAllInstOneByte.cpp.h  (r100858)
+++ IEMAllInstOneByte.cpp.h  (r101304)
@@ -10538,5 +10538,5 @@
     IEMOP_MNEMONIC(fucomi_st0_stN, "fucomi st0,stN");
     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
-                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fucomi_r80_by_r80,
+                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), true /*fUCmp*/,
                                 0 /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
 }
@@ -10548,5 +10548,5 @@
     IEMOP_MNEMONIC(fcomi_st0_stN, "fcomi st0,stN");
     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
-                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
+                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                 false /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
 }
@@ -11467,5 +11467,5 @@
     IEMOP_MNEMONIC(fucomip_st0_stN, "fucomip st0,stN");
     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
-                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
+                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                 RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
 }
@@ -11477,5 +11477,5 @@
     IEMOP_MNEMONIC(fcomip_st0_stN, "fcomip st0,stN");
     IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
-                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
+                                iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                 RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
 }
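For context: FCOMI and FUCOMI produce the same ZF/PF/CF encoding of the compare result and differ only in exception behavior (the ordered compare signals #IA for any NaN operand, the unordered one only for signalling NaNs), which is why a single fUCmp flag can select the worker. Note that both *P rows above pass false /*fUCmp*/ even for fucomip; that faithfully mirrors the replaced code, which also used iemAImpl_fcomi_r80_by_r80 in both places. A standalone sketch (not VirtualBox code) of the EFLAGS encoding these instructions use:

    #include <stdint.h>
    #include <stdio.h>

    /* ZF is EFLAGS bit 6, PF bit 2, CF bit 0. */
    enum FpuCmpResult { kGreater, kLess, kEqual, kUnordered };

    static uint32_t fcomiEflags(enum FpuCmpResult enmRes)
    {
        switch (enmRes)
        {
            case kGreater: return 0;                    /* ZF=0 PF=0 CF=0 */
            case kLess:    return UINT32_C(1) << 0;     /* CF=1 */
            case kEqual:   return UINT32_C(1) << 6;     /* ZF=1 */
            default:       return (UINT32_C(1) << 6) | (UINT32_C(1) << 2) | UINT32_C(1); /* ZF=PF=CF=1 */
        }
    }

    int main(void)
    {
        printf("unordered -> eflags %#x\n", (unsigned)fcomiEflags(kUnordered)); /* prints 0x45 */
        return 0;
    }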
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
--- IEMAllN8vePython.py  (r101275)
+++ IEMAllN8vePython.py  (r101304)
@@ -37,8 +37,9 @@
 __version__ = "$Revision$"
 
-# Standard python imports.
+# Standard python imports:
 #import sys;
 
-#import IEMAllInstPython as iai;
+# Out python imports:
+import IEMAllInstPython as iai;
 
 
@@ -47,3 +48,6 @@
     Class that deals with transforming a threaded function variation into a
     native recompiler function.
+
+    This base class doesn't do any transforming and just renders the same
+    code as for the threaded function.
     """
@@ -58,11 +62,12 @@
         (for the selected host architecture).
         """
-        return False;
+        return True;
 
     def renderCode(self, cchIndent):
         """
         Returns the native recompiler function body for this threaded variant.
         """
-        return ' ' * cchIndent + ' AssertFailed();';
+        aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
+        return iai.McStmt.renderCodeForList(aoStmts, cchIndent);
 
 
@@ -78,6 +83,15 @@
     """
 
-    _ = oVariation;
-    _ = sHostArch;
+    #
+    # Analyze the statements.
+    #
+    aoStmts = oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
+
+    # The simplest case are the IEM_MC_DEFER_TO_CIMPL_*_RET_THREADED ones, just pass them thru:
+    if (    len(aoStmts) == 1
+        and aoStmts[0].sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
+        and aoStmts[0].sName.endswith('_RET_THREADED')
+        and sHostArch in ('amd64',)):
+        return NativeRecompFunctionVariation(oVariation, sHostArch);
 
     return None;
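Net effect of this file: the native recompiler scaffolding stops being a stub. analyzeVariantForNativeRecomp() now hands back a NativeRecompFunctionVariation for any variation whose body is a single IEM_MC_DEFER_TO_CIMPL_*_RET_THREADED statement (restricted to amd64 hosts for now), and the base class renders the threaded statements verbatim via iai.McStmt.renderCodeForList() instead of asserting, so the generated native function simply re-emits the deferred CImpl call. The C++ side below supplies native expansions for exactly those macros.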
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
--- IEMAllN8veRecompiler.cpp  (r101275)
+++ IEMAllN8veRecompiler.cpp  (r101304)
@@ -1405,4 +1405,81 @@
 
 /**
+ * Emits a call to a CImpl function or something similar.
+ */
+static int32_t iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
+                                      uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
+                                      uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
+{
+#ifdef VBOX_STRICT
+    off = iemNativeEmitMarker(pReNative, off);
+    AssertReturn(off != UINT32_MAX, UINT32_MAX);
+#endif
+
+    /*
+     * Load the parameters.
+     */
+#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
+    /* Special code the hidden VBOXSTRICTRC pointer. */
+    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
+    if (cAddParams > 0)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
+    if (cAddParams > 1)
+        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
+    if (cAddParams > 2)
+        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
+    off = iemNativeEmitLeaGrpByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
+
+#else
+    AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
+    off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
+    off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
+    if (cAddParams > 0)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
+    if (cAddParams > 1)
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
+    if (cAddParams > 2)
+# if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
+        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
+# else
+        off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
+# endif
+#endif
+    AssertReturn(off != UINT32_MAX, off);
+
+    /*
+     * Make the call.
+     */
+#ifdef RT_ARCH_AMD64
+    off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, pfnCImpl);
+
+    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
+    AssertReturn(pbCodeBuf, UINT32_MAX);
+    pbCodeBuf[off++] = 0xff;                    /* call rax */
+    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
+
+# if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
+    off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
+# endif
+
+#elif defined(RT_ARCH_ARM64)
+    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, pfnCImpl);
+
+    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
+    AssertReturn(pu32CodeBuf, UINT32_MAX);
+    pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
+
+#else
+# error "Port me!"
+#endif
+
+    /*
+     * Check the status code.
+     */
+    return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
+}
+
+
+/**
  * Emits a call to a threaded worker function.
  */
@@ -1524,5 +1601,5 @@
 
 /**
- * Emits a standard epilog.
+ * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
  */
 static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
@@ -1808,4 +1885,43 @@
 
 
+DECLINLINE(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
+                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)
+{
+    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 1, uArg0, 0, 0);
+}
+
+
+DECLINLINE(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
+                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)
+{
+    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0);
+}
+
+
+DECLINLINE(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
+                                             uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1, uint64_t uArg2)
+{
+    return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2);
+}
+
+
+/*
+ * MC definitions for the native recompiler.
+ */
+
+#define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
+    return iemNativeEmitCImplCall0(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr) /** @todo not used ... */
+
+#define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
+    return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0)
+
+#define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
+    return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1)
+
+#define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
+    return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2)
+
+
+
 /*
  * Include g_apfnIemNativeRecompileFunctions and associated functions.
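Putting the three pieces together: for a variation the Python analyzer accepts, the generated native function body is just the IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(...) statement rendered verbatim, and the macros above turn it into an emitter call. A sketch for the fcomi/fucomi case from the first file; the statement assumes the surrounding generated function provides pReNative, off and pCallEntry (as the macros expect), and the concrete argument values are hypothetical:

    /* Rendered statement (sketch only): */
    IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(2 /*cbInstr, hypothetical*/,
                                         IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
                                         iemCImpl_fcomi_fucomi,
                                         1 /*iStReg*/, 0 /*fUCmp*/, 0x05E9 /*uPopAndFpuOpcode, hypothetical*/);
    /* ...which the macro above expands to:
     *   return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr,
     *                                  (uintptr_t)iemCImpl_fcomi_fucomi, 2, 1, 0, 0x05E9);
     * iemNativeEmitCImplCall() then loads pVCpu, cbInstr and the three
     * arguments into the calling-convention registers, emits the indirect
     * call ("call rax" on amd64, "blr" via IEMNATIVE_REG_FIXED_TMP0 on
     * arm64), and finally emits the status check via
     * iemNativeEmitCheckCallRetAndPassUp(). */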