Changeset 101304 in vbox for trunk/src/VBox
- Timestamp: Sep 29, 2023 1:02:02 AM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r100787 → r101304

       *
       * @param   iStReg              The other stack register.
  -    * @param   pfnAImpl            The assembly comparison implementation.
  +    * @param   fUCmp               true for FUCOMI[P], false for FCOMI[P].
       * @param   uPopAndFpuOpcode    Bits 15-0: The FPU opcode.
       *                              Bit 31: Whether we should pop the stack when
       *                              done or not.
       */
  -   IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode)
  +   IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode)
      {
          Assert(iStReg < 8);
  …
      if ((pFpuCtx->FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
      {
  -       uint32_t u32Eflags = pfnAImpl(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
  +       uint32_t u32Eflags;
  +       if (!fUCmp)
  +           u32Eflags = iemAImpl_fcomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
  +       else
  +           u32Eflags = iemAImpl_fucomi_r80_by_r80(pFpuCtx, &u16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);

          pFpuCtx->FSW &= ~X86_FSW_C1;
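Replacing the PFNIEMAIMPLFPUR80EFL function pointer with a plain bool lets the decoders below defer this CImpl worker with integer-only parameters, which the new native recompiler emitter in this changeset can load as immediates. A minimal standalone sketch of the resulting dispatch (the iemAImpl_* workers are from the diff; the wrapper name is made up for illustration):

    /* Hypothetical wrapper showing the fUCmp dispatch in isolation. */
    static uint32_t doFComI(PX86FXSTATE pFpuCtx, uint16_t *pu16Fsw, uint8_t iStReg, bool fUCmp)
    {
        if (fUCmp) /* FUCOMI[P]: unordered compare, #IA only on SNaN input. */
            return iemAImpl_fucomi_r80_by_r80(pFpuCtx, pu16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
        /* FCOMI[P]: ordered compare, #IA on any NaN input. */
        return iemAImpl_fcomi_r80_by_r80(pFpuCtx, pu16Fsw, &pFpuCtx->aRegs[0].r80, &pFpuCtx->aRegs[iStReg].r80);
    }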
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r100858 → r101304

      IEMOP_MNEMONIC(fucomi_st0_stN, "fucomi st0,stN");
      IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
  -                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fucomi_r80_by_r80,
  +                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), true /*fUCmp*/,
                                  0 /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
  }
  …
      IEMOP_MNEMONIC(fcomi_st0_stN, "fcomi st0,stN");
      IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
  -                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
  +                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                  false /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
  }
  …
      IEMOP_MNEMONIC(fucomip_st0_stN, "fucomip st0,stN");
      IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
  -                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
  +                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                  RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
  }
  …
      IEMOP_MNEMONIC(fcomip_st0_stN, "fcomip st0,stN");
      IEM_MC_DEFER_TO_CIMPL_3_RET(IEM_CIMPL_F_FPU | IEM_CIMPL_F_STATUS_FLAGS,
  -                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), iemAImpl_fcomi_r80_by_r80,
  +                               iemCImpl_fcomi_fucomi, IEM_GET_MODRM_RM_8(bRm), false /*fUCmp*/,
                                  RT_BIT_32(31) /*fPop*/ | pVCpu->iem.s.uFpuOpcode);
  }
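As the doc comment in IEMAllCImpl.cpp above spells out, the last argument packs the FPU opcode into bits 15:0 and the pop flag into bit 31, which is why these call sites or RT_BIT_32(31), false, or 0 together with uFpuOpcode. A hedged sketch of the encode/decode (the helper names are invented for illustration and are not part of the changeset; RT_BIT_32 and RT_BOOL are standard IPRT macros):

    /* Pack the FPU opcode (bits 15:0) and the pop flag (bit 31). */
    static inline uint32_t iemPackPopAndFpuOpcode(uint16_t uFpuOpcode, bool fPop)
    {
        return (fPop ? RT_BIT_32(31) : UINT32_C(0)) | uFpuOpcode;
    }

    /* Decode on the CImpl side. */
    static inline bool     iemUnpackPopFlag(uint32_t uPopAndFpuOpcode)   { return RT_BOOL(uPopAndFpuOpcode & RT_BIT_32(31)); }
    static inline uint16_t iemUnpackFpuOpcode(uint32_t uPopAndFpuOpcode) { return (uint16_t)uPopAndFpuOpcode; }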
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r101275 → r101304

  __version__ = "$Revision$"

  - # Standard python imports.
  + # Standard python imports:
  #import sys;

  - #import IEMAllInstPython as iai;
  + # Out python imports:
  + import IEMAllInstPython as iai;

  …
      Class that deals with transforming a threaded function variation into a
      native recompiler function.
  +
  +     This base class doesn't do any transforming and just renders the same
  +     code as for the threaded function.
      """
  …
          (for the selected host architecture).
          """
  -         return False;
  +         return True;

      def renderCode(self, cchIndent):
          """
          Returns the native recompiler function body for this threaded variant.
          """
  -         return ' ' * cchIndent + ' AssertFailed();';
  +         aoStmts = self.oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
  +         return iai.McStmt.renderCodeForList(aoStmts, cchIndent);

  …
      """

  -     _ = oVariation;
  -     _ = sHostArch;
  +     #
  +     # Analyze the statements.
  +     #
  +     aoStmts = oVariation.aoStmtsForThreadedFunction # type: list(McStmt)
  +
  +     # The simplest case are the IEM_MC_DEFER_TO_CIMPL_*_RET_THREADED ones, just pass them thru:
  +     if (    len(aoStmts) == 1
  +         and aoStmts[0].sName.startswith('IEM_MC_DEFER_TO_CIMPL_')
  +         and aoStmts[0].sName.endswith('_RET_THREADED')
  +         and sHostArch in ('amd64',)):
  +         return NativeRecompFunctionVariation(oVariation, sHostArch);

      return None;
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r101275 → r101304

  /**
  +  * Emits a call to a CImpl function or something similar.
  +  */
  + static int32_t iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
  +                                       uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
  +                                       uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
  + {
  + #ifdef VBOX_STRICT
  +     off = iemNativeEmitMarker(pReNative, off);
  +     AssertReturn(off != UINT32_MAX, UINT32_MAX);
  + #endif
  +
  +     /*
  +      * Load the parameters.
  +      */
  + #if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
  +     /* Special code the hidden VBOXSTRICTRC pointer. */
  +     off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
  +     off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG2_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
  +     if (cAddParams > 0)
  +         off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam0);
  +     if (cAddParams > 1)
  +         off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam1);
  +     if (cAddParams > 2)
  +         off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG1, uParam2);
  +     off = iemNativeEmitLeaGrpByBp(pReNative, off, X86_GREG_xCX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict */
  +
  + #else
  +     AssertCompile(IEMNATIVE_CALL_ARG_GREG_COUNT >= 4);
  +     off = iemNativeEmitLoadGprFromGpr(  pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
  +     off = iemNativeEmitLoadGprImm64(    pReNative, off, IEMNATIVE_CALL_ARG1_GREG, cbInstr); /** @todo 8-bit reg load opt for amd64 */
  +     if (cAddParams > 0)
  +         off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, uParam0);
  +     if (cAddParams > 1)
  +         off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG3_GREG, uParam1);
  +     if (cAddParams > 2)
  + # if IEMNATIVE_CALL_ARG_GREG_COUNT >= 5
  +         off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG4_GREG, uParam2);
  + # else
  +         off = iemNativeEmitStoreImm64ByBp(pReNative, off, IEMNATIVE_FP_OFF_STACK_ARG0, uParam2);
  + # endif
  + #endif
  +     AssertReturn(off != UINT32_MAX, off);
  +
  +     /*
  +      * Make the call.
  +      */
  + #ifdef RT_ARCH_AMD64
  +     off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, pfnCImpl);
  +
  +     uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
  +     pbCodeBuf[off++] = 0xff;                    /* call rax */
  +     pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
  +
  + # if defined(VBOXSTRICTRC_STRICT_ENABLED) && defined(RT_OS_WINDOWS)
  +     off = iemNativeEmitLoadGprByBpU32(pReNative, off, X86_GREG_xAX, IEMNATIVE_FP_OFF_IN_SHADOW_ARG0); /* rcStrict (see above) */
  + # endif
  +
  + #elif defined(RT_ARCH_ARM64)
  +     off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, pfnCImpl);
  +
  +     uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
  +     AssertReturn(pu32CodeBuf, UINT32_MAX);
  +     pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
  +
  + #else
  + # error "Port me!"
  + #endif
  +
  +     /*
  +      * Check the status code.
  +      */
  +     return iemNativeEmitCheckCallRetAndPassUp(pReNative, off, idxInstr);
  + }
  +
  +
  + /**
   * Emits a call to a threaded worker function.
   */
  …
  /**
  -  * Emits a standard epilog.
  +  * Emits the RC fiddling code for handling non-zero return code or rcPassUp.
   */
  static uint32_t iemNativeEmitRcFiddling(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxReturnLabel)
  …
  + DECLINLINE(uint32_t) iemNativeEmitCImplCall1(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
  +                                              uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0)
  + {
  +     return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 1, uArg0, 0, 0);
  + }
  +
  +
  + DECLINLINE(uint32_t) iemNativeEmitCImplCall2(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
  +                                              uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1)
  + {
  +     return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 2, uArg0, uArg1, 0);
  + }
  +
  +
  + DECLINLINE(uint32_t) iemNativeEmitCImplCall3(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
  +                                              uintptr_t pfnCImpl, uint8_t cbInstr, uint64_t uArg0, uint64_t uArg1,
  +                                              uint64_t uArg2)
  + {
  +     return iemNativeEmitCImplCall(pReNative, off, idxInstr, pfnCImpl, cbInstr, 3, uArg0, uArg1, uArg2);
  + }
  +
  +
  + /*
  +  * MC definitions for the native recompiler.
  +  */
  +
  + #define IEM_MC_DEFER_TO_CIMPL_0_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl) \
  +     return iemNativeEmitCImplCall0(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr) /** @todo not used ... */
  +
  + #define IEM_MC_DEFER_TO_CIMPL_1_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0) \
  +     return iemNativeEmitCImplCall1(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0)
  +
  + #define IEM_MC_DEFER_TO_CIMPL_2_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1) \
  +     return iemNativeEmitCImplCall2(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1)
  +
  + #define IEM_MC_DEFER_TO_CIMPL_3_RET_THREADED(a_cbInstr, a_fFlags, a_pfnCImpl, a0, a1, a2) \
  +     return iemNativeEmitCImplCall3(pReNative, off, pCallEntry->idxInstr, (uintptr_t)a_pfnCImpl, a_cbInstr, a0, a1, a2)
  +
  +
  /*
   * Include g_apfnIemNativeRecompileFunctions and associated functions.
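Taken together, the new MC defines turn a deferred-to-CImpl threaded call into a direct native call to the worker. Conceptually, the code iemNativeEmitCImplCall() emits for the three-parameter case behaves like the following C (a sketch of the non-Windows path only; the VBOXSTRICTRC return is simplified to int32_t, and the function and typedef names here are illustrative, not from the source):

    /* What the emitted native code effectively does for cAddParams == 3:
       pVCpu stays in a fixed register (IEMNATIVE_REG_FIXED_PVMCPU), cbInstr
       and the three parameters are loaded as immediates, the worker is
       called indirectly, and the return code is then routed through the
       code emitted by iemNativeEmitCheckCallRetAndPassUp(). */
    typedef int32_t (*PFNCIMPL3)(void *pVCpu, uint8_t cbInstr,
                                 uint64_t uArg0, uint64_t uArg1, uint64_t uArg2);

    static int32_t emittedCodeEquivalent(void *pVCpu, uintptr_t pfnCImpl, uint8_t cbInstr,
                                         uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
    {
        return ((PFNCIMPL3)pfnCImpl)(pVCpu, cbInstr, uParam0, uParam1, uParam2);
    }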
trunk/src/VBox/VMM/include/IEMInternal.h
r101262 → r101304

  IEM_CIMPL_PROTO_1(iemCImpl_fldcw, uint16_t, u16Fcw);
  IEM_CIMPL_PROTO_2(iemCImpl_fxch_underflow, uint8_t, iStReg, uint16_t, uFpuOpcode);
  - IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, uint32_t, uPopAndFpuOpcode);
  + IEM_CIMPL_PROTO_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, bool, fUCmp, uint32_t, uPopAndFpuOpcode);
  /** @} */
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
r101275 → r101304

  /** @def IEMNATIVE_REG_FIXED_PVMCPU
   * The register number hold in pVCpu pointer. */
  + /** @def IEMNATIVE_REG_FIXED_TMP0
  +  * Dedicated temporary register.
  +  * @todo replace this by a register allocator and content tracker. */
  #ifdef RT_ARCH_AMD64
  # define IEMNATIVE_REG_FIXED_PVMCPU  X86_GREG_xBX
  + # define IEMNATIVE_REG_FIXED_TMP0    X86_GREG_x11

  #elif defined(RT_ARCH_ARM64)
  # define IEMNATIVE_REG_FIXED_PVMCPU  ARMV8_A64_REG_X28
  - /** Dedicated temporary register.
  -  * @todo replace this by a register allocator and content tracker. */
  # define IEMNATIVE_REG_FIXED_TMP0    ARMV8_A64_REG_X15
  …
      /* mov gprdst, qword [rbp + offDisp] */
      uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
      if (iGprDst < 8)
          pbCodeBuf[off++] = X86_OP_REX_W;
  …
      /* mov gprdst, dword [rbp + offDisp] */
      uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
      if (iGprDst >= 8)
          pbCodeBuf[off++] = X86_OP_REX_R;
  …
      /* lea gprdst, [rbp + offDisp] */
      uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
      if (iGprDst < 8)
          pbCodeBuf[off++] = X86_OP_REX_W;
  …
  - #ifdef RT_ARCH_AMD64
  /**
   * Emits a 64-bit GPR store with an BP relative destination address.
  +  *
  +  * @note May trash IEMNATIVE_REG_FIXED_TMP0.
   */
  DECLINLINE(uint32_t) iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc)
  {
  + #ifdef RT_ARCH_AMD64
      /* mov qword [rbp + offDisp], gprdst */
      uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
      if (iGprSrc < 8)
          pbCodeBuf[off++] = X86_OP_REX_W;
  …
      pbCodeBuf[off++] = 0x89;
      return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprSrc, offDisp);
  - }
  - #endif
  +
  + #elif defined(RT_ARCH_ARM64)
  +     if (offDisp >= 0 && offDisp < 4096 * 8 && !((uint32_t)offDisp & 7))
  +     {
  +         /* str w/ unsigned imm12 (scaled) */
  +         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
  +         AssertReturn(pu32CodeBuf, UINT32_MAX);
  +         pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc,
  +                                                       ARMV8_A64_BP, (uint32_t)offDisp / 8);
  +     }
  +     else if (offDisp >= -256 && offDisp <= 256)
  +     {
  +         /* stur w/ signed imm9 (unscaled) */
  +         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
  +         AssertReturn(pu32CodeBuf, UINT32_MAX);
  +         pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_BP, offDisp);
  +     }
  +     else
  +     {
  +         /* Use temporary indexing register. */
  +         off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
  +         uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
  +         AssertReturn(pu32CodeBuf, UINT32_MAX);
  +         pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_BP,
  +                                                        IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
  +     }
  +     return off;
  +
  + #else
  + # error "Port me!"
  + #endif
  + }
  +
  +
  + /**
  +  * Emits a 64-bit immediate store with an BP relative destination address.
  +  *
  +  * @note May trash IEMNATIVE_REG_FIXED_TMP0.
  +  */
  + DECLINLINE(uint32_t) iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64)
  + {
  + #ifdef RT_ARCH_AMD64
  +     if ((int64_t)uImm64 == (int32_t)uImm64)
  +     {
  +         /* mov qword [rbp + offDisp], imm32 - sign extended */
  +         uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11);
  +         AssertReturn(pbCodeBuf, UINT32_MAX);
  +
  +         pbCodeBuf[off++] = X86_OP_REX_W;
  +         pbCodeBuf[off++] = 0xc7;
  +         if (offDisp < 128 && offDisp >= -128)
  +         {
  +             pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 0, X86_GREG_xBP);
  +             pbCodeBuf[off++] = (uint8_t)offDisp;
  +         }
  +         else
  +         {
  +             pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 0, X86_GREG_xBP);
  +             pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
  +             pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
  +             pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
  +             pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
  +         }
  +         pbCodeBuf[off++] = RT_BYTE1(uImm64);
  +         pbCodeBuf[off++] = RT_BYTE2(uImm64);
  +         pbCodeBuf[off++] = RT_BYTE3(uImm64);
  +         pbCodeBuf[off++] = RT_BYTE4(uImm64);
  +         return off;
  +     }
  + #endif
  +
  +     /* Load tmp0, imm64; Store tmp to bp+disp. */
  +     off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uImm64);
  +     return iemNativeEmitStoreGprByBp(pReNative, off, offDisp, IEMNATIVE_REG_FIXED_TMP0);
  + }
  …
      /* sub gprdst, imm8/imm32 */
      uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
  +     AssertReturn(pbCodeBuf, UINT32_MAX);
      if (iGprDst < 7)
          pbCodeBuf[off++] = X86_OP_REX_W;
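The AMD64 fast path in iemNativeEmitStoreImm64ByBp() hinges on 'mov qword [rbp+disp], imm32' sign-extending its 32-bit immediate, so it is only taken when that round trip is lossless; everything else falls back to the load-into-tmp0-then-store path. A self-contained illustration of the test (example values chosen for this note):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the (int64_t)uImm64 == (int32_t)uImm64 check above. */
    static int fitsSignExtendedImm32(uint64_t uImm64)
    {
        return (int64_t)uImm64 == (int32_t)uImm64;
    }

    int main(void)
    {
        printf("%d\n", fitsSignExtendedImm32(UINT64_C(0x000000007fffffff))); /* 1 - positive imm32 */
        printf("%d\n", fitsSignExtendedImm32(UINT64_C(0xffffffff80000000))); /* 1 - sign-extends back */
        printf("%d\n", fitsSignExtendedImm32(UINT64_C(0x0000000080000000))); /* 0 - needs the tmp0 path */
        return 0;
    }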