Changeset 103677 in vbox
- Timestamp:
- Mar 5, 2024 9:06:08 AM (13 months ago)
- svn:sync-xref-src-repo-rev:
- 162039
- Location:
- trunk/src/VBox/VMM/VMMAll
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r103675 r103677 1487 1487 FNIEMOP_DEF(iemOp_sub_Gb_Eb) 1488 1488 { 1489 /** @todo sub reg,samreg */ 1489 1490 IEMOP_MNEMONIC2(RM, SUB, sub, Gb, Eb, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZES); 1490 1491 IEMOP_BODY_BINARY_r8_rm(iemAImpl_sub_u8, sub, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); … … 1776 1777 IEMOP_MNEMONIC(cmp_Eb_Gb, "cmp Eb,Gb"); 1777 1778 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1778 IEMOP_BODY_BINARY_rm_r8_RO(bRm, iemAImpl_cmp_u8, cmp, 0);1779 IEMOP_BODY_BINARY_rm_r8_RO(bRm, iemAImpl_cmp_u8, cmp, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 1779 1780 } 1780 1781 … … 1788 1789 IEMOP_MNEMONIC(cmp_Ev_Gv, "cmp Ev,Gv"); 1789 1790 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1790 IEMOP_BODY_BINARY_rm_rv_RO(bRm, iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64, cmp, 0);1791 IEMOP_BODY_BINARY_rm_rv_RO(bRm, iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64, cmp, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 1791 1792 } 1792 1793 … … 1799 1800 { 1800 1801 IEMOP_MNEMONIC(cmp_Gb_Eb, "cmp Gb,Eb"); 1801 IEMOP_BODY_BINARY_r8_rm(iemAImpl_cmp_u8, cmp, 0);1802 IEMOP_BODY_BINARY_r8_rm(iemAImpl_cmp_u8, cmp, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 1802 1803 } 1803 1804 … … 1811 1812 IEMOP_MNEMONIC(cmp_Gv_Ev, "cmp Gv,Ev"); 1812 1813 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 1813 IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64, 0, 0, cmp, 0);1814 IEMOP_BODY_BINARY_rv_rm(bRm, iemAImpl_cmp_u16, iemAImpl_cmp_u32, iemAImpl_cmp_u64, 0, 0, cmp, RT_ARCH_VAL_AMD64 | RT_ARCH_VAL_ARM64); 1814 1815 } 1815 1816 -
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h
r103675 r103677 565 565 OF, CF, ZF and SF. */ 566 566 567 /* Copy idxRegDst. */ 568 uint8_t const idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off); 569 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 570 571 /* Do the SUB setting flags. */ 567 uint8_t const idxRegDstIn = iemNativeRegAllocTmp(pReNative, &off); 568 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4); 572 569 if (cOpBits >= 32) 573 570 { … … 584 581 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDstIn, idxRegDstIn, cShift, false /*f64Bit*/); 585 582 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegDst, idxRegDst, cShift, false /*f64Bit*/); 586 }587 583 cOpBits = 32; 584 } 588 585 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 589 586 590 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, RT_MAX(cOpBits, 32), idxRegDst,587 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, cOpBits, idxRegDst, 591 588 idxRegDstIn, idxRegSrc, true /*fNativeFlags*/); 592 589 … … 596 593 597 594 #elif defined(RT_ARCH_AMD64) 598 /* On AMD64 we must use the correctly sized AND instructions to get the 599 right EFLAGS.SF value, while the rest will just lump 16-bit and 8-bit 600 in the 32-bit ones. */ 595 /* On AMD64 we just use the correctly sized SUB instruction to get the right EFLAGS.SF value. */ 601 596 off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off, 602 597 0x2a, 0x2b, cOpBits, idxRegDst, idxRegSrc); … … 619 614 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 620 615 { 616 /* 617 * The SUB instruction will set all flags. 
618 */ 619 uint8_t const idxRegDst = iemNativeVarRegisterAcquire(pReNative, idxVarDst, &off, true /*fInitialized*/); 620 uint8_t const idxRegSrc = iemNativeVarRegisterAcquire(pReNative, idxVarSrc, &off, true /*fInitialized*/); 621 622 #ifdef RT_ARCH_ARM64 623 /* On ARM64 we'll need the actual result as well as both input operands in order 624 to calculate the right flags, even if we use SUBS and translates NZCV into 625 OF, CF, ZF and SF. */ 626 627 uint8_t const idxRegResult = iemNativeRegAllocTmp(pReNative, &off); 628 PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3); 629 if (cOpBits >= 32) 630 pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegDst, idxRegSrc, cOpBits > 32 /*f64Bit*/, true /*fSetFlags*/); 631 else 632 { 633 /* Shift the operands up so we can perform a 32-bit operation and get all four flags. */ 634 uint32_t const cShift = 32 - cOpBits; 635 pCodeBuf[off++] = Armv8A64MkInstrOrr(idxRegResult, ARMV8_A64_REG_XZR, idxRegDst, false /*f64Bit*/, cShift); 636 pCodeBuf[off++] = Armv8A64MkInstrSubReg(idxRegResult, idxRegResult, idxRegSrc, cOpBits > 32 /*f64Bit*/, 637 true /*fSetFlags*/, cShift); 638 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(idxRegResult, idxRegResult, cShift, false /*f64Bit*/); 639 cOpBits = 32; 640 } 641 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 642 643 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl, cOpBits, idxRegResult, 644 idxRegDst, idxRegSrc, true /*fNativeFlags*/); 645 646 iemNativeRegFreeTmp(pReNative, idxRegResult); 647 iemNativeVarRegisterRelease(pReNative, idxVarSrc); 648 iemNativeVarRegisterRelease(pReNative, idxVarDst); 649 650 #elif defined(RT_ARCH_AMD64) 651 /* On AMD64 we just use the correctly sized CMP instruction to get the right EFLAGS.SF value. 
*/ 652 off = iemNativeEmitAmd64ModRmInstrRREx(iemNativeInstrBufEnsure(pReNative, off, 4), off, 653 0x3a, 0x3b, cOpBits, idxRegDst, idxRegSrc); 654 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off); 655 656 iemNativeVarRegisterRelease(pReNative, idxVarSrc); 657 iemNativeVarRegisterRelease(pReNative, idxVarDst); 658 659 off = iemNativeEmitEFlagsForArithmetic(pReNative, off, idxVarEfl); 660 661 #else 662 # error "port me" 663 #endif 664 return off; 665 } 666 667 668 DECL_INLINE_THROW(uint32_t) 669 iemNativeEmit_sbb_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 670 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 671 { 621 672 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits); 622 673 AssertFailed(); … … 626 677 627 678 DECL_INLINE_THROW(uint32_t) 628 iemNativeEmit_ sbb_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,629 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)679 iemNativeEmit_imul_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 680 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 630 681 { 631 682 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits); … … 636 687 637 688 DECL_INLINE_THROW(uint32_t) 638 iemNativeEmit_ imul_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,639 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)689 iemNativeEmit_popcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 690 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 640 691 { 641 692 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits); … … 646 697 647 698 DECL_INLINE_THROW(uint32_t) 648 iemNativeEmit_ popcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,649 699 iemNativeEmit_tzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 700 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 650 701 { 651 702 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits); … … 656 707 657 708 DECL_INLINE_THROW(uint32_t) 658 
iemNativeEmit_ tzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,709 iemNativeEmit_lzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off, 659 710 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits) 660 711 { … … 665 716 666 717 667 DECL_INLINE_THROW(uint32_t)668 iemNativeEmit_lzcnt_r_r_efl(PIEMRECOMPILERSTATE pReNative, uint32_t off,669 uint8_t idxVarDst, uint8_t idxVarSrc, uint8_t idxVarEfl, uint8_t cOpBits)670 {671 RT_NOREF(idxVarDst, idxVarSrc, idxVarEfl, cOpBits);672 AssertFailed();673 return iemNativeEmitBrk(pReNative, off, 0x666);674 }675 676 677 718 #endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllN8veEmit_x86_h */
Note: See TracChangeset for help on using the changeset viewer.