Changeset 104240 in vbox
- Timestamp: Apr 8, 2024 10:48:08 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl-arm64.S
(r104239 → r104240)

@@ -741,5 +741,5 @@
  * Calculate EFLAGS - only CF and OF.
  */
-        bfi     w0, w9, #0, #1              /* CF = last bit rotated around */
+        bfi     w0, w9, #0, #1              /* CF = last bit rotated around (new bottom bit) */
 
 .ifne \a_fIntelFlags
@@ -815,2 +815,112 @@
 ROL_64 iemAImpl_rol_u64_amd,       0
 
+
+/*
+ * Rotate Right.
+ */
+
+/* uint32_t iemAImpl_ror_u8( uint32_t fEFlagsIn, uint8_t  *pu8Dst,  uint8_t cShift); */
+/* uint32_t iemAImpl_ror_u16(uint32_t fEFlagsIn, uint16_t *pu16Dst, uint8_t cShift); */
+/* uint32_t iemAImpl_ror_u32(uint32_t fEFlagsIn, uint32_t *pu32Dst, uint8_t cShift); */
+.macro ROR_8_16_32, a_Name, a_cBits, a_fIntelFlags, a_LdStSuff
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
+BEGINPROC_HIDDEN \a_Name
+        .cfi_startproc
+
+        /* Do we need to rotate anything at all? */
+        and     w2, w2, #0x1f
+        cbz     w2, 99f
+
+        /*
+         * Do the shifting
+         */
+.ifne \a_cBits < 32
+        and     w2, w2, #(\a_cBits - 1)
+        neg     w3, w2
+        and     w3, w3, #(\a_cBits - 1)
+        ldr\a_LdStSuff  w8, [x1]
+        lsrv    w9, w8, w2
+        lslv    w10, w8, w3
+        orr     w9, w9, w10
+        str\a_LdStSuff  w9, [x1]
+        and     w9, w9, #(RT_BIT_32(\a_cBits) - 1)
+.else
+        ldr\a_LdStSuff  w8, [x1]
+        rorv    w9, w8, w2
+        str\a_LdStSuff  w9, [x1]
+.endif
+
+        /*
+         * Calculate EFLAGS - only CF and OF.
+         */
+        bfxil   w0, w9, #(\a_cBits - 1), #1 /* CF = last bit rotated around (new top bit) */
+
+.ifne \a_fIntelFlags
+        /* Intel: OF = first rotate step: X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << (a_cBitsWidth - 1))); */
+        eor     w11, w8, w8, LSR #(\a_cBits - 1)
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.else
+        /* AMD: OF = last rotate step: fEFlags |= (((uResult >> ((a_cBitsWidth) - 2)) ^ fCarry) & 1) << X86_EFL_OF_BIT; */
+        eor     w11, w0, w9, LSR #(\a_cBits - 2)
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.endif
+
+99:
+        ret
+        .cfi_endproc
+.endm
+
+ROR_8_16_32 iemAImpl_ror_u8,         8, 1, b
+ROR_8_16_32 iemAImpl_ror_u8_intel,   8, 1, b
+ROR_8_16_32 iemAImpl_ror_u8_amd,     8, 0, b
+
+ROR_8_16_32 iemAImpl_ror_u16,       16, 1, h
+ROR_8_16_32 iemAImpl_ror_u16_intel, 16, 1, h
+ROR_8_16_32 iemAImpl_ror_u16_amd,   16, 0, h
+
+ROR_8_16_32 iemAImpl_ror_u32,       32, 1,
+ROR_8_16_32 iemAImpl_ror_u32_intel, 32, 1,
+ROR_8_16_32 iemAImpl_ror_u32_amd,   32, 0,
+
+/** @todo This is slightly slower than the C version (release) on an M2. Investigate why. */
+/* uint32_t iemAImpl_ror_u64(uint32_t fEFlagsIn, uint64_t *pu64Dst, uint8_t cShift); */
+.macro ROR_64, a_Name, a_fIntelFlags
+ALIGNCODE(IEM_AIMPL_FUNCTION_ALIGNMENT)
+BEGINPROC_HIDDEN \a_Name
+        .cfi_startproc
+
+        /* Do we need to rotate anything at all? */
+        and     w2, w2, #0x3f
+        cbz     w2, 99f
+
+        /*
+         * Do the shifting
+         */
+        ldr     x8, [x1]
+        rorv    x9, x8, x2
+        str     x9, [x1]
+
+        /*
+         * Calculate EFLAGS - only CF and OF.
+         */
+        bfxil   x0, x9, #(64 - 1), #1       /* CF = last bit rotated around (new top bit) */
+
+.ifne \a_fIntelFlags
+        /* Intel: OF = first rotate step: X86_EFL_GET_OF_ ## a_cBitsWidth(uDst ^ (uDst << (a_cBitsWidth - 1))); */
+        eor     x11, x8, x8, LSR #(64 - 1)
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.else
+        /* AMD: OF = last rotate step: fEFlags |= (((uResult >> ((a_cBitsWidth) - 2)) ^ fCarry) & 1) << X86_EFL_OF_BIT; */
+        eor     x11, x0, x9, LSR #(64 - 2)
+        bfi     w0, w11, #X86_EFL_OF_BIT, #1
+.endif
+
+99:
+        ret
+        .cfi_endproc
+.endm
+
+ROR_64 iemAImpl_ror_u64,       1
+ROR_64 iemAImpl_ror_u64_intel, 1
+ROR_64 iemAImpl_ror_u64_amd,   0
+
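To make the flag semantics above easier to follow, here is a minimal, stand-alone C model of what the new ROR_8_16_32 macro computes for the 8-bit case. This is an illustration only, not VBox source: the function name ror_u8_model and the main() driver are made up for this example, while the CF/OF rules mirror the Intel and AMD branches of the assembly.

#include <stdint.h>
#include <stdio.h>

#define X86_EFL_CF_BIT 0
#define X86_EFL_OF_BIT 11

/* Illustrative model of the 8-bit ROR flag computation done by the
 * ROR_8_16_32 macro above; ror_u8_model is a made-up name, not VBox API. */
static uint32_t ror_u8_model(uint32_t fEFlags, uint8_t *puDst, uint8_t cShift, int fIntel)
{
    cShift &= 0x1f;                         /* x86 masks the count to 5 bits (the 'and w2, w2, #0x1f') */
    if (!cShift)
        return fEFlags;                     /* count 0: memory and flags untouched (the 'cbz w2, 99f') */

    uint8_t const  uDst    = *puDst;
    unsigned const cEff    = cShift & 7;    /* effective rotate distance for an 8-bit operand */
    uint8_t const  uResult = (uint8_t)((uDst >> cEff) | (uDst << ((8 - cEff) & 7)));
    *puDst = uResult;

    /* CF = last bit rotated around = new top bit of the result (the 'bfxil'). */
    uint32_t const fCarry = (uResult >> 7) & 1;
    fEFlags = (fEFlags & ~(UINT32_C(1) << X86_EFL_CF_BIT)) | (fCarry << X86_EFL_CF_BIT);

    /* OF: Intel derives it from the first rotate step (old bit 0 ^ old bit 7),
     * AMD from the last step (new bit 6 ^ CF) - matching the two 'eor' paths. */
    uint32_t const fOf = fIntel ? (uint32_t)((uDst ^ (uDst >> 7)) & 1)
                                : (((uResult >> 6) ^ fCarry) & 1);
    fEFlags = (fEFlags & ~(UINT32_C(1) << X86_EFL_OF_BIT)) | (fOf << X86_EFL_OF_BIT);
    return fEFlags;
}

int main(void)
{
    uint8_t  uVal    = 0x81;
    uint32_t fEFlags = ror_u8_model(0, &uVal, 1, 1 /*fIntel*/);
    printf("result=%#x eflags=%#x\n", uVal, fEFlags); /* result=0xc0 eflags=0x1 (CF=1, OF=0) */
    return 0;
}

Note that when the masked count is a nonzero multiple of the operand width (e.g. 8 for the byte case), the value is unchanged but CF is still reloaded from the top bit, which is why the assembly only skips the flag update for a count of zero.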
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
(r104239 → r104240)

@@ -3180,13 +3180,15 @@
 }
 
-#if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY)
+#ifndef RT_ARCH_ARM64
+
+# if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY)
 EMIT_ROR(64, uint64_t, RT_NOTHING, 1, ASMRotateRightU64)
-#endif
+# endif
 EMIT_ROR(64, uint64_t, _intel, 1, ASMRotateRightU64)
 EMIT_ROR(64, uint64_t, _amd,   0, ASMRotateRightU64)
 
-#if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
+# if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
 EMIT_ROR(32, uint32_t, RT_NOTHING, 1, ASMRotateRightU32)
-#endif
+# endif
 EMIT_ROR(32, uint32_t, _intel, 1, ASMRotateRightU32)
 EMIT_ROR(32, uint32_t, _amd,   0, ASMRotateRightU32)
@@ -3196,7 +3198,7 @@
     return (uValue >> cShift) | (uValue << (16 - cShift));
 }
-#if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
+# if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
 EMIT_ROR(16, uint16_t, RT_NOTHING, 1, iemAImpl_ror_u16_hlp)
-#endif
+# endif
 EMIT_ROR(16, uint16_t, _intel, 1, iemAImpl_ror_u16_hlp)
 EMIT_ROR(16, uint16_t, _amd,   0, iemAImpl_ror_u16_hlp)
@@ -3206,10 +3208,11 @@
     return (uValue >> cShift) | (uValue << (8 - cShift));
 }
-#if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
+# if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY)
 EMIT_ROR(8, uint8_t, RT_NOTHING, 1, iemAImpl_ror_u8_hlp)
-#endif
+# endif
 EMIT_ROR(8, uint8_t, _intel, 1, iemAImpl_ror_u8_hlp)
 EMIT_ROR(8, uint8_t, _amd,   0, iemAImpl_ror_u8_hlp)
 
+#endif /* !RT_ARCH_ARM64 */
 
 /*
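For reference, each EMIT_ROR instantiation above emits one flag-behaviour variant of a rotate-right helper. A sketch of the resulting signatures, assuming the prototypes quoted in the IEMAllAImpl-arm64.S comments; the EMIT_ROR macro body itself is defined elsewhere in IEMAllAImplC.cpp and is not part of this changeset:

/* Shape of the helpers EMIT_ROR emits (per the IEMAllAImpl-arm64.S comments). */
uint32_t iemAImpl_ror_u8(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift);
uint32_t iemAImpl_ror_u8_intel(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift);
uint32_t iemAImpl_ror_u8_amd(uint32_t fEFlagsIn, uint8_t *pu8Dst, uint8_t cShift);
/* ...and likewise _u16/_u32/_u64 for the wider operand sizes. */

With the new #ifndef RT_ARCH_ARM64 guard, none of these C instantiations are compiled on ARM64 anymore; the assembly file above now supplies the whole family instead.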