Changeset 95308 in vbox
- Timestamp: Jun 19, 2022 8:40:26 PM
- svn:sync-xref-src-repo-rev: 151898
- Location: trunk/src/VBox/VMM
- Files: 9 edited
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
r94440 r95308 531 531 %endmacro 532 532 533 ; instr,lock, modified-flags.533 ; instr,lock, modified-flags, undefined flags 534 534 IEMIMPL_BIN_OP add, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 535 535 IEMIMPL_BIN_OP adc, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 536 536 IEMIMPL_BIN_OP sub, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 537 537 IEMIMPL_BIN_OP sbb, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 538 IEMIMPL_BIN_OP or, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF539 IEMIMPL_BIN_OP xor, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF540 IEMIMPL_BIN_OP and, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF538 IEMIMPL_BIN_OP or, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF 539 IEMIMPL_BIN_OP xor, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF 540 IEMIMPL_BIN_OP and, 1, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF 541 541 IEMIMPL_BIN_OP cmp, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 0 542 IEMIMPL_BIN_OP test, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF 542 IEMIMPL_BIN_OP test, 0, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), X86_EFL_AF 543 544 545 ;; 546 ; Macro for implementing a binary operator, VEX variant with separate input/output. 547 ; 548 ; This will generate code for the 32 and 64 bit accesses, except on 32-bit system 549 ; where the 64-bit accesses requires hand coding. 550 ; 551 ; All the functions takes a pointer to the destination memory operand in A0, 552 ; the first source register operand in A1, the second source register operand 553 ; in A2 and a pointer to eflags in A3. 554 ; 555 ; @param 1 The instruction mnemonic. 556 ; @param 2 The modified flags. 557 ; @param 3 The undefined flags. 558 ; 559 %macro IEMIMPL_VEX_BIN_OP 3 560 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 16 561 PROLOGUE_4_ARGS 562 IEM_MAYBE_LOAD_FLAGS A3, %2, %3 563 %1 T0_32, A1_32, A2_32 564 mov [A0], T0_32 565 IEM_SAVE_FLAGS A3, %2, %3 566 EPILOGUE_4_ARGS 567 ENDPROC iemAImpl_ %+ %1 %+ _u32 568 569 %ifdef RT_ARCH_AMD64 570 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 16 571 PROLOGUE_4_ARGS 572 IEM_MAYBE_LOAD_FLAGS A3, %2, %3 573 %1 T0, A1, A2 574 mov [A0], T0 575 IEM_SAVE_FLAGS A3, %2, %3 576 EPILOGUE_4_ARGS 577 ENDPROC iemAImpl_ %+ %1 %+ _u64 578 %endif ; RT_ARCH_AMD64 579 %endmacro 580 581 ; instr, modified-flags, undefined-flags 582 IEMIMPL_VEX_BIN_OP andn, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_CF), (X86_EFL_AF | X86_EFL_PF) 583 IEMIMPL_VEX_BIN_OP bextr, (X86_EFL_OF | X86_EFL_ZF | X86_EFL_CF), (X86_EFL_SF | X86_EFL_AF | X86_EFL_PF) 584 585 586 ;; 587 ; Macro for implementing a binary operator w/o flags, VEX variant with separate input/output. 588 ; 589 ; This will generate code for the 32 and 64 bit accesses, except on 32-bit system 590 ; where the 64-bit accesses requires hand coding. 591 ; 592 ; All the functions takes a pointer to the destination memory operand in A0, 593 ; the first source register operand in A1, the second source register operand 594 ; in A2 and a pointer to eflags in A3. 595 ; 596 ; @param 1 The instruction mnemonic. 
597 ; 598 %macro IEMIMPL_VEX_BIN_OP_NOEFL 2 599 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32, 12 600 PROLOGUE_3_ARGS 601 %1 T0_32, A1_32, A2_32 602 mov [A0], T0_32 603 EPILOGUE_3_ARGS 604 ENDPROC iemAImpl_ %+ %1 %+ _u32 605 606 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u32_fallback, 12 607 PROLOGUE_3_ARGS 608 %ifdef ASM_CALL64_GCC 609 mov cl, A2_8 610 %2 A1_32, cl 611 mov [A0], A1_32 612 %else 613 xchg A2, A0 614 %2 A1_32, cl 615 mov [A2], A1_32 616 %endif 617 EPILOGUE_3_ARGS 618 ENDPROC iemAImpl_ %+ %1 %+ _u32_fallback 619 620 %ifdef RT_ARCH_AMD64 621 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64, 12 622 PROLOGUE_3_ARGS 623 %1 T0, A1, A2 624 mov [A0], T0 625 EPILOGUE_3_ARGS 626 ENDPROC iemAImpl_ %+ %1 %+ _u64 627 628 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u64_fallback, 12 629 PROLOGUE_3_ARGS 630 %ifdef ASM_CALL64_GCC 631 mov cl, A2_8 632 %2 A1, cl 633 mov [A0], A1_32 634 %else 635 xchg A2, A0 636 %2 A1, cl 637 mov [A2], A1_32 638 %endif 639 mov [A0], A1 640 EPILOGUE_3_ARGS 641 ENDPROC iemAImpl_ %+ %1 %+ _u64_fallback 642 %endif ; RT_ARCH_AMD64 643 %endmacro 644 645 ; instr, fallback instr 646 IEMIMPL_VEX_BIN_OP_NOEFL sarx, sar 647 IEMIMPL_VEX_BIN_OP_NOEFL shlx, shl 648 IEMIMPL_VEX_BIN_OP_NOEFL shrx, shr 649 650 651 ; 652 ; RORX uses a immediate byte for the shift count, so we only do 653 ; fallback implementation of that one. 654 ; 655 BEGINPROC_FASTCALL iemAImpl_rorx_u32, 12 656 PROLOGUE_3_ARGS 657 %ifdef ASM_CALL64_GCC 658 mov cl, A2_8 659 ror A1_32, cl 660 mov [A0], A1_32 661 %else 662 xchg A2, A0 663 ror A1_32, cl 664 mov [A2], A1_32 665 %endif 666 EPILOGUE_3_ARGS 667 ENDPROC iemAImpl_rorx_u32 668 669 %ifdef RT_ARCH_AMD64 670 BEGINPROC_FASTCALL iemAImpl_rorx_u64, 12 671 PROLOGUE_3_ARGS 672 %ifdef ASM_CALL64_GCC 673 mov cl, A2_8 674 ror A1, cl 675 mov [A0], A1_32 676 %else 677 xchg A2, A0 678 ror A1, cl 679 mov [A2], A1_32 680 %endif 681 mov [A0], A1 682 EPILOGUE_3_ARGS 683 ENDPROC iemAImpl_rorx_u64 684 %endif ; RT_ARCH_AMD64 543 685 544 686 … … 638 780 ; @param 2 The modified flags. 639 781 ; @param 3 The undefined flags. 640 ; 641 %macro IEMIMPL_BIT_OP 3 782 ; @param 4 Non-zero if destination isn't written when ZF=1. Zero if always written. 
783 ; 784 %macro IEMIMPL_BIT_OP2 4 642 785 BEGINCODE 643 786 BEGINPROC_FASTCALL iemAImpl_ %+ %1 %+ _u16, 12 … … 645 788 IEM_MAYBE_LOAD_FLAGS A2, %2, %3 646 789 %1 T0_16, A1_16 790 %if %4 != 0 647 791 jz .unchanged_dst 792 %endif 648 793 mov [A0], T0_16 649 794 .unchanged_dst: … … 655 800 PROLOGUE_3_ARGS 656 801 %1 T1_16, A1_16 802 %if %4 != 0 657 803 jz .unchanged_dst 804 %endif 658 805 mov [A0], T1_16 659 806 IEM_ADJUST_FLAGS_WITH_PARITY A2, X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_CF | X86_EFL_ZF, 0, T1 … … 667 814 PROLOGUE_3_ARGS 668 815 %1 T0_16, A1_16 816 %if %4 != 0 669 817 jz .unchanged_dst 818 %endif 670 819 mov [A0], T0_16 671 820 .unchanged_dst: … … 679 828 IEM_MAYBE_LOAD_FLAGS A2, %2, %3 680 829 %1 T0_32, A1_32 830 %if %4 != 0 681 831 jz .unchanged_dst 832 %endif 682 833 mov [A0], T0_32 683 834 .unchanged_dst: … … 689 840 PROLOGUE_3_ARGS 690 841 %1 T1_32, A1_32 842 %if %4 != 0 691 843 jz .unchanged_dst 844 %endif 692 845 mov [A0], T1_32 693 846 IEM_ADJUST_FLAGS_WITH_PARITY A2, X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_CF | X86_EFL_ZF, 0, T1 … … 701 854 PROLOGUE_3_ARGS 702 855 %1 T0_32, A1_32 856 %if %4 != 0 703 857 jz .unchanged_dst 858 %endif 704 859 mov [A0], T0_32 705 860 .unchanged_dst: … … 715 870 IEM_MAYBE_LOAD_FLAGS A2, %2, %3 716 871 %1 T0, A1 872 %if %4 != 0 717 873 jz .unchanged_dst 874 %endif 718 875 mov [A0], T0 719 876 .unchanged_dst: … … 726 883 IEM_MAYBE_LOAD_FLAGS A2, %2, %3 727 884 %1 T1, A1 885 %if %4 != 0 728 886 jz .unchanged_dst 887 %endif 729 888 mov [A0], T1 730 889 IEM_ADJUST_FLAGS_WITH_PARITY A2, X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_CF | X86_EFL_ZF, 0, T1 … … 738 897 PROLOGUE_3_ARGS 739 898 %1 T0, A1 899 %if %4 != 0 740 900 jz .unchanged_dst 901 %endif 741 902 mov [A0], T0 742 903 .unchanged_dst: … … 748 909 %endmacro 749 910 750 IEMIMPL_BIT_OP bsf, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) 751 IEMIMPL_BIT_OP bsr, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF) 911 IEMIMPL_BIT_OP2 bsf, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 1 912 IEMIMPL_BIT_OP2 bsr, (X86_EFL_ZF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF), 1 913 IEMIMPL_BIT_OP2 tzcnt, (X86_EFL_ZF | X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF), 0 914 IEMIMPL_BIT_OP2 lzcnt, (X86_EFL_ZF | X86_EFL_CF), (X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF), 0 752 915 753 916 -
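For reference, a minimal standalone C sketch (not part of the changeset) of the semantics the new andn worker wired up above implements: dst = ~src1 & src2, with SF/ZF taken from the result, OF/CF cleared, and the architecturally undefined AF/PF simply cleared. The helper name and flag constants are illustrative, not VBox APIs.

#include <stdint.h>
#include <stdio.h>

#define EFL_CF 0x0001u
#define EFL_ZF 0x0040u
#define EFL_SF 0x0080u
#define EFL_OF 0x0800u

/* ANDN: dst = ~src1 & src2; logic-style EFLAGS update. */
static uint32_t andn_u32_sketch(uint32_t uSrc1, uint32_t uSrc2, uint32_t *pfEFlags)
{
    uint32_t const uResult = ~uSrc1 & uSrc2;
    uint32_t fEfl = *pfEFlags & ~(EFL_OF | EFL_SF | EFL_ZF | EFL_CF);
    if (!uResult)
        fEfl |= EFL_ZF;
    if (uResult & UINT32_C(0x80000000))
        fEfl |= EFL_SF;
    *pfEFlags = fEfl;
    return uResult;
}

int main(void)
{
    uint32_t fEfl = 0;
    printf("andn = %#010x\n", andn_u32_sketch(0x0000ffffu, 0x12345678u, &fEfl)); /* 0x12340000 */
    return 0;
}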
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp
r94698 r95308 912 912 IEM_DECL_IMPL_DEF(void, iemAImpl_and_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 913 913 { 914 uint64_t uResult = *puDst & uSrc;914 uint64_t const uResult = *puDst & uSrc; 915 915 *puDst = uResult; 916 916 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); … … 921 921 IEM_DECL_IMPL_DEF(void, iemAImpl_and_u32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 922 922 { 923 uint32_t uResult = *puDst & uSrc;923 uint32_t const uResult = *puDst & uSrc; 924 924 *puDst = uResult; 925 925 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 32, 0); … … 929 929 IEM_DECL_IMPL_DEF(void, iemAImpl_and_u16,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 930 930 { 931 uint16_t uResult = *puDst & uSrc;931 uint16_t const uResult = *puDst & uSrc; 932 932 *puDst = uResult; 933 933 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 16, 0); … … 937 937 IEM_DECL_IMPL_DEF(void, iemAImpl_and_u8,(uint8_t *puDst, uint8_t uSrc, uint32_t *pfEFlags)) 938 938 { 939 uint8_t uResult = *puDst & uSrc;939 uint8_t const uResult = *puDst & uSrc; 940 940 *puDst = uResult; 941 941 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 8, 0); … … 943 943 944 944 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */ 945 #endif /* !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) */ 946 947 /* 948 * ANDN (BMI1 instruction) 949 */ 950 951 IEM_DECL_IMPL_DEF(void, iemAImpl_andn_u64_fallback,(uint64_t *puDst, uint64_t uSrc1, uint64_t uSrc2, uint32_t *pfEFlags)) 952 { 953 uint64_t const uResult = ~uSrc1 & uSrc2; 954 *puDst = uResult; 955 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 64, 0); 956 } 957 958 959 IEM_DECL_IMPL_DEF(void, iemAImpl_andn_u32_fallback,(uint32_t *puDst, uint32_t uSrc1, uint32_t uSrc2, uint32_t *pfEFlags)) 960 { 961 uint32_t const uResult = ~uSrc1 & uSrc2; 962 *puDst = uResult; 963 IEM_EFL_UPDATE_STATUS_BITS_FOR_LOGIC(pfEFlags, uResult, 32, 0); 964 } 965 966 967 #if defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) 968 IEM_DECL_IMPL_DEF(void, iemAImpl_andn_u64,(uint64_t *puDst, uint64_t uSrc1, uint64_t uSrc2, uint32_t *pfEFlags)) 969 { 970 iemAImpl_andn_u64_fallback(puDst, uSrc1, uSrc2, pfEFlags); 971 } 972 #endif 973 974 975 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 976 IEM_DECL_IMPL_DEF(void, iemAImpl_andn_u32,(uint32_t *puDst, uint32_t uSrc1, uint32_t uSrc2, uint32_t *pfEFlags)) 977 { 978 iemAImpl_andn_u32_fallback(puDst, uSrc1, uSrc2, pfEFlags); 979 } 980 #endif 981 982 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 945 983 946 984 /* … … 1469 1507 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */ 1470 1508 1509 1510 /* 1511 * Helpers for LZCNT and TZCNT. 
1512 */ 1513 #define SET_BIT_CNT_SEARCH_RESULT_INTEL(a_puDst, a_uSrc, a_pfEFlags, a_uResult) do { \ 1514 unsigned const uResult = (a_uResult); \ 1515 *(a_puDst) = uResult; \ 1516 uint32_t fEfl = *(a_pfEFlags) & ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF); \ 1517 if (uResult) \ 1518 fEfl |= g_afParity[uResult]; \ 1519 else \ 1520 fEfl |= X86_EFL_ZF | X86_EFL_PF; \ 1521 if (!a_uSrc) \ 1522 fEfl |= X86_EFL_CF; \ 1523 *(a_pfEFlags) = fEfl; \ 1524 } while (0) 1525 #define SET_BIT_CNT_SEARCH_RESULT_AMD(a_puDst, a_uSrc, a_pfEFlags, a_uResult) do { \ 1526 unsigned const uResult = (a_uResult); \ 1527 *(a_puDst) = uResult; \ 1528 uint32_t fEfl = *(a_pfEFlags) & ~(X86_EFL_ZF | X86_EFL_CF); \ 1529 if (!uResult) \ 1530 fEfl |= X86_EFL_ZF; \ 1531 if (!a_uSrc) \ 1532 fEfl |= X86_EFL_CF; \ 1533 *(a_pfEFlags) = fEfl; \ 1534 } while (0) 1535 1536 1537 /* 1538 * LZCNT - count leading zero bits. 1539 */ 1540 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1541 { 1542 iemAImpl_lzcnt_u64_intel(puDst, uSrc, pfEFlags); 1543 } 1544 1545 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u64_intel,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1546 { 1547 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU64(uSrc)); 1548 } 1549 1550 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u64_amd,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1551 { 1552 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU64(uSrc)); 1553 } 1554 1555 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) 1556 1557 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1558 { 1559 iemAImpl_lzcnt_u32_intel(puDst, uSrc, pfEFlags); 1560 } 1561 1562 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u32_intel,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1563 { 1564 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU32(uSrc)); 1565 } 1566 1567 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u32_amd,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1568 { 1569 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU32(uSrc)); 1570 } 1571 1572 1573 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u16,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1574 { 1575 iemAImpl_lzcnt_u16_intel(puDst, uSrc, pfEFlags); 1576 } 1577 1578 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u16_intel,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1579 { 1580 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU16(uSrc)); 1581 } 1582 1583 IEM_DECL_IMPL_DEF(void, iemAImpl_lzcnt_u16_amd,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1584 { 1585 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountLeadingZerosU16(uSrc)); 1586 } 1587 1588 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */ 1589 1590 1591 /* 1592 * TZCNT - count leading zero bits. 
1593 */ 1594 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u64,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1595 { 1596 iemAImpl_tzcnt_u64_intel(puDst, uSrc, pfEFlags); 1597 } 1598 1599 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u64_intel,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1600 { 1601 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU64(uSrc)); 1602 } 1603 1604 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u64_amd,(uint64_t *puDst, uint64_t uSrc, uint32_t *pfEFlags)) 1605 { 1606 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU64(uSrc)); 1607 } 1608 1609 # if !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) 1610 1611 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u32,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1612 { 1613 iemAImpl_tzcnt_u32_intel(puDst, uSrc, pfEFlags); 1614 } 1615 1616 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u32_intel,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1617 { 1618 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU32(uSrc)); 1619 } 1620 1621 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u32_amd,(uint32_t *puDst, uint32_t uSrc, uint32_t *pfEFlags)) 1622 { 1623 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU32(uSrc)); 1624 } 1625 1626 1627 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u16,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1628 { 1629 iemAImpl_tzcnt_u16_intel(puDst, uSrc, pfEFlags); 1630 } 1631 1632 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u16_intel,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1633 { 1634 SET_BIT_CNT_SEARCH_RESULT_INTEL(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU16(uSrc)); 1635 } 1636 1637 IEM_DECL_IMPL_DEF(void, iemAImpl_tzcnt_u16_amd,(uint16_t *puDst, uint16_t uSrc, uint32_t *pfEFlags)) 1638 { 1639 SET_BIT_CNT_SEARCH_RESULT_AMD(puDst, uSrc, pfEFlags, ASMCountTrailingZerosU16(uSrc)); 1640 } 1641 1642 # endif /* !defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) */ 1643 #endif /* !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) */ 1644 1645 /* 1646 * BEXTR (BMI1 instruction) 1647 */ 1648 #define EMIT_BEXTR(a_cBits, a_Type, a_Suffix) \ 1649 IEM_DECL_IMPL_DEF(void, RT_CONCAT3(iemAImpl_bextr_u,a_cBits,a_Suffix),(a_Type *puDst, a_Type uSrc1, \ 1650 a_Type uSrc2, uint32_t *pfEFlags)) \ 1651 { \ 1652 /* uSrc1 is considered virtually zero extended to 512 bits width. */ \ 1653 uint32_t fEfl = *pfEFlags & ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF); \ 1654 a_Type uResult; \ 1655 uint8_t const iFirstBit = (uint8_t)uSrc2; \ 1656 if (iFirstBit < a_cBits) \ 1657 { \ 1658 uResult = uSrc1 >> iFirstBit; \ 1659 uint8_t const cBits = (uint8_t)(uSrc2 >> 8); \ 1660 if (cBits < a_cBits) \ 1661 uResult &= RT_CONCAT(RT_BIT_,a_cBits)(cBits) - 1; \ 1662 *puDst = uResult; \ 1663 if (!uResult) \ 1664 fEfl |= X86_EFL_ZF; \ 1665 } \ 1666 else \ 1667 { \ 1668 *puDst = uResult = 0; \ 1669 fEfl |= X86_EFL_ZF; \ 1670 } \ 1671 /** @todo complete flag calculations. 
*/ \ 1672 *pfEFlags = fEfl; \ 1673 } 1674 1675 EMIT_BEXTR(64, uint64_t, _fallback) 1676 EMIT_BEXTR(32, uint32_t, _fallback) 1677 #if defined(RT_ARCH_X86) || defined(IEM_WITHOUT_ASSEMBLY) 1678 EMIT_BEXTR(64, uint64_t, RT_NOTHING) 1679 #endif 1680 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 1681 EMIT_BEXTR(32, uint32_t, RT_NOTHING) 1682 #endif 1683 1684 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 1471 1685 1472 1686 /* … … 3018 3232 EMIT_SHRD_16(_intel, 1) 3019 3233 EMIT_SHRD_16(_amd, 0) 3234 3235 3236 /* 3237 * RORX (BMI2) 3238 */ 3239 #define EMIT_RORX(a_cBitsWidth, a_uType, a_fnHlp) \ 3240 IEM_DECL_IMPL_DEF(void, RT_CONCAT(iemAImpl_rorx_u,a_cBitsWidth),(a_uType *puDst, a_uType uSrc, a_uType cShift)) \ 3241 { \ 3242 *puDst = a_fnHlp(uSrc, cShift & (a_cBitsWidth - 1)); \ 3243 } 3244 3245 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 3246 EMIT_RORX(64, uint64_t, ASMRotateRightU64) 3247 #endif 3248 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 3249 EMIT_RORX(32, uint32_t, ASMRotateRightU32) 3250 #endif 3251 3252 3253 /* 3254 * SHLX (BMI2) 3255 */ 3256 #define EMIT_SHLX(a_cBitsWidth, a_uType, a_Suffix) \ 3257 IEM_DECL_IMPL_DEF(void, RT_CONCAT3(iemAImpl_shlx_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, a_uType uSrc, a_uType cShift)) \ 3258 { \ 3259 cShift &= a_cBitsWidth - 1; \ 3260 *puDst = uSrc << cShift; \ 3261 } 3262 3263 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 3264 EMIT_SHLX(64, uint64_t, RT_NOTHING) 3265 EMIT_SHLX(64, uint64_t, _fallback) 3266 #endif 3267 3268 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 3269 EMIT_SHLX(32, uint32_t, RT_NOTHING) 3270 EMIT_SHLX(32, uint32_t, _fallback) 3271 #endif 3272 3273 3274 /* 3275 * SHRX (BMI2) 3276 */ 3277 #define EMIT_SHRX(a_cBitsWidth, a_uType, a_Suffix) \ 3278 IEM_DECL_IMPL_DEF(void, RT_CONCAT3(iemAImpl_shrx_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, a_uType uSrc, a_uType cShift)) \ 3279 { \ 3280 cShift &= a_cBitsWidth - 1; \ 3281 *puDst = uSrc >> cShift; \ 3282 } 3283 3284 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 3285 EMIT_SHRX(64, uint64_t, RT_NOTHING) 3286 EMIT_SHRX(64, uint64_t, _fallback) 3287 #endif 3288 3289 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 3290 EMIT_SHRX(32, uint32_t, RT_NOTHING) 3291 EMIT_SHRX(32, uint32_t, _fallback) 3292 #endif 3293 3294 3295 /* 3296 * SARX (BMI2) 3297 */ 3298 #define EMIT_SARX(a_cBitsWidth, a_uType, a_iType, a_Suffix) \ 3299 IEM_DECL_IMPL_DEF(void, RT_CONCAT3(iemAImpl_sarx_u,a_cBitsWidth,a_Suffix),(a_uType *puDst, a_uType uSrc, a_uType cShift)) \ 3300 { \ 3301 cShift &= a_cBitsWidth - 1; \ 3302 *puDst = (a_iType)uSrc >> cShift; \ 3303 } 3304 3305 #if !defined(RT_ARCH_AMD64) || defined(IEM_WITHOUT_ASSEMBLY) 3306 EMIT_SARX(64, uint64_t, int64_t, RT_NOTHING) 3307 EMIT_SARX(64, uint64_t, int64_t, _fallback) 3308 #endif 3309 3310 #if (!defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) || defined(IEM_WITHOUT_ASSEMBLY) 3311 EMIT_SARX(32, uint32_t, int32_t, RT_NOTHING) 3312 EMIT_SARX(32, uint32_t, int32_t, _fallback) 3313 #endif 3020 3314 3021 3315 -
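The two SET_BIT_CNT_SEARCH_RESULT_* macros above encode the Intel/AMD difference in LZCNT/TZCNT flag handling. A standalone C sketch of that difference follows (not VBox code; GCC/Clang builtins stand in for the ASMCount* helpers and the parity table):

#include <stdint.h>
#include <stdio.h>

#define EFL_CF 0x0001u
#define EFL_PF 0x0004u
#define EFL_AF 0x0010u
#define EFL_ZF 0x0040u
#define EFL_SF 0x0080u
#define EFL_OF 0x0800u

/* Intel flavour: OF/SF/ZF/AF/PF/CF are all redefined; PF follows the result's
   low-byte parity, ZF|PF are set for a zero result, CF for a zero source. */
static uint32_t lzcnt32_flags_intel(uint32_t uSrc, unsigned uResult, uint32_t fEfl)
{
    fEfl &= ~(EFL_OF | EFL_SF | EFL_ZF | EFL_AF | EFL_PF | EFL_CF);
    if (uResult)
        fEfl |= (__builtin_popcount(uResult & 0xff) & 1) ? 0 : EFL_PF;
    else
        fEfl |= EFL_ZF | EFL_PF;
    if (!uSrc)
        fEfl |= EFL_CF;
    return fEfl;
}

/* AMD flavour: only ZF and CF are touched; the other flags keep their values. */
static uint32_t lzcnt32_flags_amd(uint32_t uSrc, unsigned uResult, uint32_t fEfl)
{
    fEfl &= ~(EFL_ZF | EFL_CF);
    if (!uResult)
        fEfl |= EFL_ZF;
    if (!uSrc)
        fEfl |= EFL_CF;
    return fEfl;
}

int main(void)
{
    uint32_t const uSrc    = 0x00008000u;
    unsigned const uResult = uSrc ? (unsigned)__builtin_clz(uSrc) : 32; /* lzcnt = 16 */
    printf("intel=%#x amd=%#x\n",
           lzcnt32_flags_intel(uSrc, uResult, EFL_SF),
           lzcnt32_flags_amd(uSrc, uResult, EFL_SF)); /* Intel clears SF, AMD keeps it */
    return 0;
}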
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r95167 r95308 374 374 'VEX_RMV_REG': ( 'VEX.ModR/M', [ 'reg', 'rm', 'vvvv' ], '11 mr/reg', ), 375 375 'VEX_RMV_MEM': ( 'VEX.ModR/M', [ 'reg', 'rm', 'vvvv' ], '!11 mr/reg', ), 376 'VEX_RMI': ( 'VEX.ModR/M', [ 'reg', 'rm', 'imm' ], '', ), 377 'VEX_RMI_REG': ( 'VEX.ModR/M', [ 'reg', 'rm', 'imm' ], '11 mr/reg', ), 378 'VEX_RMI_MEM': ( 'VEX.ModR/M', [ 'reg', 'rm', 'imm' ], '!11 mr/reg', ), 376 379 'VEX_MVR': ( 'VEX.ModR/M', [ 'rm', 'vvvv', 'reg' ], '', ), 377 380 'VEX_MVR_REG': ( 'VEX.ModR/M', [ 'rm', 'vvvv', 'reg' ], '11 mr/reg', ), … … 529 532 'vex_l_zero': '', ##< VEX.L must be 0. 530 533 'vex_l_ignored': '', ##< VEX.L is ignored. 534 'vex_v_zero': '', ##< VEX.V must be 0. (generate sub-table?) 531 535 'lock_allowed': '', ##< Lock prefix allowed. 532 536 }; -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r94162 r95308 8004 8004 8005 8005 8006 /** 8007 * Common worker for BSF and BSR instructions. 8008 * 8009 * These cannot use iemOpHlpBinaryOperator_rv_rm because they don't always write 8010 * the destination register, which means that for 32-bit operations the high 8011 * bits must be left alone. 8012 * 8013 * @param pImpl Pointer to the instruction implementation (assembly). 8014 */ 8015 FNIEMOP_DEF_1(iemOpHlpBitScanOperator_rv_rm, PCIEMOPBINSIZES, pImpl) 8016 { 8017 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 8018 8019 /* 8020 * If rm is denoting a register, no more instruction bytes. 8021 */ 8022 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 8023 { 8024 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8025 switch (pVCpu->iem.s.enmEffOpSize) 8026 { 8027 case IEMMODE_16BIT: 8028 IEM_MC_BEGIN(3, 0); 8029 IEM_MC_ARG(uint16_t *, pu16Dst, 0); 8030 IEM_MC_ARG(uint16_t, u16Src, 1); 8031 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8032 8033 IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8034 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8035 IEM_MC_REF_EFLAGS(pEFlags); 8036 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 8037 8038 IEM_MC_ADVANCE_RIP(); 8039 IEM_MC_END(); 8040 break; 8041 8042 case IEMMODE_32BIT: 8043 IEM_MC_BEGIN(3, 0); 8044 IEM_MC_ARG(uint32_t *, pu32Dst, 0); 8045 IEM_MC_ARG(uint32_t, u32Src, 1); 8046 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8047 8048 IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8049 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8050 IEM_MC_REF_EFLAGS(pEFlags); 8051 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 8052 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) 8053 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); 8054 IEM_MC_ENDIF(); 8055 IEM_MC_ADVANCE_RIP(); 8056 IEM_MC_END(); 8057 break; 8058 8059 case IEMMODE_64BIT: 8060 IEM_MC_BEGIN(3, 0); 8061 IEM_MC_ARG(uint64_t *, pu64Dst, 0); 8062 IEM_MC_ARG(uint64_t, u64Src, 1); 8063 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8064 8065 IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB); 8066 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8067 IEM_MC_REF_EFLAGS(pEFlags); 8068 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 8069 8070 IEM_MC_ADVANCE_RIP(); 8071 IEM_MC_END(); 8072 break; 8073 } 8074 } 8075 else 8076 { 8077 /* 8078 * We're accessing memory. 
8079 */ 8080 switch (pVCpu->iem.s.enmEffOpSize) 8081 { 8082 case IEMMODE_16BIT: 8083 IEM_MC_BEGIN(3, 1); 8084 IEM_MC_ARG(uint16_t *, pu16Dst, 0); 8085 IEM_MC_ARG(uint16_t, u16Src, 1); 8086 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8087 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 8088 8089 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 8090 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8091 IEM_MC_FETCH_MEM_U16(u16Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8092 IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8093 IEM_MC_REF_EFLAGS(pEFlags); 8094 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags); 8095 8096 IEM_MC_ADVANCE_RIP(); 8097 IEM_MC_END(); 8098 break; 8099 8100 case IEMMODE_32BIT: 8101 IEM_MC_BEGIN(3, 1); 8102 IEM_MC_ARG(uint32_t *, pu32Dst, 0); 8103 IEM_MC_ARG(uint32_t, u32Src, 1); 8104 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8105 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 8106 8107 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 8108 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8109 IEM_MC_FETCH_MEM_U32(u32Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8110 IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8111 IEM_MC_REF_EFLAGS(pEFlags); 8112 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags); 8113 8114 IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF) 8115 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); 8116 IEM_MC_ENDIF(); 8117 IEM_MC_ADVANCE_RIP(); 8118 IEM_MC_END(); 8119 break; 8120 8121 case IEMMODE_64BIT: 8122 IEM_MC_BEGIN(3, 1); 8123 IEM_MC_ARG(uint64_t *, pu64Dst, 0); 8124 IEM_MC_ARG(uint64_t, u64Src, 1); 8125 IEM_MC_ARG(uint32_t *, pEFlags, 2); 8126 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst); 8127 8128 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0); 8129 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); 8130 IEM_MC_FETCH_MEM_U64(u64Src, pVCpu->iem.s.iEffSeg, GCPtrEffDst); 8131 IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg); 8132 IEM_MC_REF_EFLAGS(pEFlags); 8133 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags); 8134 8135 IEM_MC_ADVANCE_RIP(); 8136 IEM_MC_END(); 8137 break; 8138 } 8139 } 8140 return VINF_SUCCESS; 8141 } 8142 8143 8006 8144 /** Opcode 0x0f 0xbc. 
*/ 8007 8145 FNIEMOP_DEF(iemOp_bsf_Gv_Ev) … … 8010 8148 IEMOP_HLP_MIN_386(); 8011 8149 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF); 8012 return FNIEMOP_CALL_1(iemOpHlpBi naryOperator_rv_rm, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_bsf_eflags));8150 return FNIEMOP_CALL_1(iemOpHlpBitScanOperator_rv_rm, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_bsf_eflags)); 8013 8151 } 8014 8152 8015 8153 8016 8154 /** Opcode 0xf3 0x0f 0xbc - TZCNT Gv, Ev */ 8017 FNIEMOP_STUB(iemOp_tzcnt_Gv_Ev); 8155 FNIEMOP_DEF(iemOp_tzcnt_Gv_Ev) 8156 { 8157 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fBmi1) 8158 return FNIEMOP_CALL(iemOp_bsf_Gv_Ev); 8159 IEMOP_MNEMONIC2(RM, TZCNT, tzcnt, Gv, Ev, DISOPTYPE_HARMLESS, 0); 8160 8161 #ifndef TST_IEM_CHECK_MC 8162 static const IEMOPBINSIZES s_iemAImpl_tzcnt = 8163 { NULL, NULL, iemAImpl_tzcnt_u16, NULL, iemAImpl_tzcnt_u32, NULL, iemAImpl_tzcnt_u64, NULL }; 8164 static const IEMOPBINSIZES s_iemAImpl_tzcnt_amd = 8165 { NULL, NULL, iemAImpl_tzcnt_u16_amd, NULL, iemAImpl_tzcnt_u32_amd, NULL, iemAImpl_tzcnt_u64_amd, NULL }; 8166 static const IEMOPBINSIZES s_iemAImpl_tzcnt_intel = 8167 { NULL, NULL, iemAImpl_tzcnt_u16_intel, NULL, iemAImpl_tzcnt_u32_intel, NULL, iemAImpl_tzcnt_u64_intel, NULL }; 8168 static const IEMOPBINSIZES * const s_iemAImpl_tzcnt_eflags[2][4] = 8169 { 8170 { &s_iemAImpl_tzcnt_intel, &s_iemAImpl_tzcnt_intel, &s_iemAImpl_tzcnt_amd, &s_iemAImpl_tzcnt_intel }, 8171 { &s_iemAImpl_tzcnt, &s_iemAImpl_tzcnt_intel, &s_iemAImpl_tzcnt_amd, &s_iemAImpl_tzcnt } 8172 }; 8173 #endif 8174 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF); 8175 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, 8176 IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(s_iemAImpl_tzcnt_eflags, IEM_GET_HOST_CPU_FEATURES(pVCpu)->fBmi1)); 8177 } 8018 8178 8019 8179 … … 8024 8184 IEMOP_HLP_MIN_386(); 8025 8185 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF); 8026 return FNIEMOP_CALL_1(iemOpHlpBi naryOperator_rv_rm, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_bsr_eflags));8186 return FNIEMOP_CALL_1(iemOpHlpBitScanOperator_rv_rm, IEMTARGETCPU_EFL_BEHAVIOR_SELECT(g_iemAImpl_bsr_eflags)); 8027 8187 } 8028 8188 8029 8189 8030 8190 /** Opcode 0xf3 0x0f 0xbd - LZCNT Gv, Ev */ 8031 FNIEMOP_STUB(iemOp_lzcnt_Gv_Ev); 8191 FNIEMOP_DEF(iemOp_lzcnt_Gv_Ev) 8192 { 8193 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fBmi1) 8194 return FNIEMOP_CALL(iemOp_bsr_Gv_Ev); 8195 IEMOP_MNEMONIC2(RM, LZCNT, lzcnt, Gv, Ev, DISOPTYPE_HARMLESS, 0); 8196 8197 #ifndef TST_IEM_CHECK_MC 8198 static const IEMOPBINSIZES s_iemAImpl_lzcnt = 8199 { NULL, NULL, iemAImpl_lzcnt_u16, NULL, iemAImpl_lzcnt_u32, NULL, iemAImpl_lzcnt_u64, NULL }; 8200 static const IEMOPBINSIZES s_iemAImpl_lzcnt_amd = 8201 { NULL, NULL, iemAImpl_lzcnt_u16_amd, NULL, iemAImpl_lzcnt_u32_amd, NULL, iemAImpl_lzcnt_u64_amd, NULL }; 8202 static const IEMOPBINSIZES s_iemAImpl_lzcnt_intel = 8203 { NULL, NULL, iemAImpl_lzcnt_u16_intel, NULL, iemAImpl_lzcnt_u32_intel, NULL, iemAImpl_lzcnt_u64_intel, NULL }; 8204 static const IEMOPBINSIZES * const s_iemAImpl_lzcnt_eflags[2][4] = 8205 { 8206 { &s_iemAImpl_lzcnt_intel, &s_iemAImpl_lzcnt_intel, &s_iemAImpl_lzcnt_amd, &s_iemAImpl_lzcnt_intel }, 8207 { &s_iemAImpl_lzcnt, &s_iemAImpl_lzcnt_intel, &s_iemAImpl_lzcnt_amd, &s_iemAImpl_lzcnt } 8208 }; 8209 #endif 8210 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF); 8211 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, 8212 
IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(s_iemAImpl_lzcnt_eflags, IEM_GET_HOST_CPU_FEATURES(pVCpu)->fBmi1)); 8213 } 8214 8032 8215 8033 8216 -
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap2.cpp.h
r94617 r95308 537 537 /* Opcode VEX.F2.0F38 0xf1 - invalid (legacy only). */ 538 538 539 /* Opcode VEX.0F38 0xf2 - invalid (vex only). */ 540 FNIEMOP_STUB(iemOp_andn_Gy_By_Ey); 539 /** Opcode VEX.0F38 0xf2 - ANDN (vex only). */ 540 FNIEMOP_DEF(iemOp_andn_Gy_By_Ey) 541 { 542 IEMOP_MNEMONIC3(VEX_RVM, ANDN, andn, Gy, By, Ey, DISOPTYPE_HARMLESS, 0); 543 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fBmi1) 544 return iemOp_InvalidNeedRM(pVCpu); 545 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_PF); 546 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 547 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 548 { 549 /* 550 * Register, register. 551 */ 552 IEMOP_HLP_DONE_VEX_DECODING(); 553 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 554 { 555 IEM_MC_BEGIN(4, 0); 556 IEM_MC_ARG(uint64_t *, pDst, 0); 557 IEM_MC_ARG(uint64_t, uSrc1, 1); 558 IEM_MC_ARG(uint64_t, uSrc2, 2); 559 IEM_MC_ARG(uint32_t *, pEFlags, 3); 560 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 561 IEM_MC_FETCH_GREG_U64(uSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu)); 562 IEM_MC_FETCH_GREG_U64(uSrc2, IEM_GET_MODRM_RM(pVCpu, bRm)); 563 IEM_MC_REF_EFLAGS(pEFlags); 564 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fBmi1, iemAImpl_andn_u64, iemAImpl_andn_u64_fallback), 565 pDst, uSrc1, uSrc2, pEFlags); 566 IEM_MC_ADVANCE_RIP(); 567 IEM_MC_END(); 568 } 569 else 570 { 571 IEM_MC_BEGIN(4, 0); 572 IEM_MC_ARG(uint32_t *, pDst, 0); 573 IEM_MC_ARG(uint32_t, uSrc1, 1); 574 IEM_MC_ARG(uint32_t, uSrc2, 2); 575 IEM_MC_ARG(uint32_t *, pEFlags, 3); 576 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 577 IEM_MC_FETCH_GREG_U32(uSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu)); 578 IEM_MC_FETCH_GREG_U32(uSrc2, IEM_GET_MODRM_RM(pVCpu, bRm)); 579 IEM_MC_REF_EFLAGS(pEFlags); 580 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fBmi1, iemAImpl_andn_u32, iemAImpl_andn_u32_fallback), 581 pDst, uSrc1, uSrc2, pEFlags); 582 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); 583 IEM_MC_ADVANCE_RIP(); 584 IEM_MC_END(); 585 } 586 } 587 else 588 { 589 /* 590 * Register, memory. 
591 */ 592 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 593 { 594 IEM_MC_BEGIN(4, 1); 595 IEM_MC_ARG(uint64_t *, pDst, 0); 596 IEM_MC_ARG(uint64_t, uSrc1, 1); 597 IEM_MC_ARG(uint64_t, uSrc2, 2); 598 IEM_MC_ARG(uint32_t *, pEFlags, 3); 599 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 600 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 601 IEMOP_HLP_DONE_VEX_DECODING(); 602 IEM_MC_FETCH_MEM_U64(uSrc2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 603 IEM_MC_FETCH_GREG_U64(uSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu)); 604 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 605 IEM_MC_REF_EFLAGS(pEFlags); 606 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fBmi1, iemAImpl_andn_u64, iemAImpl_andn_u64_fallback), 607 pDst, uSrc1, uSrc2, pEFlags); 608 IEM_MC_ADVANCE_RIP(); 609 IEM_MC_END(); 610 } 611 else 612 { 613 IEM_MC_BEGIN(4, 1); 614 IEM_MC_ARG(uint32_t *, pDst, 0); 615 IEM_MC_ARG(uint32_t, uSrc1, 1); 616 IEM_MC_ARG(uint32_t, uSrc2, 2); 617 IEM_MC_ARG(uint32_t *, pEFlags, 3); 618 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 619 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); 620 IEMOP_HLP_DONE_VEX_DECODING(); 621 IEM_MC_FETCH_MEM_U32(uSrc2, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 622 IEM_MC_FETCH_GREG_U32(uSrc1, IEM_GET_EFFECTIVE_VVVV(pVCpu)); 623 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 624 IEM_MC_REF_EFLAGS(pEFlags); 625 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(fBmi1, iemAImpl_andn_u32, iemAImpl_andn_u32_fallback), 626 pDst, uSrc1, uSrc2, pEFlags); 627 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); 628 IEM_MC_ADVANCE_RIP(); 629 IEM_MC_END(); 630 } 631 } 632 return VINF_SUCCESS; 633 } 634 541 635 /* Opcode VEX.66.0F38 0xf2 - invalid. */ 542 636 /* Opcode VEX.F3.0F38 0xf2 - invalid. */ … … 580 674 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 581 675 return FNIEMOP_CALL_1(g_apfnVexGroup17_f3[((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)], bRm); 582 583 676 } 584 677 … … 590 683 /* Opcode VEX.F3.0F38 0xf4 - invalid. */ 591 684 /* Opcode VEX.F2.0F38 0xf4 - invalid. */ 685 686 #define IEMOP_BODY_Gy_Ey_By(a_Instr, a_fFeatureMember, a_fUndefFlags) \ 687 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->a_fFeatureMember) \ 688 return iemOp_InvalidNeedRM(pVCpu); \ 689 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fUndefFlags); \ 690 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \ 691 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \ 692 { \ 693 /* \ 694 * Register, register. 
\ 695 */ \ 696 IEMOP_HLP_DONE_VEX_DECODING(); \ 697 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) \ 698 { \ 699 IEM_MC_BEGIN(4, 0); \ 700 IEM_MC_ARG(uint64_t *, pDst, 0); \ 701 IEM_MC_ARG(uint64_t, uSrc1, 1); \ 702 IEM_MC_ARG(uint64_t, uSrc2, 2); \ 703 IEM_MC_ARG(uint32_t *, pEFlags, 3); \ 704 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 705 IEM_MC_FETCH_GREG_U64(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 706 IEM_MC_FETCH_GREG_U64(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 707 IEM_MC_REF_EFLAGS(pEFlags); \ 708 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u64, \ 709 iemAImpl_ ## a_Instr ## _u64_fallback), \ 710 pDst, uSrc1, uSrc2, pEFlags); \ 711 IEM_MC_ADVANCE_RIP(); \ 712 IEM_MC_END(); \ 713 } \ 714 else \ 715 { \ 716 IEM_MC_BEGIN(4, 0); \ 717 IEM_MC_ARG(uint32_t *, pDst, 0); \ 718 IEM_MC_ARG(uint32_t, uSrc1, 1); \ 719 IEM_MC_ARG(uint32_t, uSrc2, 2); \ 720 IEM_MC_ARG(uint32_t *, pEFlags, 3); \ 721 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 722 IEM_MC_FETCH_GREG_U32(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 723 IEM_MC_FETCH_GREG_U32(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 724 IEM_MC_REF_EFLAGS(pEFlags); \ 725 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u32, \ 726 iemAImpl_ ## a_Instr ## _u32_fallback), \ 727 pDst, uSrc1, uSrc2, pEFlags); \ 728 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); \ 729 IEM_MC_ADVANCE_RIP(); \ 730 IEM_MC_END(); \ 731 } \ 732 } \ 733 else \ 734 { \ 735 /* \ 736 * Register, memory. \ 737 */ \ 738 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) \ 739 { \ 740 IEM_MC_BEGIN(4, 1); \ 741 IEM_MC_ARG(uint64_t *, pDst, 0); \ 742 IEM_MC_ARG(uint64_t, uSrc1, 1); \ 743 IEM_MC_ARG(uint64_t, uSrc2, 2); \ 744 IEM_MC_ARG(uint32_t *, pEFlags, 3); \ 745 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ 746 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 747 IEMOP_HLP_DONE_VEX_DECODING(); \ 748 IEM_MC_FETCH_MEM_U64(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 749 IEM_MC_FETCH_GREG_U64(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 750 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 751 IEM_MC_REF_EFLAGS(pEFlags); \ 752 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u64, \ 753 iemAImpl_ ## a_Instr ## _u64_fallback), \ 754 pDst, uSrc1, uSrc2, pEFlags); \ 755 IEM_MC_ADVANCE_RIP(); \ 756 IEM_MC_END(); \ 757 } \ 758 else \ 759 { \ 760 IEM_MC_BEGIN(4, 1); \ 761 IEM_MC_ARG(uint32_t *, pDst, 0); \ 762 IEM_MC_ARG(uint32_t, uSrc1, 1); \ 763 IEM_MC_ARG(uint32_t, uSrc2, 2); \ 764 IEM_MC_ARG(uint32_t *, pEFlags, 3); \ 765 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ 766 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 767 IEMOP_HLP_DONE_VEX_DECODING(); \ 768 IEM_MC_FETCH_MEM_U32(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 769 IEM_MC_FETCH_GREG_U32(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 770 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 771 IEM_MC_REF_EFLAGS(pEFlags); \ 772 IEM_MC_CALL_VOID_AIMPL_4(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u32, \ 773 iemAImpl_ ## a_Instr ## _u32_fallback), \ 774 pDst, uSrc1, uSrc2, pEFlags); \ 775 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); \ 776 IEM_MC_ADVANCE_RIP(); \ 777 IEM_MC_END(); \ 778 } \ 779 } \ 780 return VINF_SUCCESS 781 782 #define IEMOP_BODY_Gy_Ey_By_NoEflags(a_Instr, a_fFeatureMember, a_fUndefFlags) \ 783 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->a_fFeatureMember) \ 784 return iemOp_InvalidNeedRM(pVCpu); \ 785 
IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fUndefFlags); \ 786 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \ 787 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \ 788 { \ 789 /* \ 790 * Register, register. \ 791 */ \ 792 IEMOP_HLP_DONE_VEX_DECODING(); \ 793 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) \ 794 { \ 795 IEM_MC_BEGIN(3, 0); \ 796 IEM_MC_ARG(uint64_t *, pDst, 0); \ 797 IEM_MC_ARG(uint64_t, uSrc1, 1); \ 798 IEM_MC_ARG(uint64_t, uSrc2, 2); \ 799 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 800 IEM_MC_FETCH_GREG_U64(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 801 IEM_MC_FETCH_GREG_U64(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 802 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u64, \ 803 iemAImpl_ ## a_Instr ## _u64_fallback), pDst, uSrc1, uSrc2); \ 804 IEM_MC_ADVANCE_RIP(); \ 805 IEM_MC_END(); \ 806 } \ 807 else \ 808 { \ 809 IEM_MC_BEGIN(3, 0); \ 810 IEM_MC_ARG(uint32_t *, pDst, 0); \ 811 IEM_MC_ARG(uint32_t, uSrc1, 1); \ 812 IEM_MC_ARG(uint32_t, uSrc2, 2); \ 813 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 814 IEM_MC_FETCH_GREG_U32(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); \ 815 IEM_MC_FETCH_GREG_U32(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 816 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u32, \ 817 iemAImpl_ ## a_Instr ## _u32_fallback), pDst, uSrc1, uSrc2); \ 818 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); \ 819 IEM_MC_ADVANCE_RIP(); \ 820 IEM_MC_END(); \ 821 } \ 822 } \ 823 else \ 824 { \ 825 /* \ 826 * Register, memory. \ 827 */ \ 828 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) \ 829 { \ 830 IEM_MC_BEGIN(3, 1); \ 831 IEM_MC_ARG(uint64_t *, pDst, 0); \ 832 IEM_MC_ARG(uint64_t, uSrc1, 1); \ 833 IEM_MC_ARG(uint64_t, uSrc2, 2); \ 834 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ 835 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 836 IEMOP_HLP_DONE_VEX_DECODING(); \ 837 IEM_MC_FETCH_MEM_U64(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 838 IEM_MC_FETCH_GREG_U64(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 839 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 840 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u64, \ 841 iemAImpl_ ## a_Instr ## _u64_fallback), pDst, uSrc1, uSrc2); \ 842 IEM_MC_ADVANCE_RIP(); \ 843 IEM_MC_END(); \ 844 } \ 845 else \ 846 { \ 847 IEM_MC_BEGIN(3, 1); \ 848 IEM_MC_ARG(uint32_t *, pDst, 0); \ 849 IEM_MC_ARG(uint32_t, uSrc1, 1); \ 850 IEM_MC_ARG(uint32_t, uSrc2, 2); \ 851 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \ 852 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \ 853 IEMOP_HLP_DONE_VEX_DECODING(); \ 854 IEM_MC_FETCH_MEM_U32(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); \ 855 IEM_MC_FETCH_GREG_U32(uSrc2, IEM_GET_EFFECTIVE_VVVV(pVCpu)); \ 856 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); \ 857 IEM_MC_CALL_VOID_AIMPL_3(IEM_SELECT_HOST_OR_FALLBACK(a_fFeatureMember, iemAImpl_ ## a_Instr ## _u32, \ 858 iemAImpl_ ## a_Instr ## _u32_fallback), pDst, uSrc1, uSrc2); \ 859 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); \ 860 IEM_MC_ADVANCE_RIP(); \ 861 IEM_MC_END(); \ 862 } \ 863 } \ 864 return VINF_SUCCESS 592 865 593 866 /** Opcode VEX.0F38 0xf5 (vex only). */ … … 605 878 FNIEMOP_STUB(iemOp_mulx_By_Gy_rDX_Ey); 606 879 880 607 881 /** Opcode VEX.0F38 0xf7 (vex only). 
*/ 608 FNIEMOP_STUB(iemOp_bextr_Gy_Ey_By); 882 FNIEMOP_DEF(iemOp_bextr_Gy_Ey_By) 883 { 884 IEMOP_MNEMONIC3(VEX_RMV, BEXTR, bextr, Gy, Ey, By, DISOPTYPE_HARMLESS, 0); 885 IEMOP_BODY_Gy_Ey_By(bextr, fBmi1, X86_EFL_SF | X86_EFL_AF | X86_EFL_PF); 886 } 887 888 609 889 /** Opcode VEX.66.0F38 0xf7 (vex only). */ 610 FNIEMOP_STUB(iemOp_shlx_Gy_Ey_By); 890 FNIEMOP_DEF(iemOp_shlx_Gy_Ey_By) 891 { 892 IEMOP_MNEMONIC3(VEX_RMV, SHLX, shlx, Gy, Ey, By, DISOPTYPE_HARMLESS, 0); 893 IEMOP_BODY_Gy_Ey_By_NoEflags(shlx, fBmi2, 0); 894 } 895 896 611 897 /** Opcode VEX.F3.0F38 0xf7 (vex only). */ 612 FNIEMOP_STUB(iemOp_sarx_Gy_Ey_By); 898 FNIEMOP_DEF(iemOp_sarx_Gy_Ey_By) 899 { 900 IEMOP_MNEMONIC3(VEX_RMV, SARX, sarx, Gy, Ey, By, DISOPTYPE_HARMLESS, 0); 901 IEMOP_BODY_Gy_Ey_By_NoEflags(sarx, fBmi2, 0); 902 } 903 904 613 905 /** Opcode VEX.F2.0F38 0xf7 (vex only). */ 614 FNIEMOP_STUB(iemOp_shrx_Gy_Ey_By); 906 FNIEMOP_DEF(iemOp_shrx_Gy_Ey_By) 907 { 908 IEMOP_MNEMONIC3(VEX_RMV, SHRX, shrx, Gy, Ey, By, DISOPTYPE_HARMLESS, 0); 909 IEMOP_BODY_Gy_Ey_By_NoEflags(shrx, fBmi2, 0); 910 } 615 911 616 912 /* Opcode VEX.0F38 0xf8 - invalid. */ -
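A standalone C sketch (not VBox code) of the BEXTR extraction the new decoder above dispatches to: the start bit comes from byte 0 of the second source operand, the field length from byte 1, and ZF reflects a zero result.

#include <stdint.h>
#include <stdio.h>

static uint32_t bextr_u32_sketch(uint32_t uSrc1, uint32_t uSrc2, int *pfZero)
{
    uint8_t const iFirstBit = (uint8_t)uSrc2;        /* start bit */
    uint8_t const cBits     = (uint8_t)(uSrc2 >> 8); /* field length */
    uint32_t      uResult   = 0;
    if (iFirstBit < 32)
    {
        uResult = uSrc1 >> iFirstBit;
        if (cBits < 32)
            uResult &= (UINT32_C(1) << cBits) - 1;
    }
    *pfZero = uResult == 0;
    return uResult;
}

int main(void)
{
    int fZero;
    /* Extract 8 bits starting at bit 4 of 0x12345678 -> 0x67. */
    printf("bextr = %#x\n", bextr_u32_sketch(0x12345678u, (8 << 8) | 4, &fZero));
    return 0;
}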
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsVexMap3.cpp.h
r93115 r95308 268 268 269 269 270 /* Opcode VEX.F2.0F3A (vex only) */ 271 FNIEMOP_STUB(iemOp_rorx_Gy_Ey_Ib); 270 /** Opcode VEX.F2.0F3A (vex only) */ 271 FNIEMOP_DEF(iemOp_rorx_Gy_Ey_Ib) 272 { 273 IEMOP_MNEMONIC3(VEX_RMI, RORX, rorx, Gy, Ey, Ib, DISOPTYPE_HARMLESS, IEMOPHINT_VEX_L_ZERO | IEMOPHINT_VEX_V_ZERO); 274 if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fBmi2) 275 return iemOp_InvalidNeedRMImm8(pVCpu); 276 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); 277 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) 278 { 279 /* 280 * Register, register. 281 */ 282 uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); 283 IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV(); 284 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 285 { 286 IEM_MC_BEGIN(3, 0); 287 IEM_MC_ARG(uint64_t *, pDst, 0); 288 IEM_MC_ARG(uint64_t, uSrc1, 1); 289 IEM_MC_ARG_CONST(uint64_t, uSrc2, bImm8, 2); 290 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 291 IEM_MC_FETCH_GREG_U64(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); 292 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_rorx_u64, pDst, uSrc1, uSrc2); 293 IEM_MC_ADVANCE_RIP(); 294 IEM_MC_END(); 295 } 296 else 297 { 298 IEM_MC_BEGIN(3, 0); 299 IEM_MC_ARG(uint32_t *, pDst, 0); 300 IEM_MC_ARG(uint32_t, uSrc1, 1); 301 IEM_MC_ARG_CONST(uint32_t, uSrc2, bImm8, 2); 302 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 303 IEM_MC_FETCH_GREG_U32(uSrc1, IEM_GET_MODRM_RM(pVCpu, bRm)); 304 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_rorx_u32, pDst, uSrc1, uSrc2); 305 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); 306 IEM_MC_ADVANCE_RIP(); 307 IEM_MC_END(); 308 } 309 } 310 else 311 { 312 /* 313 * Register, memory. 314 */ 315 if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_REX_W) 316 { 317 IEM_MC_BEGIN(3, 1); 318 IEM_MC_ARG(uint64_t *, pDst, 0); 319 IEM_MC_ARG(uint64_t, uSrc1, 1); 320 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 321 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); 322 uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); 323 IEM_MC_ARG_CONST(uint64_t, uSrc2, bImm8, 2); 324 IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV(); 325 IEM_MC_FETCH_MEM_U64(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 326 IEM_MC_REF_GREG_U64(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 327 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_rorx_u64, pDst, uSrc1, uSrc2); 328 IEM_MC_ADVANCE_RIP(); 329 IEM_MC_END(); 330 } 331 else 332 { 333 IEM_MC_BEGIN(3, 1); 334 IEM_MC_ARG(uint32_t *, pDst, 0); 335 IEM_MC_ARG(uint32_t, uSrc1, 1); 336 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); 337 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1); 338 uint8_t bImm8; IEM_OPCODE_GET_NEXT_U8(&bImm8); 339 IEM_MC_ARG_CONST(uint32_t, uSrc2, bImm8, 2); 340 IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV(); 341 IEM_MC_FETCH_MEM_U32(uSrc1, pVCpu->iem.s.iEffSeg, GCPtrEffSrc); 342 IEM_MC_REF_GREG_U32(pDst, IEM_GET_MODRM_REG(pVCpu, bRm)); 343 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_rorx_u32, pDst, uSrc1, uSrc2); 344 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pDst); 345 IEM_MC_ADVANCE_RIP(); 346 IEM_MC_END(); 347 } 348 } 349 return VINF_SUCCESS; 350 } 272 351 273 352 -
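A standalone C sketch (not VBox code) of the RORX semantics decoded above: a plain rotate right by an immediate count, with EFLAGS left completely untouched.

#include <stdint.h>
#include <stdio.h>

static uint32_t rorx_u32_sketch(uint32_t uSrc, uint8_t cShift)
{
    cShift &= 31;
    return cShift ? (uSrc >> cShift) | (uSrc << (32 - cShift)) : uSrc;
}

int main(void)
{
    printf("rorx(0x80000001, 1) = %#010x\n", rorx_u32_sketch(0x80000001u, 1)); /* 0xc0000000 */
    return 0;
}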
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r94932 r95308 140 140 if (idCpu == 0) 141 141 { 142 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM); 143 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM); 142 pVCpu->iem.s.enmCpuVendor = CPUMGetGuestCpuVendor(pVM); 143 pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM); 144 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL 145 || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_VIA /*??*/ 146 ? IEMTARGETCPU_EFL_BEHAVIOR_INTEL : IEMTARGETCPU_EFL_BEHAVIOR_AMD; 147 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) 148 if (pVCpu->iem.s.enmCpuVendor == pVCpu->iem.s.enmHostCpuVendor) 149 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = IEMTARGETCPU_EFL_BEHAVIOR_NATIVE; 150 else 151 #endif 152 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVCpu->iem.s.aidxTargetCpuEflFlavour[0]; 153 144 154 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC 145 155 switch (pVM->cpum.ro.GuestFeatures.enmMicroarch) … … 156 166 default: pVCpu->iem.s.uTargetCpu = IEMTARGETCPU_CURRENT; break; 157 167 } 158 LogRel(("IEM: TargetCpu=%s, Microarch=%s\n", iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch))); 168 LogRel(("IEM: TargetCpu=%s, Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n", 169 iemGetTargetCpuName(pVCpu->iem.s.uTargetCpu), CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch), 170 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1])); 171 #else 172 LogRel(("IEM: Microarch=%s aidxTargetCpuEflFlavour={%d,%d}\n", 173 CPUMMicroarchName(pVM->cpum.ro.GuestFeatures.enmMicroarch), 174 pVCpu->iem.s.aidxTargetCpuEflFlavour[0], pVCpu->iem.s.aidxTargetCpuEflFlavour[1])); 159 175 #endif 160 176 } 161 177 else 162 178 { 163 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor; 164 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor; 179 pVCpu->iem.s.enmCpuVendor = pVM->apCpusR3[0]->iem.s.enmCpuVendor; 180 pVCpu->iem.s.enmHostCpuVendor = pVM->apCpusR3[0]->iem.s.enmHostCpuVendor; 181 pVCpu->iem.s.aidxTargetCpuEflFlavour[0] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[0]; 182 pVCpu->iem.s.aidxTargetCpuEflFlavour[1] = pVM->apCpusR3[0]->iem.s.aidxTargetCpuEflFlavour[1]; 165 183 #if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC 166 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu;184 pVCpu->iem.s.uTargetCpu = pVM->apCpusR3[0]->iem.s.uTargetCpu; 167 185 #endif 168 186 } -
trunk/src/VBox/VMM/include/IEMInternal.h
r95185 r95308 115 115 116 116 117 /** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU:: idxTargetCpuEflFlavour117 /** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour 118 118 * @{ */ 119 119 #define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE 0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */ … … 125 125 * pVCpu is implicit in the caller context. */ 126 126 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \ 127 (a_aArray[pVCpu->iem.s.idxTargetCpuEflFlavour & IEMTARGETCPU_EFL_BEHAVIOR_MASK]) 128 /** @} */ 127 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK]) 128 /** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can 129 * be used because the host CPU does not support the operation. */ 130 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \ 131 (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK]) 132 /** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimentional 133 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index 134 * into the two. 135 * @sa IEM_SELECT_NATIVE_OR_FALLBACK */ 136 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) 137 # define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \ 138 (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK]) 139 #else 140 # define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \ 141 (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK]) 142 #endif 143 /** @} */ 144 145 /** 146 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature 147 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member). 148 * 149 * On non-x86 hosts, this will shortcut to the fallback w/o checking the 150 * indicator. 151 * 152 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX 153 */ 154 #if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64) 155 # define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \ 156 (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback) 157 #else 158 # define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback) 159 #endif 129 160 130 161 … … 654 685 #endif 655 686 /** For selecting assembly works matching the target CPU EFLAGS behaviour, see 656 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values. This is for instance used for the 657 * BSF & BSR instructions where AMD and Intel CPUs produce different EFLAGS. */ 658 uint8_t idxTargetCpuEflFlavour; 687 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no 688 * native host support and the 2nd for when there is. 689 * 690 * The two values are typically indexed by a g_CpumHostFeatures bit. 691 * 692 * This is for instance used for the BSF & BSR instructions where AMD and 693 * Intel CPUs produce different EFLAGS. */ 694 uint8_t aidxTargetCpuEflFlavour[2]; 659 695 660 696 /** The CPU vendor. */ … … 940 976 /** VEX+ModR/M: reg, vvvv, r/m (memory). */ 941 977 #define IEMOPFORM_VEX_RVM_MEM (IEMOPFORM_VEX_RVM | IEMOPFORM_NOT_MOD3) 978 /** VEX+ModR/M: reg, r/m, vvvv */ 979 #define IEMOPFORM_VEX_RMV 9 980 /** VEX+ModR/M: reg, r/m, vvvv (register). */ 981 #define IEMOPFORM_VEX_RMV_REG (IEMOPFORM_VEX_RMV | IEMOPFORM_MOD3) 982 /** VEX+ModR/M: reg, r/m, vvvv (memory). 
*/ 983 #define IEMOPFORM_VEX_RMV_MEM (IEMOPFORM_VEX_RMV | IEMOPFORM_NOT_MOD3) 984 /** VEX+ModR/M: reg, r/m, imm8 */ 985 #define IEMOPFORM_VEX_RMI 10 986 /** VEX+ModR/M: reg, r/m, imm8 (register). */ 987 #define IEMOPFORM_VEX_RMI_REG (IEMOPFORM_VEX_RMI | IEMOPFORM_MOD3) 988 /** VEX+ModR/M: reg, r/m, imm8 (memory). */ 989 #define IEMOPFORM_VEX_RMI_MEM (IEMOPFORM_VEX_RMI | IEMOPFORM_NOT_MOD3) 942 990 /** VEX+ModR/M: r/m, vvvv, reg */ 943 #define IEMOPFORM_VEX_MVR 9991 #define IEMOPFORM_VEX_MVR 11 944 992 /** VEX+ModR/M: r/m, vvvv, reg (register) */ 945 993 #define IEMOPFORM_VEX_MVR_REG (IEMOPFORM_VEX_MVR | IEMOPFORM_MOD3) … … 971 1019 /** The VEX.L value must be zero (i.e. 128-bit width only). */ 972 1020 #define IEMOPHINT_VEX_L_ZERO RT_BIT_32(13) 1021 /** The VEX.V value must be zero. */ 1022 #define IEMOPHINT_VEX_V_ZERO RT_BIT_32(14) 973 1023 974 1024 /** Hint to IEMAllInstructionPython.py that this macro should be skipped. */ … … 1182 1232 /** @} */ 1183 1233 1234 /** @name Arithmetic three operand operations on double words (binary). 1235 * @{ */ 1236 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2, uint32_t *pEFlags)); 1237 typedef FNIEMAIMPLBINVEXU32 *PFNIEMAIMPLBINVEXU32; 1238 FNIEMAIMPLBINVEXU32 iemAImpl_andn_u32, iemAImpl_andn_u32_fallback; 1239 FNIEMAIMPLBINVEXU32 iemAImpl_bextr_u32, iemAImpl_bextr_u32_fallback; 1240 /** @} */ 1241 1242 /** @name Arithmetic three operand operations on quad words (binary). 1243 * @{ */ 1244 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2, uint32_t *pEFlags)); 1245 typedef FNIEMAIMPLBINVEXU64 *PFNIEMAIMPLBINVEXU64; 1246 FNIEMAIMPLBINVEXU64 iemAImpl_andn_u64, iemAImpl_andn_u64_fallback; 1247 FNIEMAIMPLBINVEXU64 iemAImpl_bextr_u64, iemAImpl_bextr_u64_fallback; 1248 /** @} */ 1249 1250 /** @name Arithmetic three operand operations on double words w/o EFLAGS (binary). 1251 * @{ */ 1252 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU32NOEFL, (uint32_t *pu32Dst, uint32_t u32Src1, uint32_t u32Src2)); 1253 typedef FNIEMAIMPLBINVEXU32NOEFL *PFNIEMAIMPLBINVEXU32NOEFL; 1254 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_sarx_u32, iemAImpl_sarx_u32_fallback; 1255 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shlx_u32, iemAImpl_shlx_u32_fallback; 1256 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_shrx_u32, iemAImpl_shrx_u32_fallback; 1257 FNIEMAIMPLBINVEXU32NOEFL iemAImpl_rorx_u32; 1258 /** @} */ 1259 1260 /** @name Arithmetic three operand operations on quad words w/o EFLAGS (binary). 1261 * @{ */ 1262 typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLBINVEXU64NOEFL, (uint64_t *pu64Dst, uint64_t u64Src1, uint64_t u64Src2)); 1263 typedef FNIEMAIMPLBINVEXU64NOEFL *PFNIEMAIMPLBINVEXU64NOEFL; 1264 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_sarx_u64, iemAImpl_sarx_u64_fallback; 1265 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shlx_u64, iemAImpl_shlx_u64_fallback; 1266 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_shrx_u64, iemAImpl_shrx_u64_fallback; 1267 FNIEMAIMPLBINVEXU64NOEFL iemAImpl_rorx_u64; 1268 /** @} */ 1269 1184 1270 /** @name Exchange memory with register operations. 
1185 1271 * @{ */ … … 1272 1358 FNIEMAIMPLBINU32 iemAImpl_bsr_u32, iemAImpl_bsr_u32_amd, iemAImpl_bsr_u32_intel; 1273 1359 FNIEMAIMPLBINU64 iemAImpl_bsr_u64, iemAImpl_bsr_u64_amd, iemAImpl_bsr_u64_intel; 1360 FNIEMAIMPLBINU16 iemAImpl_lzcnt_u16, iemAImpl_lzcnt_u16_amd, iemAImpl_lzcnt_u16_intel; 1361 FNIEMAIMPLBINU32 iemAImpl_lzcnt_u32, iemAImpl_lzcnt_u32_amd, iemAImpl_lzcnt_u32_intel; 1362 FNIEMAIMPLBINU64 iemAImpl_lzcnt_u64, iemAImpl_lzcnt_u64_amd, iemAImpl_lzcnt_u64_intel; 1363 FNIEMAIMPLBINU16 iemAImpl_tzcnt_u16, iemAImpl_tzcnt_u16_amd, iemAImpl_tzcnt_u16_intel; 1364 FNIEMAIMPLBINU32 iemAImpl_tzcnt_u32, iemAImpl_tzcnt_u32_amd, iemAImpl_tzcnt_u32_intel; 1365 FNIEMAIMPLBINU64 iemAImpl_tzcnt_u64, iemAImpl_tzcnt_u64_amd, iemAImpl_tzcnt_u64_intel; 1274 1366 /** @} */ 1275 1367 … … 2275 2367 */ 2276 2368 #define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr) 2369 2370 2371 /** 2372 * Gets the register (reg) part of a ModR/M encoding, with REX.R added in. 2373 * 2374 * For use during decoding. 2375 */ 2376 #define IEM_GET_MODRM_REG(a_pVCpu, a_bRm) ( (((a_bRm) >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | (a_pVCpu)->iem.s.uRexReg ) 2377 /** 2378 * Gets the r/m part of a ModR/M encoding as a register index, with REX.B added in. 2379 * 2380 * For use during decoding. 2381 */ 2382 #define IEM_GET_MODRM_RM(a_pVCpu, a_bRm) ( ((a_bRm) & X86_MODRM_RM_MASK) | (a_pVCpu)->iem.s.uRexB ) 2277 2383 2278 2384 /** -
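A simplified standalone C sketch (not VBox code) of the two-level selection pattern behind the new IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX and IEM_SELECT_HOST_OR_FALLBACK macros: the first index says whether the host can run the instruction natively, the second picks the Intel or AMD EFLAGS flavour for that case. Worker names and the 2x2 table size are placeholders for illustration only.

#include <stdio.h>

typedef void (*PFNWORKER)(void);
static void WorkerIntelFallback(void) { puts("intel-flavour C fallback"); }
static void WorkerAmdFallback(void)   { puts("amd-flavour C fallback"); }
static void WorkerNative(void)        { puts("native assembly worker"); }

int main(void)
{
    /* [fNative][iFlavour]: without native support both flavours use C
       fallbacks; with native support the matching flavour can use the host. */
    PFNWORKER const s_apfn[2][2] =
    {
        { WorkerIntelFallback, WorkerAmdFallback },
        { WorkerNative,        WorkerAmdFallback },
    };
    int const fNative  = 1; /* e.g. host has BMI1 */
    int const iFlavour = 0; /* 0 = Intel behaviour, 1 = AMD behaviour */
    s_apfn[fNative][iFlavour]();
    return 0;
}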
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r94768 r95308 233 233 234 234 #undef IEMTARGETCPU_EFL_BEHAVIOR_SELECT 235 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) NULL 235 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) NULL 236 #undef IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE 237 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) NULL 238 #undef IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX 239 #define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) NULL 236 240 237 241 #define iemAImpl_fpu_r32_to_r80 NULL … … 551 555 #define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) do { (void)fAvxWrite; (void)fAvxRead; (void)fMcBegin; } while (0) 552 556 553 #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)554 #define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) do { CHK_TYPE(uint16_t, a_GCPtrMem16); (void)fMcBegin; } while (0)555 #define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) do { CHK_TYPE(uint32_t, a_GCPtrMem32); (void)fMcBegin; } while (0)556 #define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)557 #define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u8Dst) == (sizeof(uint8_t))); (void)fMcBegin; } while (0) 558 #define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) do { CHK_TYPE(uint16_t, a_GCPtrMem16); AssertCompile(sizeof(a_u8Dst) == (sizeof(uint8_t))); (void)fMcBegin; } while (0) 559 #define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) do { CHK_TYPE(uint32_t, a_GCPtrMem32); AssertCompile(sizeof(a_u8Dst) == (sizeof(uint8_t))); (void)fMcBegin; } while (0) 560 #define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u16Dst) == (sizeof(uint16_t))); (void)fMcBegin; } while (0) 557 561 #define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int16_t, a_i16Dst); (void)fMcBegin; } while (0) 558 #define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)562 #define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u32Dst) == (sizeof(uint32_t))); (void)fMcBegin; } while (0) 559 563 #define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int32_t, a_i32Dst); (void)fMcBegin; } while (0) 560 #define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)561 #define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)562 #define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)564 #define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 565 #define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 566 #define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 563 567 #define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int64_t, a_i64Dst); (void)fMcBegin; } while (0) 564 568 … … 572 576 
do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_offDisp); CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0) 573 577 574 #define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)575 #define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)576 #define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)577 #define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)578 #define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)579 #define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)580 #define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)581 #define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)582 #define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)583 #define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)584 #define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)585 #define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)578 #define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u16Dst) == (sizeof(uint16_t))); (void)fMcBegin; } while (0) 579 #define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u32Dst) == (sizeof(uint32_t))); (void)fMcBegin; } while (0) 580 #define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 581 #define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u32Dst) == (sizeof(uint32_t))); (void)fMcBegin; } while (0) 582 #define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 583 #define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 584 #define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u16Dst) == (sizeof(uint16_t))); (void)fMcBegin; } while (0) 585 #define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u32Dst) == (sizeof(uint32_t))); (void)fMcBegin; } while (0) 586 #define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 587 #define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u32Dst) == (sizeof(uint32_t))); (void)fMcBegin; } while (0) 588 #define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == 
(sizeof(uint64_t))); (void)fMcBegin; } while (0) 589 #define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); AssertCompile(sizeof(a_u64Dst) == (sizeof(uint64_t))); (void)fMcBegin; } while (0) 586 590 #define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT32U, a_r32Dst); (void)fMcBegin; } while (0) 587 591 #define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT64U, a_r64Dst); (void)fMcBegin; } while (0)