Changeset 36851 in vbox
- Timestamp: Apr 27, 2011 9:08:01 AM (14 years ago)
- svn:sync-xref-src-repo-rev: 71422
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(diff: r36849 -> r36851; lines prefixed with + were added, - removed)

@@ -172 +172 @@
 
 /**
+ * Function table for a double precision shift operator providing implementation
+ * based on operand size.
+ */
+typedef struct IEMOPSHIFTDBLSIZES
+{
+    PFNIEMAIMPLSHIFTDBLU16  pfnNormalU16;
+    PFNIEMAIMPLSHIFTDBLU32  pfnNormalU32;
+    PFNIEMAIMPLSHIFTDBLU64  pfnNormalU64;
+} IEMOPSHIFTDBLSIZES;
+/** Pointer to a double precision shift function table. */
+typedef IEMOPSHIFTDBLSIZES const *PCIEMOPSHIFTDBLSIZES;
+
+
+/**
  * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
  */

@@ -503 +517 @@
     iemAImpl_idiv_u32,
     iemAImpl_idiv_u64
+};
+
+/** Function table for the SHLD instruction */
+static const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
+{
+    iemAImpl_shld_u16,
+    iemAImpl_shld_u32,
+    iemAImpl_shld_u64,
+};
+
+/** Function table for the SHRD instruction */
+static const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
+{
+    iemAImpl_shrd_u16,
+    iemAImpl_shrd_u32,
+    iemAImpl_shrd_u64,
 };

@@ -3511 +3541 @@
 
     /*
-     * Get the selector table base and check bounds.
+     * Get the selector table base and calculate the entry address.
      */
     RTGCPTR GCPtr = uSel & X86_SEL_LDT
                   ? /* … */
                   : pCtx->gdtr.pGdt;
     GCPtr += uSel & X86_SEL_MASK;
-    GCPtr += 2 + 2;
-    uint32_t volatile *pu32; /** @todo Does the CPU do a 32-bit or 8-bit access here? */
-    VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
-    if (rcStrict == VINF_SUCCESS)
-    {
-        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but is preceded by u8BaseHigh1. */
-
-        rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
-    }
-
-    return rcStrict;
+
+    /*
+     * ASMAtomicBitSet will assert if the address is misaligned, so do some
+     * ugly stuff to avoid this.  This will make sure it's an atomic access
+     * as well as more or less remove any question about 8-bit vs 32-bit accesses.
+     */
+    VBOXSTRICTRC rcStrict;
+    uint32_t volatile *pu32;
+    if ((GCPtr & 3) == 0)
+    {
+        /* The normal case: map the 32 bits around the accessed bit (40). */
+        GCPtr += 2 + 2;
+        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
+    }
+    else
+    {
+        /* The misaligned GDT/LDT case: map the whole descriptor entry. */
+        rcStrict = iemMemMap(pIemCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_DATA_RW);
+        if (rcStrict != VINF_SUCCESS)
+            return rcStrict;
+        switch (GCPtr & 3)
+        {
+            case 0: ASMAtomicBitSet(pu32,                         40);      break;
+            case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 - 24); break;
+            case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 - 16); break;
+            case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 - 8);  break;
+        }
+    }
+
+    return iemMemCommitAndUnmap(pIemCpu, (void *)pu32, IEM_ACCESS_DATA_RW);
 }

@@ -3804 +3856 @@
 #define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1)          (a_pfn)((a0), (a1))
 #define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2)      (a_pfn)((a0), (a1), (a2))
+#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3)  (a_pfn)((a0), (a1), (a2), (a3))
 #define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
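The switch in the misaligned branch above packs two constraints into each case: ASMAtomicBitSet must be handed a 4-byte-aligned base, and the bit that gets set must still be absolute bit 40 of the descriptor, i.e. bit 0 of the type byte (byte 5). A minimal standalone sketch (plain C, not VBox code; BitSet is a hypothetical stand-in for ASMAtomicBitSet) that checks the byte/bit arithmetic of all four cases:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for ASMAtomicBitSet: set bit iBit, counting up from pv. */
static void BitSet(volatile void *pv, int32_t iBit)
{
    ((volatile uint8_t *)pv)[iBit / 8] |= (uint8_t)(1 << (iBit % 8));
}

int main(void)
{
    for (unsigned uLow2 = 0; uLow2 < 4; uLow2++)    /* plays the role of GCPtr & 3 */
    {
        uint8_t abDesc[8] = {0};    /* the mapped descriptor bytes                 */
        uint8_t *pu32 = abDesc;     /* stands in for the pointer iemMemMap returns */
        switch (uLow2)              /* mirrors the switch in the hunk above        */
        {
            case 0: BitSet(pu32,     40);      break;
            case 1: BitSet(pu32 + 3, 40 - 24); break;
            case 2: BitSet(pu32 + 2, 40 - 16); break;
            case 3: BitSet(pu32 + 1, 40 - 8);  break;
        }
        printf("GCPtr&3=%u -> descriptor byte 5 = 0x%02x (expect 0x01)\n", uLow2, abDesc[5]);
    }
    return 0;
}

Each case also lands the atomic operation on a dword-aligned address: when GCPtr & 3 is 1, 2 or 3, stepping the base forward by 3, 2 or 1 bytes realigns it, and the bit index is reduced by the same distance in bits.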
trunk/src/VBox/VMM/VMMAll/IEMAllAImpl.asm
(diff: r36840 -> r36851)

@@ -630 +630 @@
 
 ;;
-; Macro for implemeting a multiplication operations.
+; Macro for implementing a double precision shift operation.
+;
+; This will generate code for the 16, 32 and 64 bit accesses, except on
+; 32-bit systems where the 64-bit accesses require hand coding.
+;
+; The functions take the destination operand (r/m) in A0, the source (reg) in
+; A1, the shift count in A2 and a pointer to the eflags variable/register in A3.
+;
+; @param        1       The instruction mnemonic.
+; @param        2       The modified flags.
+; @param        3       The undefined flags.
+;
+; Makes ASSUMPTIONS about A0, A1, A2 and A3 assignments.
+;
+%macro IEMIMPL_SHIFT_DBL_OP 3
+BEGINPROC iemAImpl_ %+ %1 %+ _u16
+        PROLOGUE_4_ARGS
+        IEM_MAYBE_LOAD_FLAGS    A3, %2, %3
+ %ifdef ASM_CALL64_GCC
+        xchg    A3, A2
+        %1      [A0], A1_16, cl
+        xchg    A3, A2
+ %else
+        xchg    A0, A2
+        %1      [A2], A1_16, cl
+ %endif
+        IEM_SAVE_FLAGS          A3, %2, %3
+        EPILOGUE_4_ARGS
+        ret
+ENDPROC iemAImpl_ %+ %1 %+ _u16
+
+BEGINPROC iemAImpl_ %+ %1 %+ _u32
+        PROLOGUE_4_ARGS
+        IEM_MAYBE_LOAD_FLAGS    A3, %2, %3
+ %ifdef ASM_CALL64_GCC
+        xchg    A3, A2
+        %1      [A0], A1_32, cl
+        xchg    A3, A2
+ %else
+        xchg    A0, A2
+        %1      [A2], A1_32, cl
+ %endif
+        IEM_SAVE_FLAGS          A3, %2, %3
+        EPILOGUE_4_ARGS
+        ret
+ENDPROC iemAImpl_ %+ %1 %+ _u32
+
+ %ifdef RT_ARCH_AMD64
+BEGINPROC iemAImpl_ %+ %1 %+ _u64
+        PROLOGUE_4_ARGS
+        IEM_MAYBE_LOAD_FLAGS    A3, %2, %3
+  %ifdef ASM_CALL64_GCC
+        xchg    A3, A2
+        %1      [A0], A1, cl
+        xchg    A3, A2
+  %else
+        xchg    A0, A2
+        %1      [A2], A1, cl
+  %endif
+        IEM_SAVE_FLAGS          A3, %2, %3
+        EPILOGUE_4_ARGS
+        ret
+ENDPROC iemAImpl_ %+ %1 %+ _u64
+ %else ; stub it for now - later, replace with hand coded stuff.
+BEGINPROC iemAImpl_ %+ %1 %+ _u64
+        int3
+        ret
+ENDPROC iemAImpl_ %+ %1 %+ _u64
+ %endif ; !RT_ARCH_AMD64
+
+%endmacro
+
+IEMIMPL_SHIFT_DBL_OP shld, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
+IEMIMPL_SHIFT_DBL_OP shrd, (X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF), (X86_EFL_AF)
+
+
+;;
+; Macro for implementing multiplication operations.
 ;
 ; This will generate code for the 8, 16, 32 and 64 bit accesses, except on

@@ -736 +813 @@
 
 ;;
-; Macro for implemeting a division operations.
+; Macro for implementing division operations.
 ;
 ; This will generate code for the 8, 16, 32 and 64 bit accesses, except on
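Each instantiation simply executes the host's own shld/shrd against the guest operands; the xchg juggling exists because the hardware instruction insists on the count living in CL, and neither 64-bit calling convention delivers the third argument there. As a rough functional reference, here is a hedged C model (invented for illustration; the build uses the assembly above, which gets the EFLAGS results from the hardware for free) of what iemAImpl_shld_u16 computes, flag updates omitted:

#include <stdint.h>

static void shld_u16_model(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift)
{
    cShift &= 31;       /* the CPU masks the shift count to 5 bits                */
    if (cShift == 0)
        return;         /* a count of zero leaves destination (and flags) alone  */
    if (cShift > 16)
        return;         /* undefined for 16-bit operands on real CPUs; model punts */
    /* Shift dst left, filling the vacated low bits from the top of src,
       as if shifting the 32-bit concatenation dst:src left by cShift. */
    uint32_t uCat = ((uint32_t)*pu16Dst << 16) | u16Src;
    *pu16Dst = (uint16_t)(uCat >> (16 - cShift));
}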
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
(diff: r36849 -> r36851)

@@ -2663 +2663 @@
 /** Opcode 0x0f 0xa3. */
 FNIEMOP_STUB(iemOp_bt_Ev_Gv);
+
+
+/**
+ * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
+ */
+FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
+    IEMOP_HLP_NO_LOCK_PREFIX();
+
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
+        IEMOP_HLP_NO_LOCK_PREFIX();
+
+        switch (pIemCpu->enmEffOpSize)
+        {
+            case IEMMODE_16BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint16_t *,    pu16Dst,                0);
+                IEM_MC_ARG(uint16_t,      u16Src,                 1);
+                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
+                IEM_MC_ARG(uint32_t *,    pEFlags,                3);
+
+                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_32BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint32_t *,    pu32Dst,                0);
+                IEM_MC_ARG(uint32_t,      u32Src,                 1);
+                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
+                IEM_MC_ARG(uint32_t *,    pEFlags,                3);
+
+                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_64BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint64_t *,    pu64Dst,                0);
+                IEM_MC_ARG(uint64_t,      u64Src,                 1);
+                IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
+                IEM_MC_ARG(uint32_t *,    pEFlags,                3);
+
+                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+    }
+    else
+    {
+        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
+
+        switch (pIemCpu->enmEffOpSize)
+        {
+            case IEMMODE_16BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint16_t *,   pu16Dst,         0);
+                IEM_MC_ARG(uint16_t,     u16Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
+                IEM_MC_ASSIGN(cShiftArg, cShift);
+                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_32BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint32_t *,   pu32Dst,         0);
+                IEM_MC_ARG(uint32_t,     u32Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
+                IEM_MC_ASSIGN(cShiftArg, cShift);
+                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_64BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint64_t *,   pu64Dst,         0);
+                IEM_MC_ARG(uint64_t,     u64Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                uint8_t cShift; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &cShift);
+                IEM_MC_ASSIGN(cShiftArg, cShift);
+                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+    }
+}
+
+
+/**
+ * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
+ */
+FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_BYTE(pIemCpu, &bRm);
+    IEMOP_HLP_NO_LOCK_PREFIX();
+
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        IEMOP_HLP_NO_LOCK_PREFIX();
+
+        switch (pIemCpu->enmEffOpSize)
+        {
+            case IEMMODE_16BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint16_t *, pu16Dst,   0);
+                IEM_MC_ARG(uint16_t,   u16Src,    1);
+                IEM_MC_ARG(uint8_t,    cShiftArg, 2);
+                IEM_MC_ARG(uint32_t *, pEFlags,   3);
+
+                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_32BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint32_t *, pu32Dst,   0);
+                IEM_MC_ARG(uint32_t,   u32Src,    1);
+                IEM_MC_ARG(uint8_t,    cShiftArg, 2);
+                IEM_MC_ARG(uint32_t *, pEFlags,   3);
+
+                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_64BIT:
+                IEM_MC_BEGIN(4, 0);
+                IEM_MC_ARG(uint64_t *, pu64Dst,   0);
+                IEM_MC_ARG(uint64_t,   u64Src,    1);
+                IEM_MC_ARG(uint8_t,    cShiftArg, 2);
+                IEM_MC_ARG(uint32_t *, pEFlags,   3);
+
+                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_REF_EFLAGS(pEFlags);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
+
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+    }
+    else
+    {
+        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
+
+        switch (pIemCpu->enmEffOpSize)
+        {
+            case IEMMODE_16BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint16_t *,   pu16Dst,         0);
+                IEM_MC_ARG(uint16_t,     u16Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_32BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint32_t *,   pu32Dst,         0);
+                IEM_MC_ARG(uint32_t,     u32Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            case IEMMODE_64BIT:
+                IEM_MC_BEGIN(4, 2);
+                IEM_MC_ARG(uint64_t *,   pu64Dst,         0);
+                IEM_MC_ARG(uint64_t,     u64Src,          1);
+                IEM_MC_ARG(uint8_t,      cShiftArg,       2);
+                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
+                IEM_MC_LOCAL(RTGCPTR,    GCPtrEffDst);
+
+                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm);
+                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
+                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
+                IEM_MC_FETCH_EFLAGS(EFlags);
+                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
+                IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
+
+                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
+                IEM_MC_COMMIT_EFLAGS(EFlags);
+                IEM_MC_ADVANCE_RIP();
+                IEM_MC_END();
+                return VINF_SUCCESS;
+
+            IEM_NOT_REACHED_DEFAULT_CASE_RET();
+        }
+    }
+}
+
+
 /** Opcode 0x0f 0xa4. */
-FNIEMOP_STUB(iemOp_shld_Ev_Gv_Ib);
+FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
+{
+    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
+    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
+}
+
+
 /** Opcode 0x0f 0xa7. */
-FNIEMOP_STUB(iemOp_shld_Ev_Gv_CL);
+FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
+{
+    IEMOP_MNEMONIC("shld Ev,Gv,CL");
+    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
+}

@@ -2691 +2991 @@
 /** Opcode 0x0f 0xab. */
 FNIEMOP_STUB(iemOp_bts_Ev_Gv);
+
+
 /** Opcode 0x0f 0xac. */
-FNIEMOP_STUB(iemOp_shrd_Ev_Gv_Ib);
+FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
+{
+    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
+    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
+}
+
+
 /** Opcode 0x0f 0xad. */
-FNIEMOP_STUB(iemOp_shrd_Ev_Gv_CL);
+FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
+{
+    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
+    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
+}
+
+
 /** Opcode 0x0f 0xae. */
 FNIEMOP_STUB(iemOp_Grp15);

@@ -2723 +3037 @@
     /* The source cannot be a register. */
     if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-        return IEMOP_RAISE_INVALID_LOCK_PREFIX();
+        return IEMOP_RAISE_INVALID_OPCODE();
     uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
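Every case in the two workers repeats the same ModR/M field extraction, so it is worth seeing in isolation. A small self-contained sketch (the constants are written out numerically here; in the code above they come from x86.h, and pIemCpu->uRexReg/uRexB arrive pre-shifted so they are OR'ed in directly):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  bRm   = 0xC8;   /* example byte: mod=3, reg=1, r/m=0        */
    unsigned uRexR = 0;      /* REX.R supplies bit 3 of the reg field    */
    unsigned uRexB = 0;      /* REX.B supplies bit 3 of the r/m field    */

    unsigned iReg     = ((bRm >> 3) & 7) | (uRexR << 3); /* source, Gv      */
    unsigned iRm      = (bRm & 7)        | (uRexB << 3); /* destination, Ev */
    int      fRegForm = (bRm & 0xC0) == 0xC0;            /* mod == 3?       */

    /* mod == 3 selects the register form (first switch in each worker);
       anything else is a memory operand and takes the second switch.       */
    printf("reg=%u r/m=%u register-form=%d\n", iReg, iRm, fRegForm);
    return 0;
}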
trunk/src/VBox/VMM/include/IEMInternal.h
(diff: r36849 -> r36851)

@@ -466 +466 @@
 /** @} */
 
+/** @name Double precision shifts
+ * @{ */
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU16,(uint16_t *pu16Dst, uint16_t u16Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU16 *PFNIEMAIMPLSHIFTDBLU16;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU32,(uint32_t *pu32Dst, uint32_t u32Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU32 *PFNIEMAIMPLSHIFTDBLU32;
+typedef IEM_DECL_IMPL_TYPE(void, FNIEMAIMPLSHIFTDBLU64,(uint64_t *pu64Dst, uint64_t u64Src, uint8_t cShift, uint32_t *pEFlags));
+typedef FNIEMAIMPLSHIFTDBLU64 *PFNIEMAIMPLSHIFTDBLU64;
+FNIEMAIMPLSHIFTDBLU16 iemAImpl_shld_u16;
+FNIEMAIMPLSHIFTDBLU32 iemAImpl_shld_u32;
+FNIEMAIMPLSHIFTDBLU64 iemAImpl_shld_u64;
+FNIEMAIMPLSHIFTDBLU16 iemAImpl_shrd_u16;
+FNIEMAIMPLSHIFTDBLU32 iemAImpl_shrd_u32;
+FNIEMAIMPLSHIFTDBLU64 iemAImpl_shrd_u64;
+/** @} */
+
 /** @name Signed multiplication operations (thrown in with the binary ops).
  * @{ */
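A hedged usage sketch of these typedefs (the test scaffolding is invented; the real consumers are the g_iemAImpl_shld/g_iemAImpl_shrd tables and the common workers above):

#include <stdint.h>

/* Assumes the typedefs and iemAImpl_* declarations above are in scope. */
static const struct
{
    PFNIEMAIMPLSHIFTDBLU16 pfnNormalU16;
    PFNIEMAIMPLSHIFTDBLU32 pfnNormalU32;
    PFNIEMAIMPLSHIFTDBLU64 pfnNormalU64;
} s_Shld = { iemAImpl_shld_u16, iemAImpl_shld_u32, iemAImpl_shld_u64 };

static void shldExample(void)
{
    uint16_t u16Dst  = 0x8001;
    uint32_t fEFlags = 0;
    s_Shld.pfnNormalU16(&u16Dst, 0xffff /*u16Src*/, 4 /*cShift*/, &fEFlags);
    /* u16Dst is now 0x001f: dst shifted left by four, with the top four
       source bits shifted into the low end; fEFlags holds the status flags. */
}

Keeping the operand-size dispatch in a small struct of function pointers is what lets one decoder body serve all three operand sizes and both instructions.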