Changeset 65610 in vbox for trunk/src/VBox/VMM
Timestamp: Feb 3, 2017, 8:50:03 PM
File: 1 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r65609)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r65610)
@@ -2606,139 +2606,172 @@
 FNIEMOP_STUB(iemOp_vmaxsd_Vsd_Hsd_Wsd);
 
-
 /**
- * Common worker for SSE2 and MMX instructions on the forms:
- *      pxxxx xmm1, xmm2/mem128
+ * Common worker for MMX instructions on the forms:
  *      pxxxx mm1, mm2/mem32
  *
  * The 2nd operand is the first half of a register, which in the memory case
  * means a 32-bit memory access for MMX and 128-bit aligned 64-bit or 128-bit
  * memory accessed for MMX.
  *
  * Exceptions type 4.
  */
-FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
+FNIEMOP_DEF_1(iemOpCommonMmx_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
 {
     uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
-    switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
-    {
-        case IEM_OP_PRF_SIZE_OP: /* SSE */
-            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-            {
-                /*
-                 * Register, register.
-                 */
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_BEGIN(2, 0);
-                IEM_MC_ARG(uint128_t *,          pDst, 0);
-                IEM_MC_ARG(uint64_t const *,     pSrc, 1);
-                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-                IEM_MC_PREPARE_SSE_USAGE();
-                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-                IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
-                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            else
-            {
-                /*
-                 * Register, memory.
-                 */
-                IEM_MC_BEGIN(2, 2);
-                IEM_MC_ARG(uint128_t *,                 pDst,       0);
-                IEM_MC_LOCAL(uint64_t,                  uSrc);
-                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
-                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
-
-                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
-                IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-
-                IEM_MC_PREPARE_SSE_USAGE();
-                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
-                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
-
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            return VINF_SUCCESS;
-
-        case 0: /* MMX */
-            if (!pImpl->pfnU64)
-                return IEMOP_RAISE_INVALID_OPCODE();
-            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
-            {
-                /*
-                 * Register, register.
-                 */
-                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
-                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_BEGIN(2, 0);
-                IEM_MC_ARG(uint64_t *,          pDst, 0);
-                IEM_MC_ARG(uint32_t const *,    pSrc, 1);
-                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
-                IEM_MC_PREPARE_FPU_USAGE();
-                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
-                IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
-                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            else
-            {
-                /*
-                 * Register, memory.
-                 */
-                IEM_MC_BEGIN(2, 2);
-                IEM_MC_ARG(uint64_t *,                  pDst,       0);
-                IEM_MC_LOCAL(uint32_t,                  uSrc);
-                IEM_MC_ARG_LOCAL_REF(uint32_t const *,  pSrc, uSrc, 1);
-                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
-
-                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
-                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
-                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
-                IEM_MC_FETCH_MEM_U32(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
-
-                IEM_MC_PREPARE_FPU_USAGE();
-                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
-                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
-
-                IEM_MC_ADVANCE_RIP();
-                IEM_MC_END();
-            }
-            return VINF_SUCCESS;
-
-        default:
-            return IEMOP_RAISE_INVALID_OPCODE();
-    }
-}
-
-
-/** Opcode 0x0f 0x60. */
-FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpcklbw, "punpcklbw");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
-}
-
-
-/** Opcode 0x0f 0x61. */
-FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpcklwd, "punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
-}
-
-
-/** Opcode 0x0f 0x62. */
-FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpckldq, "punpckldq");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
-}
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint128_t *,          pDst, 0);
+        IEM_MC_ARG(uint64_t const *,     pSrc, 1);
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
+        IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint128_t *,                 pDst,       0);
+        IEM_MC_LOCAL(uint64_t,                  uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_SSE_USAGE();
+        IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
+        IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Common worker for SSE2 instructions on the forms:
+ *      pxxxx xmm1, xmm2/mem128
+ *
+ * The 2nd operand is the first half of a register, which in the memory case
+ * means a 32-bit memory access for MMX and 128-bit aligned 64-bit or 128-bit
+ * memory accessed for MMX.
+ *
+ * Exceptions type 4.
+ */
+FNIEMOP_DEF_1(iemOpCommonSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
+{
+    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
+    if (!pImpl->pfnU64)
+        return IEMOP_RAISE_INVALID_OPCODE();
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * Register, register.
+         */
+        /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
+        /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_BEGIN(2, 0);
+        IEM_MC_ARG(uint64_t *,          pDst, 0);
+        IEM_MC_ARG(uint32_t const *,    pSrc, 1);
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
+        IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    else
+    {
+        /*
+         * Register, memory.
+         */
+        IEM_MC_BEGIN(2, 2);
+        IEM_MC_ARG(uint64_t *,                  pDst,       0);
+        IEM_MC_LOCAL(uint32_t,                  uSrc);
+        IEM_MC_ARG_LOCAL_REF(uint32_t const *,  pSrc, uSrc, 1);
+        IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);
+
+        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
+        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+        IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
+        IEM_MC_FETCH_MEM_U32(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
+
+        IEM_MC_PREPARE_FPU_USAGE();
+        IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
+
+        IEM_MC_ADVANCE_RIP();
+        IEM_MC_END();
+    }
+    return VINF_SUCCESS;
+}
+
+
+/** Opcode 0x0f 0x60 - punpcklbw Pq, Qd */
+FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpcklbw, "punpcklbw Pq, Qd");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_LowLow_To_Full, &g_iemAImpl_punpcklbw);
+}
+
+/** Opcode 0x66 0x0f 0x60 - vpunpcklbw Vx, Hx, W */
+FNIEMOP_DEF(iemOp_vpunpcklbw_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpcklbw, "vpunpcklbw Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_LowLow_To_Full, &g_iemAImpl_punpcklbw);
+}
+
+/* Opcode 0xf3 0x0f 0x60 - invalid */
+
+
+/** Opcode 0x0f 0x61 - punpcklwd Pq, Qd */
+FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpcklwd, "punpcklwd Pq, Qd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
+    return FNIEMOP_CALL_1(iemOpCommonMmx_LowLow_To_Full, &g_iemAImpl_punpcklwd);
+}
+
+/** Opcode 0x66 0x0f 0x61 - vpunpcklwd Vx, Hx, Wx */
+FNIEMOP_DEF(iemOp_vpunpcklwd_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpcklwd, "vpunpcklwd Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
+}
+
+/* Opcode 0xf3 0x0f 0x61 - invalid */
+
+
+/** Opcode 0x0f 0x62 - punpckldq Pq, Qd */
+FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd)
+{
+    IEMOP_MNEMONIC(punpckldq, "punpckldq Pq, Qd");
+    return FNIEMOP_CALL_1(iemOpCommonMmx_LowLow_To_Full, &g_iemAImpl_punpckldq);
+}
+
+/** Opcode 0x66 0x0f 0x62 - vpunpckldq Vx, Hx, Wx */
+FNIEMOP_DEF(iemOp_vpunpckldq_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpckldq, "vpunpckldq Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
+}
+
+/* Opcode 0xf3 0x0f 0x62 - invalid */
+
 
 
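The "LowLow_To_Full" in both worker names describes the operation they dispatch to: the low half of each operand is interleaved, element by element, into a full-width result. That is also why the MMX memory form fetches only 32 bits (IEM_MC_FETCH_MEM_U32) while the SSE form fetches an aligned 64 bits. A minimal reference sketch of that semantic for punpcklbw's 64-bit form follows; the function name and test values are illustrative and assume nothing about what g_iemAImpl_punpcklbw actually points to.

#include <stdint.h>
#include <stdio.h>

/* Reference model of MMX punpcklbw: interleave the low four bytes of the
 * destination (even result bytes) with the low four bytes of the source
 * (odd result bytes). */
static uint64_t punpcklbw_ref(uint64_t uDst, uint32_t uSrc)
{
    uint64_t uResult = 0;
    for (unsigned iByte = 0; iByte < 4; iByte++)
    {
        uResult |= (uint64_t)((uDst >> (iByte * 8)) & 0xff) << (iByte * 16);     /* dst byte i -> result byte 2*i     */
        uResult |= (uint64_t)((uSrc >> (iByte * 8)) & 0xff) << (iByte * 16 + 8); /* src byte i -> result byte 2*i + 1 */
    }
    return uResult;
}

int main(void)
{
    /* Destination bytes 01..04 interleave with source bytes d1..d4,
     * giving d4 04 d3 03 d2 02 d1 01 (most significant byte first). */
    printf("%016llx\n", (unsigned long long)punpcklbw_ref(UINT64_C(0x0807060504030201), UINT32_C(0xd4d3d2d1)));
    return 0;
}

The high four bytes of the destination (05..08 here) are simply discarded, which is what makes a 32-bit source fetch sufficient.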
@@ -2913,10 +2946,15 @@
 
 
-/** Opcode 0x0f 0x6c. */
-FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
-{
-    IEMOP_MNEMONIC(punpcklqdq, "punpcklqdq");
-    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
-}
+/* Opcode 0x0f 0x6c - invalid */
+
+/** Opcode 0x66 0x0f 0x6c - vpunpcklqdq Vx, Hx, Wx */
+FNIEMOP_DEF(iemOp_vpunpcklqdq_Vx_Hx_Wx)
+{
+    IEMOP_MNEMONIC(vpunpcklqdq, "vpunpcklqdq Vx, Hx, Wx");
+    return FNIEMOP_CALL_1(iemOpCommonSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
+}
+
+/* Opcode 0xf3 0x0f 0x6c - invalid */
+/* Opcode 0xf2 0x0f 0x6c - invalid */
 
 
@@ -8021,7 +8059,7 @@
     /* 0x5f */  iemOp_vmaxps_Vps_Hps_Wps,   iemOp_vmaxpd_Vpd_Hpd_Wpd,   iemOp_vmaxss_Vss_Hss_Wss,   iemOp_vmaxsd_Vsd_Hsd_Wsd,
 
-    /* 0x60 */  IEMOP_X4(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq),
-    /* 0x61 */  IEMOP_X4(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq),
-    /* 0x62 */  IEMOP_X4(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq),
+    /* 0x60 */  iemOp_punpcklbw_Pq_Qd,      iemOp_vpunpcklbw_Vx_Hx_Wx,  iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
+    /* 0x61 */  iemOp_punpcklwd_Pq_Qd,      iemOp_vpunpcklwd_Vx_Hx_Wx,  iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
+    /* 0x62 */  iemOp_punpckldq_Pq_Qd,      iemOp_vpunpckldq_Vx_Hx_Wx,  iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
     /* 0x63 */  iemOp_packsswb_Pq_Qq,       iemOp_vpacksswb_Vx_Hx_Wx,   iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
     /* 0x64 */  iemOp_pcmpgtb_Pq_Qq,        iemOp_vpcmpgtb_Vx_Hx_Wx,    iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
@@ -8033,5 +8071,5 @@
     /* 0x6a */  IEMOP_X4(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq),
     /* 0x6b */  IEMOP_X4(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq),
-    /* 0x6c */  IEMOP_X4(iemOp_punpcklqdq_Vdq_Wdq),
+    /* 0x6c */  iemOp_InvalidNeedRM,        iemOp_vpunpcklqdq_Vx_Hx_Wx, iemOp_InvalidNeedRM,        iemOp_InvalidNeedRM,
     /* 0x6d */  IEMOP_X4(iemOp_punpckhqdq_Vdq_Wdq),
     /* 0x6e */  IEMOP_X4(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey),
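The table hunks show the layout this changeset migrates toward: each two-byte opcode row now lists four handlers, one per mandatory-prefix column (none, 0x66, 0xF3, 0xF2), whereas the old IEMOP_X4 macro expanded a single combined handler into all four slots and left prefix discrimination to the worker's switch on fPrefixes. Below is a hedged sketch of how such a four-column map can be dispatched; the names, flag bits, and the simplified prefix-precedence rule are all illustrative, not IEM's actual decode loop.

#include <stdint.h>

typedef int (*PFNOPCODE)(void);        /* stand-in for IEM's FNIEMOP handler type */

/* Hypothetical flat map mirroring the rows in this changeset:
 * column 0 = no mandatory prefix, 1 = 0x66, 2 = 0xF3, 3 = 0xF2. */
extern PFNOPCODE g_apfnTwoByteMap[256 * 4];

#define MY_PRF_SIZE_OP  1u  /* 0x66 seen; illustrative flag value, not IEM_OP_PRF_* */
#define MY_PRF_REPZ     2u  /* 0xF3 seen */
#define MY_PRF_REPNZ    4u  /* 0xF2 seen */

static int dispatchTwoByte(uint8_t bOpcode, uint32_t fPrefixes)
{
    /* Simplified precedence; real decoders resolve competing mandatory
     * prefixes by their order in the instruction stream. */
    unsigned idxPrefix = (fPrefixes & MY_PRF_REPNZ)   ? 3
                       : (fPrefixes & MY_PRF_REPZ)    ? 2
                       : (fPrefixes & MY_PRF_SIZE_OP) ? 1
                       :                                0;
    return g_apfnTwoByteMap[(unsigned)bOpcode * 4 + idxPrefix]();
}

With this layout the split falls out of the table itself: punpcklbw Pq, Qd sits in column 0, vpunpcklbw Vx, Hx, Wx in column 1, and the unused 0xF3/0xF2 columns point at iemOp_InvalidNeedRM, so each worker no longer needs the prefix switch or its default invalid-opcode case.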