Changeset 104419 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
- Timestamp:
- Apr 24, 2024 2:32:29 PM (10 months ago)
- Files:
- 1 edited (trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp)
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp
r104195 r104419 916 916 917 917 /** 918 * Implements an indirect call.919 *920 * @param uNewPC The new program counter (RIP) value (loaded from the921 * operand).922 */923 IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)924 {925 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;926 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)927 {928 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);929 if (rcStrict == VINF_SUCCESS)930 {931 pVCpu->cpum.GstCtx.rip = uNewPC;932 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);933 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);934 }935 return rcStrict;936 }937 return iemRaiseGeneralProtectionFault0(pVCpu);938 }939 940 941 /**942 * Implements a 16-bit relative call.943 *944 * @param offDisp The displacment offset.945 */946 IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)947 {948 uint16_t const uOldPC = pVCpu->cpum.GstCtx.ip + cbInstr;949 uint16_t const uNewPC = uOldPC + offDisp;950 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)951 {952 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pVCpu, uOldPC);953 if (rcStrict == VINF_SUCCESS)954 {955 pVCpu->cpum.GstCtx.rip = uNewPC;956 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);957 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);958 }959 return rcStrict;960 }961 return iemRaiseGeneralProtectionFault0(pVCpu);962 }963 964 965 /**966 * Implements a 32-bit indirect call.967 *968 * @param uNewPC The new program counter (RIP) value (loaded from the969 * operand).970 */971 IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)972 {973 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;974 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)975 {976 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);977 if (rcStrict == VINF_SUCCESS)978 {979 pVCpu->cpum.GstCtx.rip = uNewPC;980 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);981 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);982 }983 return rcStrict;984 }985 return iemRaiseGeneralProtectionFault0(pVCpu);986 }987 988 989 /**990 * Implements a 
32-bit relative call.991 *992 * @param offDisp The displacment offset.993 */994 IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)995 {996 uint32_t const uOldPC = pVCpu->cpum.GstCtx.eip + cbInstr;997 uint32_t const uNewPC = uOldPC + offDisp;998 if (uNewPC <= pVCpu->cpum.GstCtx.cs.u32Limit)999 {1000 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pVCpu, uOldPC);1001 if (rcStrict == VINF_SUCCESS)1002 {1003 pVCpu->cpum.GstCtx.rip = uNewPC;1004 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);1005 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);1006 }1007 return rcStrict;1008 }1009 return iemRaiseGeneralProtectionFault0(pVCpu);1010 }1011 1012 1013 /**1014 * Implements a 64-bit indirect call.1015 *1016 * @param uNewPC The new program counter (RIP) value (loaded from the1017 * operand).1018 */1019 IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)1020 {1021 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;1022 if (IEM_IS_CANONICAL(uNewPC))1023 {1024 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);1025 if (rcStrict == VINF_SUCCESS)1026 {1027 pVCpu->cpum.GstCtx.rip = uNewPC;1028 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);1029 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);1030 }1031 return rcStrict;1032 }1033 return iemRaiseGeneralProtectionFault0(pVCpu);1034 }1035 1036 1037 /**1038 * Implements a 64-bit relative call.1039 *1040 * @param offDisp The displacment offset.1041 */1042 IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)1043 {1044 uint64_t const uOldPC = pVCpu->cpum.GstCtx.rip + cbInstr;1045 uint64_t const uNewPC = uOldPC + offDisp;1046 if (IEM_IS_CANONICAL(uNewPC))1047 {1048 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pVCpu, uOldPC);1049 if (rcStrict == VINF_SUCCESS)1050 {1051 pVCpu->cpum.GstCtx.rip = uNewPC;1052 IEM_FLUSH_PREFETCH_LIGHT(pVCpu, cbInstr);1053 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);1054 }1055 return rcStrict;1056 }1057 return iemRaiseNotCanonical(pVCpu);1058 }1059 1060 1061 /**1062 918 * Implements far jumps and 
calls thru task segments (TSS). 1063 919 * … … 2735 2591 2736 2592 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS); 2737 }2738 2739 2740 /**2741 * Implements retn and retn imm16.2742 *2743 * We're doing this in C because of the \#GP that might be raised if the popped2744 * program counter is out of bounds.2745 *2746 * The hope with this forced inline worker function, is that the compiler will2747 * be clever enough to eliminate unused code for the constant enmEffOpSize and2748 * maybe cbPop parameters.2749 *2750 * @param pVCpu The cross context virtual CPU structure of the2751 * calling thread.2752 * @param cbInstr The current instruction length.2753 * @param enmEffOpSize The effective operand size. This is constant.2754 * @param cbPop The amount of arguments to pop from the stack2755 * (bytes). This can be constant (zero).2756 */2757 DECL_FORCE_INLINE(VBOXSTRICTRC) iemCImpl_ReturnNearCommon(PVMCPUCC pVCpu, uint8_t cbInstr, IEMMODE enmEffOpSize, uint16_t cbPop)2758 {2759 /* Fetch the RSP from the stack. */2760 VBOXSTRICTRC rcStrict;2761 RTUINT64U NewRip;2762 RTUINT64U NewRsp;2763 NewRsp.u = pVCpu->cpum.GstCtx.rsp;2764 2765 switch (enmEffOpSize)2766 {2767 case IEMMODE_16BIT:2768 NewRip.u = 0;2769 rcStrict = iemMemStackPopU16Ex(pVCpu, &NewRip.Words.w0, &NewRsp);2770 break;2771 case IEMMODE_32BIT:2772 NewRip.u = 0;2773 rcStrict = iemMemStackPopU32Ex(pVCpu, &NewRip.DWords.dw0, &NewRsp);2774 break;2775 case IEMMODE_64BIT:2776 rcStrict = iemMemStackPopU64Ex(pVCpu, &NewRip.u, &NewRsp);2777 break;2778 IEM_NOT_REACHED_DEFAULT_CASE_RET();2779 }2780 if (rcStrict != VINF_SUCCESS)2781 return rcStrict;2782 2783 /* Check the new RSP before loading it. */2784 /** @todo Should test this as the intel+amd pseudo code doesn't mention half2785 * of it. The canonical test is performed here and for call. 
*/2786 if (enmEffOpSize != IEMMODE_64BIT)2787 {2788 if (RT_LIKELY(NewRip.DWords.dw0 <= pVCpu->cpum.GstCtx.cs.u32Limit))2789 { /* likely */ }2790 else2791 {2792 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pVCpu->cpum.GstCtx.cs.u32Limit));2793 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);2794 }2795 }2796 else2797 {2798 if (RT_LIKELY(IEM_IS_CANONICAL(NewRip.u)))2799 { /* likely */ }2800 else2801 {2802 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));2803 return iemRaiseNotCanonical(pVCpu);2804 }2805 }2806 2807 /* Apply cbPop */2808 if (cbPop)2809 iemRegAddToRspEx(pVCpu, &NewRsp, cbPop);2810 2811 /* Commit it. */2812 pVCpu->cpum.GstCtx.rip = NewRip.u;2813 pVCpu->cpum.GstCtx.rsp = NewRsp.u;2814 2815 /* Flush the prefetch buffer. */2816 IEM_FLUSH_PREFETCH_HEAVY(pVCpu, cbInstr); /** @todo only need a light flush here, don't we? We don't really need any flushing... */2817 RT_NOREF(cbInstr);2818 2819 return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);2820 }2821 2822 2823 /**2824 * Implements retn imm16 with 16-bit effective operand size.2825 *2826 * @param cbPop The amount of arguments to pop from the stack (bytes).2827 */2828 IEM_CIMPL_DEF_1(iemCImpl_retn_iw_16, uint16_t, cbPop)2829 {2830 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, cbPop);2831 }2832 2833 2834 /**2835 * Implements retn imm16 with 32-bit effective operand size.2836 *2837 * @param cbPop The amount of arguments to pop from the stack (bytes).2838 */2839 IEM_CIMPL_DEF_1(iemCImpl_retn_iw_32, uint16_t, cbPop)2840 {2841 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, cbPop);2842 }2843 2844 2845 /**2846 * Implements retn imm16 with 64-bit effective operand size.2847 *2848 * @param cbPop The amount of arguments to pop from the stack (bytes).2849 */2850 IEM_CIMPL_DEF_1(iemCImpl_retn_iw_64, uint16_t, cbPop)2851 {2852 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, cbPop);2853 }2854 2855 2856 /**2857 * 
Implements retn with 16-bit effective operand size.2858 */2859 IEM_CIMPL_DEF_0(iemCImpl_retn_16)2860 {2861 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_16BIT, 0);2862 }2863 2864 2865 /**2866 * Implements retn with 32-bit effective operand size.2867 */2868 IEM_CIMPL_DEF_0(iemCImpl_retn_32)2869 {2870 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_32BIT, 0);2871 }2872 2873 2874 /**2875 * Implements retn with 64-bit effective operand size.2876 */2877 IEM_CIMPL_DEF_0(iemCImpl_retn_64)2878 {2879 return iemCImpl_ReturnNearCommon(pVCpu, cbInstr, IEMMODE_64BIT, 0);2880 2593 } 2881 2594
Note: See TracChangeset for help on using the changeset viewer.