Changeset 102585 in vbox
- Timestamp: Dec 12, 2023 12:26:29 PM (14 months ago)
- Location: trunk/src/VBox/VMM
- Files: 8 edited
Legend:
- Unmodified: unprefixed context lines
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
(r102435 → r102585)
          return iemRaiseGeneralProtectionFault0(pVCpu);
      }
-
- #ifndef IEM_WITH_CODE_TLB
-     /* Flush the prefetch buffer. */
-     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
- #endif
-
-     /*
-      * Clear RF and finish the instruction (maybe raise #DB).
-      */
-     return iemRegFinishClearingRF(pVCpu);
- }
-
-
- /**
-  * Performs a near jump to the specified address.
-  *
-  * May raise a \#GP(0) if the new IP outside the code segment limit.
-  *
-  * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
-  * @param   uNewIp  The new IP value.
-  */
- VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
- {
-     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
-                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
-         pVCpu->cpum.GstCtx.rip = uNewIp;
-     else
-         return iemRaiseGeneralProtectionFault0(pVCpu);
-     /** @todo Test 16-bit jump in 64-bit mode. */
-
- #ifndef IEM_WITH_CODE_TLB
-     /* Flush the prefetch buffer. */
-     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
- #endif
-
-     /*
-      * Clear RF and finish the instruction (maybe raise #DB).
-      */
-     return iemRegFinishClearingRF(pVCpu);
- }
-
-
- /**
-  * Performs a near jump to the specified address.
-  *
-  * May raise a \#GP(0) if the new RIP is outside the code segment limit.
-  *
-  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
-  * @param   uNewEip  The new EIP value.
-  */
- VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
- {
-     Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
-     Assert(!IEM_IS_64BIT_CODE(pVCpu));
-
-     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
-         pVCpu->cpum.GstCtx.rip = uNewEip;
-     else
-         return iemRaiseGeneralProtectionFault0(pVCpu);
-
- #ifndef IEM_WITH_CODE_TLB
-     /* Flush the prefetch buffer. */
-     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
- #endif
-
-     /*
-      * Clear RF and finish the instruction (maybe raise #DB).
-      */
-     return iemRegFinishClearingRF(pVCpu);
- }
-
-
- /**
-  * Performs a near jump to the specified address.
-  *
-  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
-  * segment limit.
-  *
-  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
-  * @param   uNewRip  The new RIP value.
-  */
- VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
- {
-     Assert(IEM_IS_64BIT_CODE(pVCpu));
-
-     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
-         pVCpu->cpum.GstCtx.rip = uNewRip;
-     else
-         return iemRaiseGeneralProtectionFault0(pVCpu);
 
  #ifndef IEM_WITH_CODE_TLB
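The removed bodies show the guest-visible behaviour that now lives in IEMInline.h: the target must fit within the CS limit unless executing 64-bit code. A small, self-contained illustration of that check follows (illustrative only, not part of the changeset; the function name is invented here).

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the 16-bit limit check above: a target beyond CS.limit faults,
       while 64-bit code skips the limit check entirely. */
    static bool iemExampleIp16WithinLimit(uint16_t uNewIp, uint32_t u32CsLimit, bool f64BitCode)
    {
        /* e.g. uNewIp = 0x4000 with u32CsLimit = 0x3FFF -> false -> #GP(0). */
        return uNewIp <= u32CsLimit || f64BitCode /* no limit checks in 64-bit mode */;
    }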
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
(r102582 → r102585)
  'IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, True, ),
  'IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, True, ),
-
- 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16': (None, False, True, ),
- 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32': (None, False, True, ),
- 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32': (None, False, True, ),
- 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS': (None, False, True, ),
- 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64': (None, False, True, ),
-
- 'IEM_MC_CALL_CIMPL_1_THREADED': (None, True, True, ),
- 'IEM_MC_CALL_CIMPL_2_THREADED': (None, True, True, ),
- 'IEM_MC_CALL_CIMPL_3_THREADED': (None, True, True, ),
- 'IEM_MC_CALL_CIMPL_4_THREADED': (None, True, True, ),
- 'IEM_MC_CALL_CIMPL_5_THREADED': (None, True, True, ),
 
  'IEM_MC_REL_JMP_S8_AND_FINISH_THREADED_PC16': (None, True, True, ),
  …
  'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, True, ),
  'IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, True, ),
+
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC16': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC32': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64': (None, True, False, ),
+
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC16_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC32_WITH_FLAGS': (None, True, False, ),
+ 'IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS': (None, True, False, ),
+
+ 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_16': (None, False, True, ),
+ 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_32': (None, False, True, ),
+ 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_ADDR32': (None, False, True, ),
+ 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64_FSGS': (None, False, True, ),
+ 'IEM_MC_CALC_RM_EFF_ADDR_THREADED_64': (None, False, True, ),
+
+ 'IEM_MC_CALL_CIMPL_1_THREADED': (None, True, True, ),
+ 'IEM_MC_CALL_CIMPL_2_THREADED': (None, True, True, ),
+ 'IEM_MC_CALL_CIMPL_3_THREADED': (None, True, True, ),
+ 'IEM_MC_CALL_CIMPL_4_THREADED': (None, True, True, ),
+ 'IEM_MC_CALL_CIMPL_5_THREADED': (None, True, True, ),
 
  'IEM_MC_STORE_GREG_U8_THREADED': (None, True, True, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
(r102584 → r102585)
      return off;
  }
+
+
+
+ /*******************************************************************************************************************************
+ *   Emitters for changing PC/RIP/EIP/IP with a indirect jump (IEM_MC_SET_RIP_UXX_AND_FINISH).                                  *
+ *******************************************************************************************************************************/
+
+ /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) return iemRegRipJumpU16AndFinishClearningRF((pVCpu), (a_u16NewIP))
+ /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+ #define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) return iemRegRipJumpU32AndFinishClearningRF((pVCpu), (a_u32NewIP))
+ /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
+ #define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) return iemRegRipJumpU64AndFinishClearningRF((pVCpu), (a_u64NewIP))
 
 
trunk/src/VBox/VMM/VMMAll/IEMAllThrdFuncs.cpp
(r102510 → r102585)
 
  /** Variant of IEM_MC_REL_JMP_S32_AND_FINISH with instruction length as
-  * an extra parameter, for use in 64-bit code and we need to check and clear
+  * an extra parameter, for use in 64-bit code and we need to check and clear
   * flags. */
  #define IEM_MC_REL_JMP_S32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_i32, a_cbInstr) \
  …
 
  #undef IEM_MC_REL_JMP_S32_AND_FINISH
+
+
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishNoFlags((pVCpu), (a_u16NewIP))
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for pre-386 targets that checks and
+  * clears flags. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16_WITH_FLAGS(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for 386+ targets that checks and
+  * clears flags. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
+
+ /** Variant of IEM_MC_SET_RIP_U16_AND_FINISH for use in 64-bit code that checks and
+  * clears flags. */
+ #define IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u16NewIP) \
+     return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
+
+ #undef IEM_MC_SET_RIP_U16_AND_FINISH
+
+
+ /** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets. */
+ #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32(a_u32NewEIP) \
+     return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))
+
+ /** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code. */
+ #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
+     return iemRegRipJumpU32AndFinishNoFlags((pVCpu), (a_u32NewEIP))
+
+ /** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for 386+ targets that checks and
+  * clears flags. */
+ #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC32_WITH_FLAGS(a_u32NewEIP) \
+     return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP))
+
+ /** Variant of IEM_MC_SET_RIP_U32_AND_FINISH for use in 64-bit code that checks
+  * and clears flags. */
+ #define IEM_MC_SET_RIP_U32_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
+     return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewEIP))
+
+ #undef IEM_MC_SET_RIP_U32_AND_FINISH
+
+
+ /** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code. */
+ #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64(a_u32NewEIP) \
+     return iemRegRipJumpU64AndFinishNoFlags((pVCpu), (a_u32NewEIP))
+
+ /** Variant of IEM_MC_SET_RIP_U64_AND_FINISH for use in 64-bit code that checks
+  * and clears flags. */
+ #define IEM_MC_SET_RIP_U64_AND_FINISH_THREADED_PC64_WITH_FLAGS(a_u32NewEIP) \
+     return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u32NewEIP))
+
+ #undef IEM_MC_SET_RIP_U64_AND_FINISH
 
 
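For orientation, these *_THREADED_* variants end up inside generated threaded functions; the fragment below is a hand-written, hypothetical sketch (the function name and parameter packing are invented here, not taken from this changeset) of what a call through the pre-386 variant reduces to after macro expansion.

    /* Hypothetical sketch only: a threaded function receiving the 16-bit jump
       target in a generic 64-bit parameter and finishing via the new macro. */
    static VBOXSTRICTRC iemThreadedExample_SetRipU16_Pc16(PVMCPUCC pVCpu, uint64_t uParam0)
    {
        uint16_t const u16NewIp = (uint16_t)uParam0;            /* target IP packed by the generator */
        IEM_MC_SET_RIP_U16_AND_FINISH_THREADED_PC16(u16NewIp);  /* expands to:
                                                                   return iemRegRipJumpU16AndFinishNoFlags(pVCpu, u16NewIp); */
    }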
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
(r102572 → r102585)
  ];
  # ... and IEM_MC_ADVANCE_RIP_AND_FINISH into *_THREADED_PCxx[_WITH_FLAGS] ...
- elif oNewStmt.sName in ('IEM_MC_ADVANCE_RIP_AND_FINISH', 'IEM_MC_REL_JMP_S8_AND_FINISH',
-                         'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH'):
-     oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
+ elif (   oNewStmt.sName
+       in ('IEM_MC_ADVANCE_RIP_AND_FINISH',
+           'IEM_MC_REL_JMP_S8_AND_FINISH', 'IEM_MC_REL_JMP_S16_AND_FINISH', 'IEM_MC_REL_JMP_S32_AND_FINISH',
+           'IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH', 'IEM_MC_SET_RIP_U64_AND_FINISH', )):
+     if oNewStmt.sName not in ('IEM_MC_SET_RIP_U16_AND_FINISH', 'IEM_MC_SET_RIP_U32_AND_FINISH',
+                               'IEM_MC_SET_RIP_U64_AND_FINISH', ):
+         oNewStmt.asParams.append(self.dParamRefs['cbInstr'][0].sNewName);
  if (   oNewStmt.sName in ('IEM_MC_REL_JMP_S8_AND_FINISH', )
     and self.sVariation not in (self.ksVariation_16_Pre386, self.ksVariation_16f_Pre386,)):
  …
  'IEM_MC_REL_JMP_S16_AND_FINISH': True,
  'IEM_MC_REL_JMP_S32_AND_FINISH': True,
+ 'IEM_MC_SET_RIP_U16_AND_FINISH': True,
+ 'IEM_MC_SET_RIP_U32_AND_FINISH': True,
+ 'IEM_MC_SET_RIP_U64_AND_FINISH': True,
  }):
  asVariations = [sVariation for sVariation in asVariations
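The generator change above appends the cbInstr parameter for the ADVANCE/REL_JMP statements but skips it for the new SET_RIP ones. The simplified sketch below (illustrative only; function names invented, all limit, canonical and flag handling omitted) shows why an indirect jump has no use for the instruction length.

    #include <stdint.h>

    /* Relative jump: RIP must first be advanced past the jump instruction,
       so the generated call needs cbInstr as an extra parameter. */
    static uint64_t iemExampleRelJmpTarget(uint64_t uRip, uint8_t cbInstr, int32_t offNextInstr)
    {
        return uRip + cbInstr + (int64_t)offNextInstr;
    }

    /* Indirect jump (IEM_MC_SET_RIP_UXX_AND_FINISH): the operand already is the
       final target, so no instruction length is required. */
    static uint64_t iemExampleIndirectJmpTarget(uint64_t uNewRip)
    {
        return uNewRip;
    }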
trunk/src/VBox/VMM/include/IEMInline.h
(r102430 → r102585)
 
 
+
+ /**
+  * Performs a near jump to the specified address, no checking or clearing of
+  * flags
+  *
+  * May raise a \#GP(0) if the new IP outside the code segment limit.
+  *
+  * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
+  * @param   uNewIp  The new IP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishNoFlags(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
+ {
+     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
+                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
+         pVCpu->cpum.GstCtx.rip = uNewIp;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishNoFlags(pVCpu);
+ }
+
+
+ /**
+  * Performs a near jump to the specified address, no checking or clearing of
+  * flags
+  *
+  * May raise a \#GP(0) if the new RIP is outside the code segment limit.
+  *
+  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
+  * @param   uNewEip  The new EIP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishNoFlags(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
+ {
+     Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
+     Assert(!IEM_IS_64BIT_CODE(pVCpu));
+     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
+         pVCpu->cpum.GstCtx.rip = uNewEip;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishNoFlags(pVCpu);
+ }
+
+
+ /**
+  * Performs a near jump to the specified address, no checking or clearing of
+  * flags.
+  *
+  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+  * segment limit.
+  *
+  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
+  * @param   uNewRip  The new RIP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishNoFlags(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
+ {
+     Assert(IEM_IS_64BIT_CODE(pVCpu));
+     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
+         pVCpu->cpum.GstCtx.rip = uNewRip;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishNoFlags(pVCpu);
+ }
+
+
+ /**
+  * Performs a near jump to the specified address.
+  *
+  * May raise a \#GP(0) if the new IP outside the code segment limit.
+  *
+  * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
+  * @param   uNewIp  The new IP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU16AndFinishClearingRF(PVMCPUCC pVCpu, uint16_t uNewIp) RT_NOEXCEPT
+ {
+     if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
+                   || IEM_IS_64BIT_CODE(pVCpu) /* no limit checks in 64-bit mode */))
+         pVCpu->cpum.GstCtx.rip = uNewIp;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishClearingRF(pVCpu);
+ }
+
+
+ /**
+  * Performs a near jump to the specified address.
+  *
+  * May raise a \#GP(0) if the new RIP is outside the code segment limit.
+  *
+  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
+  * @param   uNewEip  The new EIP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU32AndFinishClearingRF(PVMCPUCC pVCpu, uint32_t uNewEip) RT_NOEXCEPT
+ {
+     Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
+     Assert(!IEM_IS_64BIT_CODE(pVCpu));
+     if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
+         pVCpu->cpum.GstCtx.rip = uNewEip;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishClearingRF(pVCpu);
+ }
+
+
+ /**
+  * Performs a near jump to the specified address.
+  *
+  * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
+  * segment limit.
+  *
+  * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
+  * @param   uNewRip  The new RIP value.
+  */
+ DECLINLINE(VBOXSTRICTRC) iemRegRipJumpU64AndFinishClearingRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
+ {
+     Assert(IEM_IS_64BIT_CODE(pVCpu));
+     if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
+         pVCpu->cpum.GstCtx.rip = uNewRip;
+     else
+         return iemRaiseGeneralProtectionFault0(pVCpu);
+ #ifndef IEM_WITH_CODE_TLB
+     pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
+ #endif
+     return iemRegFinishClearingRF(pVCpu);
+ }
+
+
+
  /**
   * Adds to the stack pointer.
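As a usage sketch for the new inline helpers (hypothetical caller, not part of this changeset): once an instruction implementation has fetched a 64-bit branch target, it simply tail-calls the ClearingRF flavour, which performs the canonical check (a value such as 0x0000800000000000 would raise #GP(0)), flushes the prefetch buffer when no code TLB is used, and clears RF.

    /* Hypothetical caller sketch; assumes 64-bit mode and that u64Target was
       already fetched from the jump operand by the caller. */
    static VBOXSTRICTRC iemExampleFinishIndirectJump64(PVMCPUCC pVCpu, uint64_t u64Target)
    {
        return iemRegRipJumpU64AndFinishClearingRF(pVCpu, u64Target);
    }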
trunk/src/VBox/VMM/include/IEMInternal.h
(r102572 → r102585)
  VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                           IEMMODE enmEffOpSize) RT_NOEXCEPT;
- VBOXSTRICTRC iemRegRipJumpU16AndFinishClearningRF(PVMCPUCC pVCpu, uint16_t uNewRip) RT_NOEXCEPT;
- VBOXSTRICTRC iemRegRipJumpU32AndFinishClearningRF(PVMCPUCC pVCpu, uint32_t uNewRip) RT_NOEXCEPT;
- VBOXSTRICTRC iemRegRipJumpU64AndFinishClearningRF(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT;
  /** @} */
 
trunk/src/VBox/VMM/include/IEMMc.h
(r102572 → r102585)
      return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize)
  /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
- #define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) return iemRegRipJumpU16AndFinishClearningRF((pVCpu), (a_u16NewIP))
+ #define IEM_MC_SET_RIP_U16_AND_FINISH(a_u16NewIP) return iemRegRipJumpU16AndFinishClearingRF((pVCpu), (a_u16NewIP))
  /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
- #define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) return iemRegRipJumpU32AndFinishClearningRF((pVCpu), (a_u32NewIP))
+ #define IEM_MC_SET_RIP_U32_AND_FINISH(a_u32NewIP) return iemRegRipJumpU32AndFinishClearingRF((pVCpu), (a_u32NewIP))
  /** Sets RIP (may trigger \#GP), finishes the instruction and returns. */
- #define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) return iemRegRipJumpU64AndFinishClearningRF((pVCpu), (a_u64NewIP))
+ #define IEM_MC_SET_RIP_U64_AND_FINISH(a_u64NewIP) return iemRegRipJumpU64AndFinishClearingRF((pVCpu), (a_u64NewIP))
 
  #define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
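For reference, a decoder body for a 16-bit indirect near jump would use the renamed macro roughly as sketched below; this is an illustration only, and the exact IEM_MC_BEGIN arguments and ModR/M accessors vary between revisions and are not taken from this changeset.

    /* Rough microcode-block sketch for 'jmp r16' (illustrative only). */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Target);
    IEM_MC_FETCH_GREG_U16(u16Target, IEM_GET_MODRM_RM(pVCpu, bRm));
    IEM_MC_SET_RIP_U16_AND_FINISH(u16Target);   /* -> iemRegRipJumpU16AndFinishClearingRF() */
    IEM_MC_END();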