Changeset 100865 in vbox for trunk/src/VBox/VMM
Timestamp: Aug 11, 2023 11:16:59 PM
Files: 1 edited
    trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h (r100863)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h (r100865)

…
+/*********************************************************************************************************************************
+*   Stack Access                                                                                                                 *
+*********************************************************************************************************************************/
 # ifdef TMPL_MEM_WITH_STACK
 # ifdef IEM_WITH_SETJMP
…
 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Decrement the stack pointer (prep), apply segmentation and check that
…
 RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 {
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Increment the stack pointer (prep), apply segmentation and check that
…
 /**
  * Stack segment push function that longjmps on error.
+ *
+ * For a detailed discussion of the behaviour see the fallback functions
+ * iemMemStackPushUxxSRegSafeJmp.
  */
 DECL_INLINE_THROW(void)
 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Decrement the stack pointer (prep), apply segmentation and check that
+     * the item doesn't cross a page boundrary.
+     */
+    uint64_t      uNewRsp;
+    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
+    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(GCPtrEff & (sizeof(uint16_t) - 1U))
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
+                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the push and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
+                      GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
+                *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+                pVCpu->cpum.GstCtx.rsp = uNewRsp;
+                return;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles. (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrEff));
+# endif
     RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
 }
…
                   && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
                   && pVCpu->cpum.GstCtx.ss.u64Base == 0);
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
…
 RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 {
-# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     /*
      * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
…
 /**
  * 32-bit flat stack segment push function that longjmps on error.
+ *
+ * For a detailed discussion of the behaviour see the fallback functions
+ * iemMemStackPushUxxSRegSafeJmp.
  */
 DECL_INLINE_THROW(void)
 RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Calculate the new stack pointer and check that the item doesn't cross a page boundrary.
+     */
+    uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
+    if (RT_LIKELY(   !(uNewEsp & (sizeof(uint16_t) - 1))
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, uint16_t) ))
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
+                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the push and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE " [sreg]\n",
+                      uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
+                *(uint16_t *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
+                pVCpu->cpum.GstCtx.rsp = uNewEsp;
+                return;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles. (This will do a TLB load.) */
+    Log10Func(("%RX32 falling back\n", uNewEsp));
+# endif
     RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
 }
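The substance of this changeset is that the two SReg push functions (plain and 32-bit flat) gain an inlined data-TLB fast path, mirroring what the regular push/pop inliners already do: compute the effective stack address, look the page up in the TLB by tag, verify that no restrictive flags are set with a single compare against uTlbPhysRev, and write straight through the cached ring-3 mapping; anything else falls through to the safe helper, which performs the TLB load. Below is a minimal, self-contained sketch of that lookup pattern; the types and names (Tlb, TlbEntry, tlbCalcTag, memWriteU16, memWriteU16SafeFallback) are simplified illustrative stand-ins, not the real IEM definitions.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define GUEST_PAGE_SHIFT        12
#define GUEST_PAGE_OFFSET_MASK  0xfffU
#define TLB_ENTRIES             256U

/* Simplified stand-ins for the IEM data TLB structures (illustrative only). */
typedef struct TlbEntry
{
    uint64_t uTag;              /* Page tag incl. TLB revision; must match for a hit. */
    uint64_t fFlagsAndPhysRev;  /* Restriction flags OR'ed with the physical revision. */
    uint8_t *pbMappingR3;       /* Host mapping of the guest page, NULL if none. */
} TlbEntry;

typedef struct Tlb
{
    uint64_t uTlbRevision;      /* Mixed into every tag; bumping it flushes all entries. */
    uint64_t uTlbPhysRev;       /* Value fFlagsAndPhysRev must equal for a clean write hit. */
    TlbEntry aEntries[TLB_ENTRIES];
} Tlb;

static uint64_t tlbCalcTag(Tlb const *pTlb, uint64_t GCPtr)
{
    return (GCPtr >> GUEST_PAGE_SHIFT) + pTlb->uTlbRevision;
}

/* Slow-path stand-in: the real fallback walks the page tables, loads the
   TLB entry and raises a fault on failure; here it is only a stub. */
static bool memWriteU16SafeFallback(Tlb *pTlb, uint64_t GCPtr, uint16_t uValue)
{
    (void)pTlb; (void)GCPtr; (void)uValue;
    return false;
}

/* Fast-path 16-bit guest write in the style of the inlined SReg push bodies:
   tag lookup, one flag compare against uTlbPhysRev, then a direct write
   through the cached host mapping; everything else takes the fallback. */
static bool memWriteU16(Tlb *pTlb, uint64_t GCPtr, uint16_t uValue)
{
    /* The inline body only handles writes that stay within one guest page. */
    if ((GCPtr & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_OFFSET_MASK - 1U)
    {
        uint64_t const uTag  = tlbCalcTag(pTlb, GCPtr);
        TlbEntry      *pTlbe = &pTlb->aEntries[uTag % TLB_ENTRIES];
        if (   pTlbe->uTag == uTag
            && pTlbe->fFlagsAndPhysRev == pTlb->uTlbPhysRev /* no no-write/no-user/... bits */
            && pTlbe->pbMappingR3)
        {
            memcpy(&pTlbe->pbMappingR3[GCPtr & GUEST_PAGE_OFFSET_MASK], &uValue, sizeof(uValue));
            return true;
        }
    }
    return memWriteU16SafeFallback(pTlb, GCPtr, uValue); /* TLB miss, MMIO, fault, ... */
}

Two details of the real code are worth noting. First, the privilege check folds into the same masked compare: with IEMTLBE_F_PT_NO_USER == 4, the expression (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER is non-zero only at CPL 3, so user-inaccessible pages fail the compare exactly when the guest runs unprivileged. Second, because IEMTLBE_F_PHYS_REV participates in the compared mask, bumping uTlbPhysRev invalidates every cached physical mapping with a single store instead of a walk over the TLB.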