Changeset 100863 in vbox
- Timestamp: Aug 11, 2023 8:46:44 PM
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
(r100860 -> r100863)

@@ -495,6 +495,6 @@
         if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
                                                      | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
-                                                     | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_PT_NO_WRITE
-                                                     | fNoUser))
+                                                     | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
+                                                     | IEMTLBE_F_PT_NO_WRITE | fNoUser))
                       == pVCpu->iem.s.DataTlb.uTlbPhysRev))
         {

@@ -617,5 +617,5 @@
             Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
             *pbUnmapInfo = 0;
-            Log8(("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
+            Log9(("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
                   GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
             return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];

@@ -643,4 +643,54 @@
 RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+    /*
+     * Decrement the stack pointer (prep), apply segmentation and check that
+     * the item doesn't cross a page boundary.
+     */
+    uint64_t      uNewRsp;
+    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
+    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
+                                                         | IEMTLBE_F_PT_NO_WRITE | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the push and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
+                      GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
+                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
+                pVCpu->cpum.GstCtx.rsp = uNewRsp;
+                return;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrEff));
+# endif
     RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
 }

@@ -653,4 +703,53 @@
 RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+    /*
+     * Increment the stack pointer (prep), apply segmentation and check that
+     * the item doesn't cross a page boundary.
+     */
+    uint64_t      uNewRsp;
+    RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
+    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrTop);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, TMPL_MEM_TYPE) ))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the pop and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                Log9(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
+                      GCPtrEff, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
+                pVCpu->cpum.GstCtx.rsp = uNewRsp;
+                return uRet;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrEff));
+# endif
     return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
 }

@@ -675,4 +774,55 @@
 RT_CONCAT3(iemMemFlat32StackPush,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
 {
+    Assert(   pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
+           && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
+           && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
+           && pVCpu->cpum.GstCtx.ss.u64Base == 0);
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+    /*
+     * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
+     */
+    uint32_t const uNewEsp = pVCpu->cpum.GstCtx.esp - sizeof(TMPL_MEM_TYPE);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(uNewEsp & TMPL_MEM_TYPE_ALIGN)
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uNewEsp, TMPL_MEM_TYPE) ))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, (RTGCPTR)uNewEsp); /* Doesn't work w/o casting to RTGCPTR (win /3 hangs). */
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_WRITE
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
+                                                         | IEMTLBE_F_PT_NO_WRITE | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the push and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                Log8(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RX32 (<-%RX32): " TMPL_MEM_FMT_TYPE "\n",
+                      uNewEsp, pVCpu->cpum.GstCtx.esp, uValue));
+                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[uNewEsp & GUEST_PAGE_OFFSET_MASK] = uValue;
+                pVCpu->cpum.GstCtx.rsp = uNewEsp;
+                return;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RX32 falling back\n", uNewEsp));
+# endif
     RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, uValue);
 }

@@ -685,4 +835,50 @@
 RT_CONCAT3(iemMemFlat32StackPop,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE) && 1
+    /*
+     * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
+     */
+    uint32_t const uOldEsp = pVCpu->cpum.GstCtx.esp;
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY(   !(uOldEsp & TMPL_MEM_TYPE_ALIGN)
+                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, uOldEsp, TMPL_MEM_TYPE) ))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, (RTGCPTR)uOldEsp); /* Cast is required! 2023-08-11 */
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (RT_LIKELY(pTlbe->uTag == uTag))
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3
+                                                         | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                                         | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
+                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
+            {
+                /*
+                 * Do the pop and return.
+                 */
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                TMPL_MEM_TYPE const uRet = *(TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[uOldEsp & GUEST_PAGE_OFFSET_MASK];
+                pVCpu->cpum.GstCtx.rsp = uOldEsp + sizeof(TMPL_MEM_TYPE);
+                Log9(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RX32 (->%RX32): " TMPL_MEM_FMT_TYPE "\n",
+                      uOldEsp, uOldEsp + sizeof(TMPL_MEM_TYPE), uRet));
+                return uRet;
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RX32 falling back\n", uOldEsp));
+# endif
     return RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu);
 }
trunk/src/VBox/VMM/include/IEMInternal.h
(r100860 -> r100863)

@@ -517,5 +517,7 @@
  * @returns Tag value for indexing and comparing with IEMTLB::uTag.
  * @param   a_pTlb      The TLB.
- * @param   a_GCPtr     The virtual address.
+ * @param   a_GCPtr     The virtual address.  Must be RTGCPTR or same size or
+ *                      the clearing of the top 16 bits won't work (if 32-bit
+ *                      we'll end up with mostly zeros).
  */
 #define IEMTLB_CALC_TAG(a_pTlb, a_GCPtr)    ( IEMTLB_CALC_TAG_NO_REV(a_GCPtr) | (a_pTlb)->uTlbRevision )

@@ -523,5 +525,7 @@
  * Calculates the TLB tag for a virtual address but without TLB revision.
  * @returns Tag value for indexing and comparing with IEMTLB::uTag.
- * @param   a_GCPtr     The virtual address.
+ * @param   a_GCPtr     The virtual address.  Must be RTGCPTR or same size or
+ *                      the clearing of the top 16 bits won't work (if 32-bit
+ *                      we'll end up with mostly zeros).
  */
 #define IEMTLB_CALC_TAG_NO_REV(a_GCPtr)     ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
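The new parameter notes document the bug behind the two (RTGCPTR) casts in the stack fast paths above: when the macro argument is a 32-bit value such as uNewEsp, the << 16 in IEMTLB_CALC_TAG_NO_REV is evaluated in 32 bits, so the upper half of the address is discarded before the right shift can bring the page number back down. A minimal sketch of the difference, assuming GUEST_PAGE_SHIFT is 12 (4 KiB x86 guest pages):

    /* Sketch of why IEMTLB_CALC_TAG_NO_REV needs a 64-bit operand. */
    #include <stdint.h>
    #include <stdio.h>

    #define GUEST_PAGE_SHIFT 12
    #define CALC_TAG_NO_REV(a_GCPtr) ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )

    int main(void)
    {
        uint32_t const uEsp32 = UINT32_C(0x7ffe1234);  /* a typical 32-bit stack address */
        uint64_t const GCPtr  = uEsp32;                /* what the (RTGCPTR) cast achieves */

        /* 32-bit operand: '<< 16' is evaluated in 32 bits, discarding the top
           half of the address; only bits 12-15 survive the shift pair. */
        printf("32-bit: %#010x\n", CALC_TAG_NO_REV(uEsp32));                           /* 0x00000001 */

        /* 64-bit operand: the shifts only strip the (unused) top 16 address
           bits and yield the full virtual page number. */
        printf("64-bit: %#010llx\n", (unsigned long long)CALC_TAG_NO_REV(GCPtr));      /* 0x0007ffe1 */
        return 0;
    }

With a 64-bit operand the shift pair merely clears the top 16 (unused) address bits and keeps the full virtual page number; with a 32-bit operand only bits 12-15 of the address survive, so nearly all tags collapse onto sixteen values, the "mostly zeros" the comment warns about.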