VirtualBox

Changeset 102766 in vbox for trunk/src


Timestamp:
Jan 4, 2024 8:53:03 PM
Author:
vboxsync
Message:

VMM/IEM: Reworking native translation of IEM_MC_*PUSH* in prep for doing TLB lookups. bugref:10371

Location:
trunk/src/VBox/VMM
Files:
4 edited
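
In short, the rework replaces the native push helpers, which adjusted the stack pointer themselves, with store helpers that receive a pre-computed guest stack address, moving the address calculation into the recompiled code so that a TLB lookup can later be emitted inline. A minimal sketch of the signature change, using the helper names from the diff below (bodies simplified):

    /* Before: the helper adjusted the stack pointer and performed the store itself. */
    IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
    {
        iemMemStackPushU16Jmp(pVCpu, u16Value);
    }

    /* After: the recompiled code supplies the effective stack address, so the
       helper only stores the value at GCPtrMem. */
    IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
    {
        iemMemStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
    }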

  • trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h

    r102576 → r102766
    459459
    460460/**
     461 * Safe/fallback stack store function that longjmps on error.
     462 */
     463void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     464                                                           TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     465{
     466#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     467    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     468#  endif
     469
     470    uint8_t        bUnmapInfo;
     471    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrMem,
     472                                                         IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
     473    *puDst = uValue;
     474    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     475
     476    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrMem, uValue));
     477}
     478
     479
     480#  ifdef TMPL_WITH_PUSH_SREG
     481/**
     482 * Safe/fallback stack SREG store function that longjmps on error.
     483 */
     484void RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     485                                                               TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     486{
     487# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
     488    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
     489# endif
     490
     491    /* The Intel docs talk about zero extending the selector register
     492       value.  My actual Intel CPU here might be zero extending the value
     493       but it still only writes the lower word... */
     494    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     495     * happens when crossing an electric page boundary: is the high word checked
     496     * for write accessibility or not? Probably it is.  What about segment limits?
     497     * It appears this behavior is also shared with trap error codes.
     498     *
     499     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
     500     * ancient hardware when it actually did change. */
     501    uint8_t   bUnmapInfo;
     502    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
     503                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
     504    *puDst = (uint16_t)uValue;
     505    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
     506
     507    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrMem, uValue));
     508}
     509#  endif /* TMPL_WITH_PUSH_SREG */
     510
     511
     512/**
    461513 * Safe/fallback stack push function that longjmps on error.
    462514 */
     
    514566        pVCpu->cpum.GstCtx.aGRegs[iGReg].u16 = uValue;
    515567}
    516 
    517568
    518569#  ifdef TMPL_WITH_PUSH_SREG
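
For context: this file is a template included once per access width, with TMPL_MEM_TYPE, TMPL_MEM_FN_SUFF and related macros defined by the including source file, and RT_CONCAT3 pastes the suffix into the function names. A rough illustration of the 16-bit instantiation (the #define values shown are assumptions; they live at the include site and are not part of this changeset):

    /* Assumed include-site defines for the 16-bit instantiation: */
    #define TMPL_MEM_TYPE       uint16_t
    #define TMPL_MEM_FN_SUFF    U16

    /* RT_CONCAT3(iemMemStoreStack, TMPL_MEM_FN_SUFF, SafeJmp) then expands into
       the declaration added to IEMInternal.h further down: */
    void iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;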
  • trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h

    r102572 → r102766
    726726
    727727/**
     728 * Stack store function that longjmps on error.
     729 */
     730DECL_INLINE_THROW(void)
     731RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     732{
     733#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     734    /*
     735     * Apply segmentation and check that the item doesn't cross a page boundary.
     736     */
     737    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
     738#  if TMPL_MEM_TYPE_SIZE > 1
     739    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrEff)))
     740#  endif
     741    {
     742        /*
     743         * TLB lookup.
     744         */
     745        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
     746        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
     747        if (RT_LIKELY(pTlbe->uTag == uTag))
     748        {
     749            /*
     750             * Check TLB page table level access flags.
     751             */
     752            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     753            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     754            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
     755                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
     756                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
     757                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
     758                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     759            {
     760                /*
     761                 * Do the store and return.
     762                 */
     763                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     764                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     765                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     766                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE "\n", GCPtrEff, uValue));
     767                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = uValue;
     768                return;
     769            }
     770        }
     771    }
     772
     773    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     774       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     775    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
     776#  endif
     777    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
     778}
     779
     780
     781#   ifdef TMPL_WITH_PUSH_SREG
     782/**
     783 * Stack segment store function that longjmps on error.
     784 *
     785 * For a detailed discussion of the behaviour see the fallback functions
     786 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
     787 */
     788DECL_INLINE_THROW(void)
     789RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     790                                                      TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     791{
     792#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     793    /*
     794     * Decrement the stack pointer (prep), apply segmentation and check that
     795     * the item doesn't cross a page boundary.
     796     */
     797    RTGCPTR const GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, X86_SREG_SS, sizeof(TMPL_MEM_TYPE), GCPtrMem);
     798#  if TMPL_MEM_TYPE_SIZE > 1
     799    if (RT_LIKELY(   !(GCPtrEff & (sizeof(uint16_t) - 1U))
     800                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrEff, uint16_t) ))
     801#  endif
     802    {
     803        /*
     804         * TLB lookup.
     805         */
     806        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
     807        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
     808        if (RT_LIKELY(pTlbe->uTag == uTag))
     809        {
     810            /*
     811             * Check TLB page table level access flags.
     812             */
     813            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     814            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     815            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
     816                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
     817                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
     818                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
     819                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     820            {
     821                /*
     822                 * Do the push and return.
     823                 */
     824                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     825                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     826                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     827                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv: " TMPL_MEM_FMT_TYPE " [sreg]\n", GCPtrEff, uValue));
     828                *(uint16_t *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
     829                return;
     830            }
     831        }
     832    }
     833
     834    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     835       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     836    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrEff));
     837#  endif
     838    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
     839}
     840#   endif /* TMPL_WITH_PUSH_SREG */
     841
     842
     843/**
     844 * Flat stack store function that longjmps on error.
     845 */
     846DECL_INLINE_THROW(void)
     847RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,Jmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     848                                                      TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     849{
     850    Assert(   IEM_IS_64BIT_CODE(pVCpu)
     851           || (    pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig
     852                && pVCpu->cpum.GstCtx.ss.Attr.n.u4Type == X86_SEL_TYPE_RW_ACC
     853                && pVCpu->cpum.GstCtx.ss.u32Limit == UINT32_MAX
     854                && pVCpu->cpum.GstCtx.ss.u64Base == 0));
     855
     856#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     857    /*
     858     * Check that the item doesn't cross a page boundary.
     859     */
     860#  if TMPL_MEM_TYPE_SIZE > 1
     861    if (RT_LIKELY(TMPL_MEM_ALIGN_CHECK(GCPtrMem)))
     862#  endif
     863    {
     864        /*
     865         * TLB lookup.
     866         */
     867        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
     868        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
     869        if (RT_LIKELY(pTlbe->uTag == uTag))
     870        {
     871            /*
     872             * Check TLB page table level access flags.
     873             */
     874            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     875            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     876            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
     877                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
     878                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
     879                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
     880                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     881            {
     882                /*
     883                 * Do the push and return.
     884                 */
     885                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     886                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     887                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     888                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE "\n",
     889                                           GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
     890                *(TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = uValue;
     891                return;
     892            }
     893        }
     894    }
     895
     896    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     897       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     898    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
     899#  endif
     900    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SafeJmp)(pVCpu, GCPtrMem, uValue);
     901}
     902
     903#   ifdef TMPL_WITH_PUSH_SREG
     904/**
     905 * Flat stack segment store function that longjmps on error.
     906 *
     907 * For a detailed discussion of the behaviour see the fallback functions
     908 * iemMemStoreStackUxxSRegSafeJmp and iemMemStackPushUxxSRegSafeJmp.
     909 */
     910DECL_INLINE_THROW(void)
     911RT_CONCAT3(iemMemFlatStoreStack,TMPL_MEM_FN_SUFF,SRegJmp)(PVMCPUCC pVCpu, RTGCPTR GCPtrMem,
     912                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
     913{
     914#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
     915    /*
     916     * Calculate the new stack pointer and check that the item doesn't cross a page boundary.
     917     */
     918    if (RT_LIKELY(   !(GCPtrMem & (sizeof(uint16_t) - 1))
     919                  || TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(pVCpu, GCPtrMem, uint16_t) ))
     920    {
     921        /*
     922         * TLB lookup.
     923         */
     924        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
     925        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
     926        if (RT_LIKELY(pTlbe->uTag == uTag))
     927        {
     928            /*
     929             * Check TLB page table level access flags.
     930             */
     931            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
     932            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
     933            if (RT_LIKELY(   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
     934                                                         | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
     935                                                         | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY
     936                                                         | IEMTLBE_F_PT_NO_WRITE    | fNoUser))
     937                          == pVCpu->iem.s.DataTlb.uTlbPhysRev))
     938            {
     939                /*
     940                 * Do the push and return.
     941                 */
     942                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
     943                Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
     944                Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
     945                Log11Ex(LOG_GROUP_IEM_MEM,("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (<-%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
     946                                           GCPtrMem, pVCpu->cpum.GstCtx.rsp, uValue));
     947                *(uint16_t *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK] = (uint16_t)uValue;
     948                return;
     949            }
     950        }
     951    }
     952
     953    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
     954       outdated page pointer, or other troubles.  (This will do a TLB load.) */
     955    Log12Ex(LOG_GROUP_IEM_MEM,(LOG_FN_FMT ": %RGv falling back\n", LOG_FN_NAME, GCPtrMem));
     956#  endif
     957    RT_CONCAT3(iemMemStoreStack,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, GCPtrMem, uValue);
     958}
     959#   endif /* TMPL_WITH_PUSH_SREG */
     960
     961
     962
     963/**
    728964 * Stack push function that longjmps on error.
    729965 */
     
    9121148    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    9131149}
    914 
    915 #   endif
     1150#   endif /* TMPL_WITH_PUSH_SREG */
     1151
    9161152#   if TMPL_MEM_TYPE_SIZE != 8
    9171153
     
    10391275
    10401276#   endif /* TMPL_MEM_TYPE_SIZE != 8*/
     1277
    10411278#   ifdef TMPL_WITH_PUSH_SREG
    10421279/**
     
    10961333    RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(pVCpu, uValue);
    10971334}
    1098 
    1099 #   endif
     1335#   endif /* TMPL_WITH_PUSH_SREG */
     1336
    11001337#   if TMPL_MEM_TYPE_SIZE != 4
    11011338
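
One non-obvious detail shared by all of these inline fast paths: IEMTLBE_F_PT_NO_USER is asserted to equal 4, so (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER yields 4 only for CPL 3, meaning the "page not accessible in user mode" TLB flag only participates in the comparison against uTlbPhysRev when the guest is running user-mode code. A small standalone sketch of the same trick (illustrative C only, not VMM code):

    #include <assert.h>
    #include <stdint.h>

    #define IEMTLBE_F_PT_NO_USER  UINT64_C(4)  /* value guaranteed by the AssertCompile above */

    /* CPL 0..2 -> 0 (flag masked out); CPL 3 -> 4 (flag enforced). */
    static uint64_t fNoUserForCpl(unsigned uCpl)
    {
        return (uCpl + 1) & IEMTLBE_F_PT_NO_USER;
    }

    int main(void)
    {
        assert(fNoUserForCpl(0) == 0);
        assert(fNoUserForCpl(2) == 0);
        assert(fNoUserForCpl(3) == IEMTLBE_F_PT_NO_USER);
        return 0;
    }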
  • trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp

    r102765 → r102766
    17771777
    17781778/**
    1779  * Used by TB code to push unsigned 16-bit value onto a generic stack.
    1780  */
    1781 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
    1782 {
    1783     iemMemStackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemStackPushU16SafeJmp */
    1784 }
    1785 
    1786 
    1787 /**
    1788  * Used by TB code to push unsigned 32-bit value onto a generic stack.
    1789  */
    1790 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU32,(PVMCPUCC pVCpu, uint32_t u32Value))
    1791 {
    1792     iemMemStackPushU32Jmp(pVCpu, u32Value); /** @todo iemMemStackPushU32SafeJmp */
    1793 }
    1794 
    1795 
    1796 /**
    1797  * Used by TB code to push 32-bit selector value onto a generic stack.
     1779 * Used by TB code to store an unsigned 16-bit value onto a generic stack.
     1780 */
     1781IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
     1782{
     1783#if 0
     1784    iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
     1785#else
     1786    iemMemStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
     1787#endif
     1788}
     1789
     1790
     1791/**
     1792 * Used by TB code to store an unsigned 32-bit value onto a generic stack.
     1793 */
     1794IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
     1795{
     1796#if 0
     1797    iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
     1798#else
     1799    iemMemStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
     1800#endif
     1801}
     1802
     1803
     1804/**
      1805 * Used by TB code to store a 32-bit selector value onto a generic stack.
    17981806 *
     17991807  * Intel CPUs don't write a whole dword, thus the special function.
    18001808 */
    1801 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU32SReg,(PVMCPUCC pVCpu, uint32_t u32Value))
    1802 {
    1803     iemMemStackPushU32SRegJmp(pVCpu, u32Value); /** @todo iemMemStackPushU32SRegSafeJmp */
     1809IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
     1810{
     1811#if 0
     1812    iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
     1813#else
     1814    iemMemStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
     1815#endif
    18041816}
    18051817
     
    18081820 * Used by TB code to push unsigned 64-bit value onto a generic stack.
    18091821 */
    1810 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackPushU64,(PVMCPUCC pVCpu, uint64_t u64Value))
    1811 {
    1812     iemMemStackPushU64Jmp(pVCpu, u64Value); /** @todo iemMemStackPushU64SafeJmp */
     1822IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
     1823{
     1824#if 0
     1825    iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
     1826#else
     1827    iemMemStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
     1828#endif
    18131829}
    18141830
     
    19882004
    19892005/**
    1990  * Used by TB code to push unsigned 16-bit value onto a flat 32-bit stack.
    1991  */
    1992 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
    1993 {
    1994     iemMemFlat32StackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemFlat32StackPushU16SafeJmp */
    1995 }
    1996 
    1997 
    1998 /**
    1999  * Used by TB code to push unsigned 32-bit value onto a flat 32-bit stack.
    2000  */
    2001 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU32,(PVMCPUCC pVCpu, uint32_t u32Value))
    2002 {
    2003     iemMemFlat32StackPushU32Jmp(pVCpu, u32Value); /** @todo iemMemFlat32StackPushU32SafeJmp */
    2004 }
    2005 
    2006 
    2007 /**
    2008  * Used by TB code to push segment selector value onto a flat 32-bit stack.
     2006 * Used by TB code to store an unsigned 16-bit value onto a flat stack.
     2007 */
     2008IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU16,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t u16Value))
     2009{
     2010#if 0
     2011    iemMemStoreStackU16SafeJmp(pVCpu, GCPtrMem, u16Value);
     2012#else
     2013    iemMemFlatStoreStackU16Jmp(pVCpu, GCPtrMem, u16Value);
     2014#endif
     2015}
     2016
     2017
     2018/**
     2019 * Used by TB code to store an unsigned 32-bit value onto a flat stack.
     2020 */
     2021IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
     2022{
     2023#if 0
     2024    iemMemStoreStackU32SafeJmp(pVCpu, GCPtrMem, u32Value);
     2025#else
     2026    iemMemFlatStoreStackU32Jmp(pVCpu, GCPtrMem, u32Value);
     2027#endif
     2028}
     2029
     2030
     2031/**
     2032 * Used by TB code to store a segment selector value onto a flat stack.
    20092033 *
     20102034 * Intel CPUs don't write a whole dword, thus the special function.
    20112035 */
    2012 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat32PushU32SReg,(PVMCPUCC pVCpu, uint32_t u32Value))
    2013 {
    2014     iemMemFlat32StackPushU32SRegJmp(pVCpu, u32Value); /** @todo iemMemFlat32StackPushU32SRegSafeJmp */
     2036IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU32SReg,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t u32Value))
     2037{
     2038#if 0
     2039    iemMemStoreStackU32SRegSafeJmp(pVCpu, GCPtrMem, u32Value);
     2040#else
     2041    iemMemFlatStoreStackU32SRegJmp(pVCpu, GCPtrMem, u32Value);
     2042#endif
     2043}
     2044
     2045
     2046/**
     2047 * Used by TB code to store an unsigned 64-bit value onto a flat stack.
     2048 */
     2049IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlatStoreU64,(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t u64Value))
     2050{
     2051#if 0
     2052    iemMemStoreStackU64SafeJmp(pVCpu, GCPtrMem, u64Value);
     2053#else
     2054    iemMemFlatStoreStackU64Jmp(pVCpu, GCPtrMem, u64Value);
     2055#endif
    20152056}
    20162057
     
    20312072{
    20322073    iemMemFlat32StackPopGRegU32Jmp(pVCpu, iGReg); /** @todo iemMemFlat32StackPopGRegU32SafeJmp */
    2033 }
    2034 
    2035 
    2036 
    2037 /**
    2038  * Used by TB code to push unsigned 16-bit value onto a flat 64-bit stack.
    2039  */
    2040 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PushU16,(PVMCPUCC pVCpu, uint16_t u16Value))
    2041 {
    2042     iemMemFlat64StackPushU16Jmp(pVCpu, u16Value); /** @todo iemMemFlat64StackPushU16SafeJmp */
    2043 }
    2044 
    2045 
    2046 /**
    2047  * Used by TB code to push unsigned 64-bit value onto a flat 64-bit stack.
    2048  */
    2049 IEM_DECL_NATIVE_HLP_DEF(void, iemNativeHlpStackFlat64PushU64,(PVMCPUCC pVCpu, uint64_t u64Value))
    2050 {
    2051     iemMemFlat64StackPushU64Jmp(pVCpu, u64Value); /** @todo iemMemFlat64StackPushU64SafeJmp */
    20522074}
    20532075
     
    1140011422#define IEM_MC_PUSH_U16(a_u16Value) \
    1140111423    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16,  0, 0, 0), \
    11402                                  (uintptr_t)iemNativeHlpStackPushU16, pCallEntry->idxInstr)
     11424                                 (uintptr_t)iemNativeHlpStackStoreU16, pCallEntry->idxInstr)
    1140311425#define IEM_MC_PUSH_U32(a_u32Value) \
    1140411426    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32,  0, 0, 0), \
    11405                                  (uintptr_t)iemNativeHlpStackPushU32, pCallEntry->idxInstr)
     11427                                 (uintptr_t)iemNativeHlpStackStoreU32, pCallEntry->idxInstr)
    1140611428#define IEM_MC_PUSH_U32_SREG(a_uSegVal) \
    1140711429    off = iemNativeEmitStackPush(pReNative, off, a_uSegVal,  RT_MAKE_U32_FROM_U8(32,  0, 1, 0), \
    11408                                  (uintptr_t)iemNativeHlpStackPushU32SReg, pCallEntry->idxInstr)
     11430                                 (uintptr_t)iemNativeHlpStackStoreU32SReg, pCallEntry->idxInstr)
    1140911431#define IEM_MC_PUSH_U64(a_u64Value) \
    1141011432    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64,  0, 0, 0), \
    11411                                  (uintptr_t)iemNativeHlpStackPushU64, pCallEntry->idxInstr)
     11433                                 (uintptr_t)iemNativeHlpStackStoreU64, pCallEntry->idxInstr)
    1141211434
    1141311435#define IEM_MC_FLAT32_PUSH_U16(a_u16Value) \
    1141411436    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 32, 0, 0), \
    11415                                  (uintptr_t)iemNativeHlpStackFlat32PushU16, pCallEntry->idxInstr)
     11437                                 (uintptr_t)iemNativeHlpStackFlatStoreU16, pCallEntry->idxInstr)
    1141611438#define IEM_MC_FLAT32_PUSH_U32(a_u32Value) \
    1141711439    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 0, 0), \
    11418                                  (uintptr_t)iemNativeHlpStackFlat32PushU32, pCallEntry->idxInstr)
     11440                                 (uintptr_t)iemNativeHlpStackFlatStoreU32, pCallEntry->idxInstr)
    1141911441#define IEM_MC_FLAT32_PUSH_U32_SREG(a_u32Value) \
    1142011442    off = iemNativeEmitStackPush(pReNative, off, a_u32Value, RT_MAKE_U32_FROM_U8(32, 32, 1, 0), \
    11421                                  (uintptr_t)iemNativeHlpStackFlat32PushU32SReg, pCallEntry->idxInstr)
     11443                                 (uintptr_t)iemNativeHlpStackFlatStoreU32SReg, pCallEntry->idxInstr)
    1142211444
    1142311445#define IEM_MC_FLAT64_PUSH_U16(a_u16Value) \
    1142411446    off = iemNativeEmitStackPush(pReNative, off, a_u16Value, RT_MAKE_U32_FROM_U8(16, 64, 0, 0), \
    11425                                  (uintptr_t)iemNativeHlpStackFlat64PushU16, pCallEntry->idxInstr)
     11447                                 (uintptr_t)iemNativeHlpStackFlatStoreU16, pCallEntry->idxInstr)
    1142611448#define IEM_MC_FLAT64_PUSH_U64(a_u64Value) \
    1142711449    off = iemNativeEmitStackPush(pReNative, off, a_u64Value, RT_MAKE_U32_FROM_U8(64, 64, 0, 0), \
    11428                                  (uintptr_t)iemNativeHlpStackFlat64PushU64, pCallEntry->idxInstr)
     11450                                 (uintptr_t)iemNativeHlpStackFlatStoreU64, pCallEntry->idxInstr)
    1142911451
    1143011452/** IEM_MC[|_FLAT32|_FLAT64]_PUSH_U16/32/32_SREG/64 */
     
    1144411466               || (pReNative->fExec & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT);
    1144511467        Assert(   pfnFunction
    11446                == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU16
    11447                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU32
    11448                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 1, 0) ? (uintptr_t)iemNativeHlpStackFlat32PushU32SReg
    11449                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PushU16
    11450                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlat64PushU64
     11468               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU16
     11469                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU32
     11470                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 32, 1, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU32SReg
     11471                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU16
     11472                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 64, 0, 0) ? (uintptr_t)iemNativeHlpStackFlatStoreU64
    1145111473                   : UINT64_C(0xc000b000a0009000) ));
    1145211474    }
    1145311475    else
    1145411476        Assert(   pfnFunction
    11455                == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU16
    11456                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU32
    11457                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 1, 0) ? (uintptr_t)iemNativeHlpStackPushU32SReg
    11458                    : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackPushU64
     11477               == (  cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(16, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU16
     11478                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU32
     11479                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(32, 0, 1, 0) ? (uintptr_t)iemNativeHlpStackStoreU32SReg
     11480                   : cBitsVarAndFlat == RT_MAKE_U32_FROM_U8(64, 0, 0, 0) ? (uintptr_t)iemNativeHlpStackStoreU64
    1145911481                   : UINT64_C(0xc000b000a0009000) ));
    1146011482#endif
     
    1156411586    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
    1156511587
    11566 
    11567     /* IEMNATIVE_CALL_ARG1_GREG = idxVarValue (first) */
    11568     off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarValue,
    11569                                                     0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
     11588    if (   pReNative->Core.aVars[idxVarValue].idxReg == IEMNATIVE_CALL_ARG1_GREG
     11589        && idxRegEffSp == IEMNATIVE_CALL_ARG2_GREG)
     11590    {
     11591        /* Swap them using ARG0 as temp register: */
     11592        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_CALL_ARG1_GREG);
     11593        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, IEMNATIVE_CALL_ARG2_GREG);
     11594        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, IEMNATIVE_CALL_ARG0_GREG);
     11595    }
     11596    else if (idxRegEffSp != IEMNATIVE_CALL_ARG2_GREG)
     11597    {
     11598        /* IEMNATIVE_CALL_ARG2_GREG = idxVarValue (first!) */
     11599        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarValue,
     11600                                                        0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
     11601
     11602        /* IEMNATIVE_CALL_ARG1_GREG = idxRegEffSp */
     11603        if (idxRegEffSp != IEMNATIVE_CALL_ARG1_GREG)
     11604            off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegEffSp);
     11605    }
     11606    else
     11607    {
     11608        /* IEMNATIVE_CALL_ARG1_GREG = idxRegEffSp (first!) */
     11609        off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxRegEffSp);
     11610
     11611        /* IEMNATIVE_CALL_ARG2_GREG = idxVarValue */
     11612        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarValue, 0 /*offAddend*/,
     11613                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~IEMNATIVE_CALL_ARG1_GREG);
     11614    }
    1157011615
    1157111616    /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
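
The new argument-loading branches above exist to avoid clobbering call argument registers: the value variable may already be sitting in the register that must end up holding the effective stack pointer, and vice versa, so the load order matters and the fully-swapped case needs a scratch register. A minimal illustration of that worst case, mirroring the three ARG0/ARG1/ARG2 moves emitted above (plain C stand-ins for host registers, not the real emitter API):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Worst case: the value is in ARG1 and the effective SP in ARG2, but the
           store helper expects the address in ARG1 and the value in ARG2. */
        uint64_t uArg0 = 0;         /* scratch */
        uint64_t uArg1 = 0x1234;    /* value (must end up in ARG2) */
        uint64_t uArg2 = 0x7ffc;    /* effective SP (must end up in ARG1) */

        uArg0 = uArg1;              /* ARG0 <- ARG1: save the value */
        uArg1 = uArg2;              /* ARG1 <- ARG2: move the address into place */
        uArg2 = uArg0;              /* ARG2 <- ARG0: move the saved value into place */

        assert(uArg1 == 0x7ffc && uArg2 == 0x1234);
        return 0;
    }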
  • trunk/src/VBox/VMM/include/IEMInternal.h

    r102703 → r102766
    52565256void            iemMemFlat64StackPopGRegU16SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
    52575257void            iemMemFlat64StackPopGRegU64SafeJmp(PVMCPUCC pVCpu, uint8_t iGReg) IEM_NOEXCEPT_MAY_LONGJMP;
     5258
     5259void            iemMemStoreStackU16SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint16_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
     5260void            iemMemStoreStackU32SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
     5261void            iemMemStoreStackU32SRegSafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
     5262void            iemMemStoreStackU64SafeJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint64_t uValue) IEM_NOEXCEPT_MAY_LONGJMP;
     5263
     5264
    52585265#endif
    52595266