VirtualBox

Changeset 101151 in vbox for trunk/src/VBox/Runtime/r3/posix


Timestamp: Sep 18, 2023 2:29:04 PM (20 months ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 159124
Message:

IPRT/mem: Eliminated use of 'posix' in the rtmempage-exec-mmap-heap-posix.cpp code as it will soon be made generic. bugref:10370
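
The public entry points moved into this file by the change below (RTMemPageAllocTag, RTMemPageAllocZTag, RTMemPageAllocExTag, RTMemPageFree) keep their IPRT signatures; only the internal worker and globals are renamed. As a quick orientation, here is a minimal caller-side sketch; the include, size and tag string are illustrative and not taken from the changeset:

    #include <iprt/mem.h>
    #include <iprt/errcore.h>

    static int exampleUsePageAllocator(void)
    {
        size_t const cb = 16 * _4K;                         /* hypothetical request: a few pages, well below the 1 MiB native threshold */
        void *pv = RTMemPageAllocZTag(cb, "example/pages"); /* zeroed, page-aligned allocation from the page heap */
        if (!pv)
            return VERR_NO_MEMORY;
        /* ... use the pages ... */
        RTMemPageFree(pv, cb);                              /* the original size must be passed back */
        return VINF_SUCCESS;
    }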

File: 1 edited

Legend: unmodified lines are prefixed with a space, added lines with '+', removed lines with '-'.
  • trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp

--- r101150
+++ r101151

@@ ... @@
 *********************************************************************************************************************************/
 /** Threshold at which to we switch to simply calling mmap. */
-#define RTMEMPAGEPOSIX_MMAP_THRESHOLD   _1M
+#define RTMEMPAGE_NATIVE_THRESHOLD      _1M
 /** The size of a heap block (power of two) - in bytes. */
-#define RTMEMPAGEPOSIX_BLOCK_SIZE       _2M
-
-AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
+#define RTMEMPAGE_BLOCK_SIZE            _4M
+
 /** The number of pages per heap block. */
-#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
+#define RTMEMPAGE_BLOCK_PAGE_COUNT      (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
+AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
 
 
@@ ... @@
     PRTHEAPPAGE         pHeap;
     /** Allocation bitmap.  Set bits marks allocated pages. */
-    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
+    uint32_t            bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
     /** Allocation boundrary bitmap.  Set bits marks the start of
      *  allocations. */
-    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
+    uint32_t            bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
     /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
      *  successfully applied. */
-    uint32_t            bmLockedAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
+    uint32_t            bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
     /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
      *  successfully applied. */
-    uint32_t            bmNoDumpAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
+    uint32_t            bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
 } RTHEAPPAGEBLOCK;
 
@@ ... @@
 *********************************************************************************************************************************/
 /** Initialize once structure. */
-static RTONCE       g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
+static RTONCE       g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
 /** The page heap. */
-static RTHEAPPAGE   g_MemPagePosixHeap;
+static RTHEAPPAGE   g_MemPageHeap;
 /** The exec page heap. */
-static RTHEAPPAGE   g_MemExecPosixHeap;
+static RTHEAPPAGE   g_MemExecHeap;
 
 
@@ ... @@
 
 
+/**
+ * Native page allocator worker that applies advisory flags to the memory.
+ *
+ * @returns Set of flags succesfully applied
+ * @param   pv      The memory block address.
+ * @param   cb      The size of the memory block.
+ * @param   fFlags  The flags to apply (may include other flags too, ignore).
+ */
 DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
 {
@@ ... @@
 
 
+/**
+ * Reverts flags previously applied by rtMemPageNativeApplyFlags().
+ *
+ * @param   pv      The memory block address.
+ * @param   cb      The size of the memory block.
+ * @param   fFlags  The flags to revert.
+ */
 DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
 {
@@ ... @@
  * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
  */
-DECLINLINE(uint32_t) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
+DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
 {
     uint32_t fHandled = 0;
@@ ... @@
     if (fFlags)
     {
-        uint32_t fHandled = rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
+        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
         Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
         if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
@@ ... @@
     if (pBlock->cFreePages >= cPages)
     {
-        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
+        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
         Assert(iPage >= 0);
 
@@ ... @@
 
         while (   iPage >= 0
-               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
+               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
         {
             if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
@@ ... @@
 
             /* next */
-            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
-            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
+            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
+            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                 break;
-            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
+            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
         }
     }
@@ ... @@
 
     void *pvPages = NULL;
-    rc = rtMemPageNativeAlloc(RTMEMPAGEPOSIX_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);
+    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);
 
     RTCritSectEnter(&pHeap->CritSect);
@@ ... @@
     RT_ZERO(*pBlock);
     pBlock->Core.Key        = pvPages;
-    pBlock->Core.KeyLast    = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
-    pBlock->cFreePages      = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
+    pBlock->Core.KeyLast    = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
+    pBlock->cFreePages      = RTMEMPAGE_BLOCK_PAGE_COUNT;
     pBlock->pHeap           = pHeap;
 
     bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
-    pHeap->cFreePages      +=  RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
-    pHeap->cHeapPages      +=  RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
+    pHeap->cFreePages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;
+    pHeap->cHeapPages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;
 
     /*
@@ ... @@
     AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
     AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
-    AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
+    AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
 
     /*
@@ ... @@
 {
     PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
-    if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
+    if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
     {
         *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
@@ ... @@
             uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
             /* Check the range is within the block. */
-            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
+            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
             /* Check that it's the start of an allocation. */
             fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
             /* Check that the range ends at an allocation boundrary. */
-            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
+            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                           || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                           || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
@@ ... @@
                  * Shrink the heap. Not very efficient because of the AVL tree.
                  */
-                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
+                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                     && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
-                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
+                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                    )
                 {
@@ ... @@
 
                         void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
-                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
-                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
+                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
+                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                         pHeap->pHint1      = NULL;
                         pHeap->pHint2      = NULL;
                         RTCritSectLeave(&pHeap->CritSect);
 
-                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
+                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                         pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                         pBlock->cFreePages = 0;
@@ ... @@
         }
         else
-            rc = VERR_NOT_FOUND; /* Distinct return code for this so rtMemPagePosixFree and others can try alternative heaps. */
+            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */
 
         RTCritSectLeave(&pHeap->CritSect);
@@ ... @@
  * @param   pvUser              Unused.
  */
-static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser)
+static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
 {
     NOREF(pvUser);
-    int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
+    int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
     if (RT_SUCCESS(rc))
     {
-        rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
+        rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
         if (RT_SUCCESS(rc))
             return rc;
-        RTHeapPageDelete(&g_MemPagePosixHeap);
+        RTHeapPageDelete(&g_MemPageHeap);
     }
     return rc;
@@ ... @@
  * @param   pHeap               The heap to use.
  */
-static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
+static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
 {
     /*
@@ ... @@
      */
     void *pv = NULL; /* shut up gcc */
-    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
+    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
     {
         int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
@@ ... @@
 
             if (fFlags)
-                rtMemPagePosixApplyFlags(pv, cb, fFlags);
+                rtMemPageApplyFlags(pv, cb, fFlags);
         }
         else
@@ ... @@
     else
     {
-        int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL);
+        int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
         if (RT_SUCCESS(rc))
             rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
@@ ... @@
 
 
-/**
- * Free memory allocated by rtMemPagePosixAlloc.
- *
- * @param   pv      The address of the memory to free.
- * @param   cb      The size.
- * @param   pHeap1  The most probable heap.
- * @param   pHeap2  The less probable heap.
- */
-static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap1, PRTHEAPPAGE pHeap2)
+RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+    return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
+}
+
+
+RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
+{
+    return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
+}
+
+
+RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
+{
+    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
+    return rtMemPageAllocInner(cb, pszTag, fFlags,
+                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
+}
+
+
+RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
 {
     /*
@@ ... @@
      * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
      */
-    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
+    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
         rtMemPageNativeFree(pv, cb);
     else
     {
-        int rc = RTHeapPageFree(pHeap1, pv, cb >> PAGE_SHIFT);
+        int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
         if (rc == VERR_NOT_FOUND)
-            rc = RTHeapPageFree(pHeap2, pv, cb >> PAGE_SHIFT);
+            rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
         AssertRC(rc);
     }
 }
 
-
-
-
-
-RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
-{
-    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemPagePosixHeap);
-}
-
-
-RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
-{
-    return rtMemPagePosixAlloc(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPagePosixHeap);
-}
-
-
-RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
-{
-    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
-    return rtMemPagePosixAlloc(cb, pszTag, fFlags,
-                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPagePosixHeap : &g_MemExecPosixHeap);
-}
-
-
-RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
-{
-    rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap, &g_MemExecPosixHeap);
-}
-
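
Besides the renaming, the change also doubles the heap block size from _2M to _4M (first hunk) and rewrites the AssertCompile in terms of the derived page count. Assuming the usual 4 KiB PAGE_SIZE (not stated in the diff itself), the per-block bookkeeping scales as follows:

    /* Rough arithmetic, assuming PAGE_SIZE == _4K (4096 bytes):
     *   r101150: RTMEMPAGEPOSIX_BLOCK_SIZE = _2M -> 2097152 / 4096 =  512 pages ->  512 / 32 = 16 uint32_t per bitmap
     *   r101151: RTMEMPAGE_BLOCK_SIZE      = _4M -> 4194304 / 4096 = 1024 pages -> 1024 / 32 = 32 uint32_t per bitmap
     * so each of the four bitmaps in RTHEAPPAGEBLOCK (bmAlloc, bmFirst, bmLockedAdviced, bmNoDumpAdviced)
     * grows from 64 to 128 bytes. */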