
Changeset 101162 in vbox for trunk/src/VBox/Runtime/r3/posix


Timestamp: Sep 18, 2023 8:03:52 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 159135

Message: IPRT/mem: Use mempage w/ heap code everywhere all the time. bugref:10370

File: 1 moved

  • trunk/src/VBox/Runtime/r3/posix/mempage-native-posix.cpp

--- r101161
+++ r101162
@@ -1,5 +1,5 @@
 /* $Id$ */
 /** @file
- * IPRT - RTMemPage*, POSIX with heap.
+ * IPRT - rtMemPageNative*, POSIX implementation.
  */
 
@@ -42,14 +42,8 @@
 #include <iprt/mem.h>
 
-#include <iprt/asm.h>
 #include <iprt/assert.h>
-#include <iprt/avl.h>
-#include <iprt/critsect.h>
 #include <iprt/errcore.h>
-#include <iprt/list.h>
-#include <iprt/once.h>
 #include <iprt/param.h>
-#include <iprt/string.h>
-/*#include "internal/mem.h"*/
+#include "internal/mem.h"
 
 #include <stdlib.h>
@@ -61,155 +55,7 @@
 
 
-/*********************************************************************************************************************************
-*   Defined Constants And Macros                                                                                                 *
-*********************************************************************************************************************************/
-/** Threshold at which we switch to simply calling mmap. */
-#define RTMEMPAGE_NATIVE_THRESHOLD      _1M
-/** The size of a heap block (power of two) - in bytes. */
-#define RTMEMPAGE_BLOCK_SIZE            _4M
 
-/** The number of pages per heap block. */
-#define RTMEMPAGE_BLOCK_PAGE_COUNT      (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
-AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
-
-
-/*********************************************************************************************************************************
-*   Structures and Typedefs                                                                                                      *
-*********************************************************************************************************************************/
-/** Pointer to a page heap block. */
-typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;
-
-/**
- * A simple page heap.
- */
-typedef struct RTHEAPPAGE
-{
-    /** Magic number (RTHEAPPAGE_MAGIC). */
-    uint32_t            u32Magic;
-    /** The number of pages in the heap (in BlockTree). */
-    uint32_t            cHeapPages;
-    /** The number of currently free pages. */
-    uint32_t            cFreePages;
-    /** Number of successful allocation calls. */
-    uint32_t            cAllocCalls;
-    /** Number of successful free calls. */
-    uint32_t            cFreeCalls;
-    /** The free call number at which we last tried to minimize the heap. */
-    uint32_t            uLastMinimizeCall;
-    /** Tree of heap blocks. */
-    AVLRPVTREE          BlockTree;
-    /** Allocation hint no 1 (last freed). */
-    PRTHEAPPAGEBLOCK    pHint1;
-    /** Allocation hint no 2 (last alloc). */
-    PRTHEAPPAGEBLOCK    pHint2;
-    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
-     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
-    RTLISTANCHOR        BlockAllocatorChunks;
-    /** Critical section protecting the heap. */
-    RTCRITSECT          CritSect;
-    /** Set if the memory must be allocated with execute access. */
-    bool                fExec;
-} RTHEAPPAGE;
-#define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
-/** Pointer to a page heap. */
-typedef RTHEAPPAGE *PRTHEAPPAGE;
-
-
-/**
- * Describes a page heap block.
- */
-typedef struct RTHEAPPAGEBLOCK
-{
-    /** The AVL tree node core (void pointer range). */
-    AVLRPVNODECORE      Core;
-    /** The number of free pages. */
-    uint32_t            cFreePages;
-    /** Pointer back to the heap. */
-    PRTHEAPPAGE         pHeap;
-    /** Allocation bitmap.  Set bits mark allocated pages. */
-    uint32_t            bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
-    /** Allocation boundary bitmap.  Set bits mark the start of
-     *  allocations. */
-    uint32_t            bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
-    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
-     *  successfully applied. */
-    uint32_t            bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
-    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
-     *  successfully applied. */
-    uint32_t            bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
-} RTHEAPPAGEBLOCK;
-
-
-/**
- * Allocation chunk of RTHEAPPAGEBLOCK structures.
- *
- * This is backed by a 64KB allocation and non-present blocks will be marked as
- * allocated in bmAlloc.
- */
-typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
-{
-    /** List entry. */
-    RTLISTNODE          ListEntry;
-    /** Number of free RTHEAPPAGEBLOCK structures here. */
-    uint32_t            cFree;
-    /** Number of blocks in aBlocks. */
-    uint32_t            cBlocks;
-    /** Allocation bitmap. */
-    uint32_t            bmAlloc[ARCH_BITS == 32 ? 28 : 26];
-    /** Block array. */
-    RT_FLEXIBLE_ARRAY_EXTENSION
-    RTHEAPPAGEBLOCK     aBlocks[RT_FLEXIBLE_ARRAY];
-} RTHEAPPAGEBLOCKALLOCCHUNK;
-AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
-AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
-/** Pointer to an allocation chunk of RTHEAPPAGEBLOCK structures. */
-typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;
-
-/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
-#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS    ((ARCH_BITS == 32 ? 28 : 26) * 32)
-/** The chunk size for the block allocator. */
-#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE    _64K
-
-
-/**
- * Argument package for rtHeapPageAllocCallback.
- */
-typedef struct RTHEAPPAGEALLOCARGS
-{
-    /** The number of pages to allocate. */
-    size_t          cPages;
-    /** Non-null on success.  */
-    void           *pvAlloc;
-    /** RTMEMPAGEALLOC_F_XXX. */
-    uint32_t        fFlags;
-} RTHEAPPAGEALLOCARGS;
-
-
-/*********************************************************************************************************************************
-*   Global Variables                                                                                                             *
-*********************************************************************************************************************************/
-/** Initialize once structure. */
-static RTONCE       g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
-/** The page heap. */
-static RTHEAPPAGE   g_MemPageHeap;
-/** The exec page heap. */
-static RTHEAPPAGE   g_MemExecHeap;
-
-
-/**
- * Native allocation worker for the heap-based RTMemPage implementation.
- */
 DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
 {
-#ifdef RT_OS_OS2
-    ULONG fAlloc = OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE;
-    if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
-        fAlloc |= PAG_EXECUTE;
-    APIRET rc = DosAllocMem(ppvRet, cb, fAlloc);
-    if (rc == NO_ERROR)
-        return VINF_SUCCESS;
-    return RTErrConvertFromOS2(rc);
-
-#else
     void *pvRet = mmap(NULL, cb,
                        PROT_READ | PROT_WRITE | (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE ? PROT_EXEC : 0),
@@ -223,50 +69,29 @@
     *ppvRet = NULL;
     return RTErrConvertFromErrno(errno);
-#endif
 }
 
 
-/**
- * Native allocation worker for the heap-based RTMemPage implementation.
- */
 DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
 {
-#ifdef RT_OS_OS2
-    APIRET rc = DosFreeMem(pv);
-    AssertMsgReturn(rc == NO_ERROR, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb), RTErrConvertFromOS2(rc));
-    RT_NOREF(cb);
-#else
     int rc = munmap(pv, cb);
     AssertMsgReturn(rc == 0, ("rc=%d pv=%p cb=%#zx errno=%d\n", rc, pv, cb, errno), RTErrConvertFromErrno(errno));
-#endif
     return VINF_SUCCESS;
 }
 
 
-/**
- * Native page allocator worker that applies advisory flags to the memory.
- *
- * @returns Set of flags successfully applied
- * @param   pv      The memory block address.
- * @param   cb      The size of the memory block.
- * @param   fFlags  The flags to apply (may include other flags too, ignore).
- */
 DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
 {
     uint32_t fRet = 0;
-#ifdef RT_OS_OS2
-    RT_NOREF(pv, cb, fFlags);
-#else /* !RT_OS_OS2 */
     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
     {
         int rc = mlock(pv, cb);
-# ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
+#ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
         AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
-# endif
+#endif
         if (rc == 0)
             fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
     }
 
-# ifdef MADV_DONTDUMP
+#ifdef MADV_DONTDUMP
     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
     {
@@ -276,22 +101,11 @@
             fRet |= RTMEMPAGEALLOC_F_ADVISE_NO_DUMP;
     }
-# endif
-#endif /* !RT_OS_OS2 */
+#endif
     return fRet;
 }
 
 
-/**
- * Reverts flags previously applied by rtMemPageNativeApplyFlags().
- *
- * @param   pv      The memory block address.
- * @param   cb      The size of the memory block.
- * @param   fFlags  The flags to revert.
- */
 DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
 {
-#ifdef RT_OS_OS2
-    RT_NOREF(pv, cb, fFlags);
-#else /* !RT_OS_OS2 */
     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
     {
@@ -301,5 +115,5 @@
     }
 
-# if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
+#if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
     {
@@ -308,647 +122,5 @@
         RT_NOREF(rc);
     }
-# endif
-#endif /* !RT_OS_OS2 */
+#endif
 }
 
-
-/**
- * Initializes the heap.
- *
- * @returns IPRT status code.
- * @param   pHeap           The page heap to initialize.
- * @param   fExec           Whether the heap memory should be marked as
- *                          executable or not.
- */
-static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
-{
-    int rc = RTCritSectInitEx(&pHeap->CritSect,
-                              RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
-                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
-    if (RT_SUCCESS(rc))
-    {
-        pHeap->cHeapPages           = 0;
-        pHeap->cFreePages           = 0;
-        pHeap->cAllocCalls          = 0;
-        pHeap->cFreeCalls           = 0;
-        pHeap->uLastMinimizeCall    = 0;
-        pHeap->BlockTree            = NULL;
-        pHeap->fExec                = fExec;
-        RTListInit(&pHeap->BlockAllocatorChunks);
-        pHeap->u32Magic             = RTHEAPPAGE_MAGIC;
-    }
-    return rc;
-}
-
-
-/**
- * Deletes the heap and all the memory it tracks.
- *
- * @returns IPRT status code.
- * @param   pHeap           The page heap to delete.
- */
-static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
-{
-    NOREF(pHeap);
-    pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Allocates a RTHEAPPAGEBLOCK.
- *
- * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
- * @param   pHeap   The heap this is for.
- */
-static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
-{
-    /*
-     * Locate a chunk with space and grab a block from it.
-     */
-    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
-    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
-    {
-        if (pChunk->cFree > 0)
-        {
-            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks));
-            if (idxBlock >= 0)
-            {
-                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
-                pChunk->cFree -= 1;
-                return &pChunk->aBlocks[idxBlock];
-            }
-            AssertFailed();
-        }
-    }
-
-    /*
-     * Allocate a new chunk and return the first block in it.
-     */
-    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
-    AssertRCReturn(rc, NULL);
-    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
-                    / sizeof(pChunk->aBlocks[0]);
-    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
-    pChunk->cFree   = pChunk->cBlocks;
-
-    RT_ZERO(pChunk->bmAlloc);
-    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
-    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);
-
-    /*
-     * Allocate the first one.
-     */
-    ASMBitSet(pChunk->bmAlloc, 0);
-    pChunk->cFree -= 1;
-
-    return &pChunk->aBlocks[0];
-}
-
-
-/**
- * Frees a RTHEAPPAGEBLOCK.
- *
- * @param   pHeap   The heap this is for.
- * @param   pBlock  The block to free.
- */
-static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
-{
-    /*
-     * Locate the chunk the block belongs to and mark it as freed.
-     */
-    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
-    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
-    {
-        if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
-        {
-            uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
-            if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
-                pChunk->cFree++;
-            else
-                AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
-            return;
-        }
-    }
-    AssertFailed();
-}
-
-
-/**
- * Applies flags to an allocation.
- *
- * @return  Flags that need to be reverted upon free.
- * @param   pv              The allocation.
- * @param   cb              The size of the allocation (page aligned).
- * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
- */
-DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
-{
-    uint32_t fHandled = 0;
-    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
-        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
-    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
-        RT_BZERO(pv, cb);
-    return fHandled;
-}
-
-
-/**
- * Avoids some gotos in rtHeapPageAllocFromBlock.
- *
- * @returns VINF_SUCCESS.
- * @param   pBlock          The block.
- * @param   iPage           The page to start allocating at.
- * @param   cPages          The number of pages.
- * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
- * @param   ppv             Where to return the allocation address.
- */
-DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
-{
-    PRTHEAPPAGE pHeap = pBlock->pHeap;
-
-    ASMBitSet(&pBlock->bmFirst[0], iPage);
-    pBlock->cFreePages -= cPages;
-    pHeap->cFreePages  -= cPages;
-    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
-        pHeap->pHint2 = pBlock;
-    pHeap->cAllocCalls++;
-
-    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
-    *ppv = pv;
-
-    if (fFlags)
-    {
-        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
-        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
-        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
-            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
-        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
-            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
-    }
-
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Checks if a page range is free in the specified block.
- *
- * @returns @c true if the range is free, @c false if not.
- * @param   pBlock          The block.
- * @param   iFirst          The first page to check.
- * @param   cPages          The number of pages to check.
- */
-DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
-{
-    uint32_t i = iFirst + cPages;
-    while (i-- > iFirst)
-    {
-        if (ASMBitTest(&pBlock->bmAlloc[0], i))
-            return false;
-        Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
-    }
-    return true;
-}
-
-
-/**
- * Tries to allocate a chunk of pages from a heap block.
- *
- * @retval  VINF_SUCCESS on success.
- * @retval  VERR_NO_MEMORY if the allocation failed.
- * @param   pBlock          The block to allocate from.
- * @param   cPages          The size of the allocation.
- * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
- * @param   ppv             Where to return the allocation address on success.
- */
-DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
-{
-    if (pBlock->cFreePages >= cPages)
-    {
-        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
-        Assert(iPage >= 0);
-
-        /* special case: single page. */
-        if (cPages == 1)
-        {
-            ASMBitSet(&pBlock->bmAlloc[0], iPage);
-            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
-        }
-
-        while (   iPage >= 0
-               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
-        {
-            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
-            {
-                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
-                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
-            }
-
-            /* next */
-            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
-            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
-                break;
-            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
-        }
-    }
-
-    return VERR_NO_MEMORY;
-}
-
-
-/**
- * RTAvlrPVDoWithAll callback.
- *
- * @returns 0 to continue the enum, non-zero to quit it.
- * @param   pNode           The node.
- * @param   pvUser          The user argument.
- */
-static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
-{
-    PRTHEAPPAGEBLOCK        pBlock = RT_FROM_MEMBER(pNode,  RTHEAPPAGEBLOCK, Core);
-    RTHEAPPAGEALLOCARGS    *pArgs  = (RTHEAPPAGEALLOCARGS *)pvUser;
-    int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
-    return RT_SUCCESS(rc) ? 1 : 0;
-}
-
-
-/**
- * Worker for RTHeapPageAlloc.
- *
- * @returns IPRT status code
- * @param   pHeap           The heap - locked.
- * @param   cPages          The page count.
- * @param   pszTag          The tag.
- * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
- * @param   ppv             Where to return the address of the allocation
- *                          on success.
- */
-static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
-{
-    int rc;
-    NOREF(pszTag);
-
-    /*
-     * Use the hints first.
-     */
-    if (pHeap->pHint1)
-    {
-        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
-        if (rc != VERR_NO_MEMORY)
-            return rc;
-    }
-    if (pHeap->pHint2)
-    {
-        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
-        if (rc != VERR_NO_MEMORY)
-            return rc;
-    }
-
-    /*
-     * Search the heap for a block with enough free space.
-     *
-     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
-     *      it are the two hints above.
-     */
-    if (pHeap->cFreePages >= cPages)
-    {
-        RTHEAPPAGEALLOCARGS Args;
-        Args.cPages  = cPages;
-        Args.pvAlloc = NULL;
-        Args.fFlags  = fFlags;
-        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
-        if (Args.pvAlloc)
-        {
-            *ppv = Args.pvAlloc;
-            return VINF_SUCCESS;
-        }
-    }
-
-    /*
-     * Didn't find anything, so expand the heap with a new block.
-     */
-    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
-    AssertReturn(pBlock, VERR_NO_MEMORY);
-
-    RTCritSectLeave(&pHeap->CritSect);
-
-    void *pvPages = NULL;
-    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);
-
-    RTCritSectEnter(&pHeap->CritSect);
-    if (RT_FAILURE(rc))
-    {
-        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
-        return rc;
-    }
-
-    RT_ZERO(*pBlock);
-    pBlock->Core.Key        = pvPages;
-    pBlock->Core.KeyLast    = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
-    pBlock->cFreePages      = RTMEMPAGE_BLOCK_PAGE_COUNT;
-    pBlock->pHeap           = pHeap;
-
-    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
-    pHeap->cFreePages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;
-    pHeap->cHeapPages      +=  RTMEMPAGE_BLOCK_PAGE_COUNT;
-
-    /*
-     * Grab memory from the new block (cannot fail).
-     */
-    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
-    Assert(rc == VINF_SUCCESS);
-
-    return rc;
-}
-
-
-/**
- * Allocates one or more pages off the heap.
- *
- * @returns IPRT status code.
- * @param   pHeap           The page heap.
- * @param   cPages          The number of pages to allocate.
- * @param   pszTag          The allocation tag.
- * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
- * @param   ppv             Where to return the pointer to the pages.
- */
-static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
-{
-    /*
-     * Validate input.
-     */
-    AssertPtr(ppv);
-    *ppv = NULL;
-    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
-    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
-    AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
-
-    /*
-     * Grab the lock and call a worker with many returns.
-     */
-    int rc = RTCritSectEnter(&pHeap->CritSect);
-    if (RT_SUCCESS(rc))
-    {
-        rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
-        RTCritSectLeave(&pHeap->CritSect);
-    }
-
-    return rc;
-}
-
-
-/**
- * RTAvlrPVDoWithAll callback.
- *
- * @returns 0 to continue the enum, non-zero to quit it.
- * @param   pNode           The node.
- * @param   pvUser          Pointer to a block pointer variable. For returning
- *                          the address of the block to be freed.
- */
-static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
-{
-    PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
-    if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
-    {
-        *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
-        return 1;
-    }
-    return 0;
-}
-
-
-/**
- * Frees an allocation.
- *
- * @returns IPRT status code.
- * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
- *          heap.
- * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
- *          allocation block.
- * @param   pHeap           The page heap.
- * @param   pv              Pointer to what RTHeapPageAlloc returned.
- * @param   cPages          The number of pages that were allocated.
- */
-static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
-{
-    /*
-     * Validate input.
-     */
-    if (!pv)
-        return VINF_SUCCESS;
-    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
-    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
-
-    /*
-     * Grab the lock and look up the page.
-     */
-    int rc = RTCritSectEnter(&pHeap->CritSect);
-    if (RT_SUCCESS(rc))
-    {
-        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
-        if (pBlock)
-        {
-            /*
-             * Validate the specified address range.
-             */
-            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
-            /* Check the range is within the block. */
-            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
-            /* Check that it's the start of an allocation. */
-            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
-            /* Check that the range ends at an allocation boundary. */
-            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
-                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
-                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
-            /* Check the other pages. */
-            uint32_t const iLastPage = iPage + cPages - 1;
-            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
-                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
-                   && !ASMBitTest(&pBlock->bmFirst[0], i);
-            if (fOk)
-            {
-                /*
-                 * Free the memory.
-                 */
-                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED  : 0)
-                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
-                if (fRevert)
-                {
-                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
-                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
-                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
-                }
-                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
-                ASMBitClear(&pBlock->bmFirst[0], iPage);
-                pBlock->cFreePages += cPages;
-                pHeap->cFreePages  += cPages;
-                pHeap->cFreeCalls++;
-                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
-                    pHeap->pHint1 = pBlock;
-
-                /** @todo Add bitmaps for tracking madvise and mlock so we can undo those. */
-
-                /*
-                 * Shrink the heap. Not very efficient because of the AVL tree.
-                 */
-                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
-                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
-                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
-                   )
-                {
-                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
-                    while (pHeap->cFreePages > cFreePageTarget)
-                    {
-                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;
-
-                        pBlock = NULL;
-                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
-                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
-                        if (!pBlock)
-                            break;
-
-                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
-                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
-                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
-                        pHeap->pHint1      = NULL;
-                        pHeap->pHint2      = NULL;
-                        RTCritSectLeave(&pHeap->CritSect);
-
-                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
-                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
-                        pBlock->cFreePages = 0;
-                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
-
-                        RTCritSectEnter(&pHeap->CritSect);
-                    }
-                }
-            }
-            else
-                rc = VERR_INVALID_POINTER;
-        }
-        else
-            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */
-
-        RTCritSectLeave(&pHeap->CritSect);
-    }
-
-    return rc;
-}
-
-
-/**
- * Initializes the heap.
- *
- * @returns IPRT status code
- * @param   pvUser              Unused.
- */
-static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
-{
-    NOREF(pvUser);
-    int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
-    if (RT_SUCCESS(rc))
-    {
-        rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
-        if (RT_SUCCESS(rc))
-            return rc;
-        RTHeapPageDelete(&g_MemPageHeap);
-    }
-    return rc;
-}
-
-
-/**
- * Allocates memory from the specified heap.
- *
- * @returns Address of the allocated memory.
- * @param   cb                  The number of bytes to allocate.
- * @param   pszTag              The tag.
- * @param   fFlags              RTMEMPAGEALLOC_F_XXX.
- * @param   pHeap               The heap to use.
- */
-static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
-{
-    /*
-     * Validate & adjust the input.
-     */
-    Assert(cb > 0);
-    NOREF(pszTag);
-    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
-
-    /*
-     * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
-     */
-    void *pv = NULL; /* shut up gcc */
-    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
-    {
-        int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
-        if (RT_SUCCESS(rc))
-        {
-            AssertPtr(pv);
-
-            if (fFlags)
-                rtMemPageApplyFlags(pv, cb, fFlags);
-        }
-        else
-            pv = NULL;
-    }
-    else
-    {
-        int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
-        if (RT_SUCCESS(rc))
-            rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
-        if (RT_FAILURE(rc))
-            pv = NULL;
-    }
-
-    return pv;
-}
-
-
-RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
-{
-    return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
-}
-
-
-RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
-{
-    return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
-}
-
-
-RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
-{
-    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
-    return rtMemPageAllocInner(cb, pszTag, fFlags,
-                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
-}
-
-
-RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
-{
-    /*
-     * Validate & adjust the input.
-     */
-    if (!pv)
-        return;
-    AssertPtr(pv);
-    Assert(cb > 0);
-    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
-    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
-
-    /*
-     * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
-     */
-    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
-        rtMemPageNativeFree(pv, cb);
-    else
-    {
-        int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
-        if (rc == VERR_NOT_FOUND)
-            rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
-        AssertRC(rc);
-    }
-}
-
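A note on the geometry fixed by the removed constants: with the common 4 KiB PAGE_SIZE, a 4 MiB RTMEMPAGE_BLOCK_SIZE yields 1024 pages per block, so each per-block bitmap is 1024 / 32 = 32 uint32_t words, and the chunk allocator's (896/832) comment is just 28 * 32 and 26 * 32. A compile-time sanity check of that arithmetic, as a sketch (the EX_* and ex_assert_* names are illustrative, not IPRT API, and hosts with larger page sizes scale accordingly):

    /* Sketch: verify the removed heap geometry at compile time,
     * assuming the common 4 KiB page size. */
    #define EX_PAGE_SIZE    4096u
    #define EX_BLOCK_SIZE   (4u * 1024u * 1024u)            /* RTMEMPAGE_BLOCK_SIZE == _4M */
    #define EX_BLOCK_PAGES  (EX_BLOCK_SIZE / EX_PAGE_SIZE)  /* RTMEMPAGE_BLOCK_PAGE_COUNT */

    typedef char ex_assert_pages[EX_BLOCK_PAGES == 1024    ? 1 : -1]; /* 1024 pages per block */
    typedef char ex_assert_words[EX_BLOCK_PAGES / 32 == 32 ? 1 : -1]; /* 32 words per bitmap  */
    typedef char ex_assert_blk32[28 * 32 == 896            ? 1 : -1]; /* 32-bit max blocks/chunk */
    typedef char ex_assert_blk64[26 * 32 == 832            ? 1 : -1]; /* 64-bit max blocks/chunk */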
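For context on the public entry points at the end of the listing, here is a minimal usage sketch. It is illustrative only: the function name and the 16-page size are invented for the example, error handling is trimmed, and only identifiers that appear in the diff (RTMemPageAllocExTag, RTMemPageFree, the RTMEMPAGEALLOC_F_XXX flags) are relied on. Requests below RTMEMPAGE_NATIVE_THRESHOLD (_1M) are served by the page heap; larger ones go straight to the native mmap() path via rtMemPageNativeAlloc().

    #include <iprt/mem.h>      /* RTMemPageAllocExTag, RTMemPageFree */
    #include <iprt/errcore.h>  /* VINF_SUCCESS, VERR_NO_MEMORY */

    /* Sketch: allocate 16 pages of zeroed, page-aligned memory that is
     * advised out of core dumps, use it, then free it again. */
    static int exampleUsePageMemory(void)
    {
        size_t const cb = 16 * 4096; /* arbitrary example size, 16 x 4 KiB pages */
        void *pv = RTMemPageAllocExTag(cb, RTMEMPAGEALLOC_F_ZERO | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP,
                                       "example");
        if (!pv)
            return VERR_NO_MEMORY;

        /* ... use the zeroed pages ... */

        RTMemPageFree(pv, cb); /* must pass the size that was allocated */
        return VINF_SUCCESS;
    }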
