VirtualBox

Changeset 101148 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Sep 18, 2023 12:57:24 PM (16 months ago)
Author:
vboxsync
Message:

IPRT/mem: Refactoring the rtmempage-exec-mmap-heap-posix.cpp code so the native code is restricted to a few functions. This is in preparation for using this code for windows as well. bugref:10370

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp

    r101142 r101148  
    119119    /** The AVL tree node core (void pointer range). */
    120120    AVLRPVNODECORE      Core;
     121    /** The number of free pages. */
     122    uint32_t            cFreePages;
     123    /** Pointer back to the heap. */
     124    PRTHEAPPAGE         pHeap;
    121125    /** Allocation bitmap.  Set bits marks allocated pages. */
    122126    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
     
    124128     *  allocations. */
    125129    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    126     /** The number of free pages. */
    127     uint32_t            cFreePages;
    128     /** Pointer back to the heap. */
    129     PRTHEAPPAGE         pHeap;
     130    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     131     *  successfully applied. */
     132    uint32_t            bmLockedAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
     133    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     134     *  successfully applied. */
     135    uint32_t            bmNoDumpAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    130136} RTHEAPPAGEBLOCK;
    131137
     
    156162
    157163
     164/**
     165 * Native allocation worker for the heap-based RTMemPage implementation.
     166 */
     167DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
     168{
    158169#ifdef RT_OS_OS2
    159 /*
    160  * A quick mmap/munmap mockup for avoid duplicating lots of good code.
    161  */
    162 # define INCL_BASE
    163 # include <os2.h>
    164 # undef  MAP_PRIVATE
    165 # define MAP_PRIVATE    0
    166 # undef  MAP_ANONYMOUS
    167 # define MAP_ANONYMOUS  0
    168 # undef  MAP_FAILED
    169 # define MAP_FAILED  (void *)-1
    170 # undef mmap
    171 # define mmap   iprt_mmap
    172 # undef munmap
    173 # define munmap iprt_munmap
    174 
    175 static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
    176 {
    177     NOREF(pvWhere); NOREF(fd); NOREF(off);
    178     void   *pv    = NULL;
    179     ULONG  fAlloc = OBJ_ANY | PAG_COMMIT;
    180     if (fProt & PROT_EXEC)
     170    ULONG fAlloc = OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE;
     171    if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
    181172        fAlloc |= PAG_EXECUTE;
    182     if (fProt & PROT_READ)
    183         fAlloc |= PAG_READ;
    184     if (fProt & PROT_WRITE)
    185         fAlloc |= PAG_WRITE;
    186     APIRET rc = DosAllocMem(&pv, cb, fAlloc);
     173    APIRET rc = DosAllocMem(ppvRet, cb, fAlloc);
    187174    if (rc == NO_ERROR)
    188         return pv;
    189     errno = ENOMEM;
    190     return MAP_FAILED;
    191 }
    192 
    193 static int munmap(void *pv, size_t cb)
    194 {
     175        return VINF_SUCCESS;
     176    return RTErrConvertFromOS2(rc);
     177
     178#else
     179    void *pvRet = mmap(NULL, cb,
     180                       PROT_READ | PROT_WRITE | (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE ? PROT_EXEC : 0),
     181                       MAP_PRIVATE | MAP_ANONYMOUS,
     182                       -1, 0);
     183    if (pvRet != MAP_FAILED)
     184    {
     185        *ppvRet = pvRet;
     186        return VINF_SUCCESS;
     187    }
     188    *ppvRet = NULL;
     189    return RTErrConvertFromErrno(errno);
     190#endif
     191}
     192
     193
     194/**
      195 * Native free worker for the heap-based RTMemPage implementation.
     196 */
     197DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
     198{
     199#ifdef RT_OS_OS2
    195200    APIRET rc = DosFreeMem(pv);
    196     if (rc == NO_ERROR)
    197         return 0;
    198     errno = EINVAL;
    199     return -1;
    200 }
    201 
     201    AssertMsgReturn(rc == NO_ERROR, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb), RTErrConvertFromOS2(rc));
     202    RT_NOREF(cb);
     203#else
     204    int rc = munmap(pv, cb);
     205    AssertMsgReturn(rc == 0, ("rc=%d pv=%p cb=%#zx errno=%d\n", rc, pv, cb, errno), RTErrConvertFromErrno(errno));
    202206#endif
     207    return VINF_SUCCESS;
     208}
     209
     210
     211DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
     212{
     213    uint32_t fRet = 0;
     214#ifdef RT_OS_OS2
     215    RT_NOREF(pv, cb, fFlags);
     216#else /* !RT_OS_OS2 */
     217    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
     218    {
     219        int rc = mlock(pv, cb);
     220# ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
     221        AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
     222# endif
     223        if (rc == 0)
     224            fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
     225    }
     226
     227# ifdef MADV_DONTDUMP
     228    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
     229    {
     230        int rc = madvise(pv, cb, MADV_DONTDUMP);
     231        AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
     232        if (rc == 0)
     233            fRet |= RTMEMPAGEALLOC_F_ADVISE_NO_DUMP;
     234    }
     235# endif
     236#endif /* !RT_OS_OS2 */
     237    return fRet;
     238}
     239
     240
     241DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
     242{
     243#ifdef RT_OS_OS2
     244    RT_NOREF(pv, cb, fFlags);
     245#else /* !RT_OS_OS2 */
     246    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
     247    {
     248        int rc = munlock(pv, cb);
     249        AssertMsg(rc == 0, ("munlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
     250        RT_NOREF(rc);
     251    }
     252
     253# if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
     254    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
     255    {
     256        int rc = madvise(pv, cb, MADV_DODUMP);
     257        AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DODUMP -> %d errno=%d\n", pv, cb, rc, errno));
     258        RT_NOREF(rc);
     259    }
     260# endif
     261#endif /* !RT_OS_OS2 */
     262}
     263
    203264
    204265/**
     
    239300{
    240301    NOREF(pHeap);
    241     return VERR_NOT_IMPLEMENTED;
     302    pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
     303    return VINF_SUCCESS;
    242304}
    243305
     
    246308 * Applies flags to an allocation.
    247309 *
      310 * @return  Flags that need to be reverted upon free.
    248311 * @param   pv              The allocation.
    249312 * @param   cb              The size of the allocation (page aligned).
    250313 * @param   fFlags          RTMEMPAGEALLOC_F_XXX.
    251314 */
    252 DECLINLINE(void) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
    253 {
    254 #ifndef RT_OS_OS2
    255     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    256     {
    257         int rc = mlock(pv, cb);
    258 # ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
    259         AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
    260 # endif
    261         NOREF(rc);
    262     }
    263 
    264 # ifdef MADV_DONTDUMP
    265     if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    266     {
    267         int rc = madvise(pv, cb, MADV_DONTDUMP);
    268         AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
    269         NOREF(rc);
    270     }
    271 # endif
    272 #endif
    273 
     315DECLINLINE(uint32_t) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
     316{
     317    uint32_t fHandled = 0;
     318    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
     319        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    274320    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
    275321        RT_BZERO(pv, cb);
     322    return fHandled;
    276323}
    277324
     
    302349
    303350    if (fFlags)
    304         rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
     351    {
     352        uint32_t fHandled = rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
     353        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
     354        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
     355            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
     356        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
     357            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
     358    }
    305359
    306360    return VINF_SUCCESS;
     
    443497
    444498    /*
    445      * Didn't find anytyhing, so expand the heap with a new block.
     499     * Didn't find anything, so expand the heap with a new block.
    446500     */
    447501    RTCritSectLeave(&pHeap->CritSect);
    448     void *pvPages;
    449     pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
    450                    PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
    451                    MAP_PRIVATE | MAP_ANONYMOUS,
    452                    -1, 0);
    453     if (pvPages == MAP_FAILED)
     502
     503    void *pvPages = NULL;
     504    rc = rtMemPageNativeAlloc(RTMEMPAGEPOSIX_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);
     505    if (RT_FAILURE(rc))
    454506    {
    455507        RTCritSectEnter(&pHeap->CritSect);
    456         return RTErrConvertFromErrno(errno);
    457 
     508        return rc;
    458509    }
    459510    /** @todo Eliminate this rtMemBaseAlloc dependency! */
     
    467518    if (!pBlock)
    468519    {
    469         munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
     520        rtMemPageNativeFree(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
    470521        RTCritSectEnter(&pHeap->CritSect);
    471522        return VERR_NO_MEMORY;
     
    602653                 * Free the memory.
    603654                 */
     655                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED  : 0)
     656                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
     657                if (fRevert)
     658                {
     659                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
     660                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
     661                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
     662                }
    604663                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
    605664                ASMBitClear(&pBlock->bmFirst[0], iPage);
     
    638697                        RTCritSectLeave(&pHeap->CritSect);
    639698
    640                         munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
     699                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
    641700                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
    642701                        pBlock->cFreePages = 0;
     
    705764
    706765    /*
    707      * If the allocation is relatively large, we use mmap/munmap directly.
     766     * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
    708767     */
    709768    void *pv = NULL; /* shut up gcc */
    710769    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    711770    {
    712 
    713         pv = mmap(NULL, cb,
    714                   PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
    715                   MAP_PRIVATE | MAP_ANONYMOUS,
    716                   -1, 0);
    717         if (pv != MAP_FAILED)
     771        int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
     772        if (RT_SUCCESS(rc))
    718773        {
    719774            AssertPtr(pv);
     
    759814
    760815    /*
    761      * If the allocation is relatively large, we use mmap/munmap directly.
     816     * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
    762817     */
    763818    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    764     {
    765         int rc = munmap(pv, cb);
    766         AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
    767     }
     819        rtMemPageNativeFree(pv, cb);
    768820    else
    769821    {
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette