VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 101150

Last change on this file since 101150 was 101150, checked in by vboxsync, 19 months ago

IPRT/mem: Eliminated the malloc dependency of the rtmempage-exec-mmap-heap-posix.cpp code. Minimally tested. This is in preparation for using this code on Windows. bugref:10370

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 31.9 KB
Line 
1/* $Id: rtmempage-exec-mmap-heap-posix.cpp 101150 2023-09-18 14:18:29Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "internal/iprt.h"
42#include <iprt/mem.h>
43
44#include <iprt/asm.h>
45#include <iprt/assert.h>
46#include <iprt/avl.h>
47#include <iprt/critsect.h>
48#include <iprt/errcore.h>
49#include <iprt/list.h>
50#include <iprt/once.h>
51#include <iprt/param.h>
52#include <iprt/string.h>
53/*#include "internal/mem.h"*/
54
55#include <stdlib.h>
56#include <errno.h>
57#include <sys/mman.h>
58#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
59# define MAP_ANONYMOUS MAP_ANON
60#endif
61
62
63/*********************************************************************************************************************************
64* Defined Constants And Macros *
65*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap directly. */
#define RTMEMPAGEPOSIX_MMAP_THRESHOLD   _1M
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGEPOSIX_BLOCK_SIZE       _2M

/* Paranoia: the block size must be an exact multiple of the page size. */
AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
/** The number of pages per heap block. */
#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
75
76/*********************************************************************************************************************************
77* Structures and Typedefs *
78*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks, keyed by their memory range. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    RTLISTANCHOR        BlockAllocatorChunks;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
/** Magic value of an initialized RTHEAPPAGE (inverted on deletion). */
#define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
117
/**
 * Describes a page heap block.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range covering the block memory). */
    AVLRPVNODECORE      Core;
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
    /** Allocation bitmap.  Set bits marks allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits marks the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     * successfully applied. */
    uint32_t            bmLockedAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     * successfully applied. */
    uint32_t            bmNoDumpAdviced[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
} RTHEAPPAGEBLOCK;
142
/**
 * Allocation chunk of RTHEAPPAGEBLOCK structures.
 *
 * This is backed by a 64KB allocation and non-present blocks will be marked as
 * allocated in bmAlloc.
 */
typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
{
    /** List entry (RTHEAPPAGE::BlockAllocatorChunks). */
    RTLISTNODE  ListEntry;
    /** Number of free RTHEAPPAGEBLOCK structures here. */
    uint32_t    cFree;
    /** Number of blocks in aBlocks. */
    uint32_t    cBlocks;
    /** Allocation bitmap.  Set bits marks allocated (or non-present) entries. */
    uint32_t    bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    /** Block array. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    RTHEAPPAGEBLOCK aBlocks[RT_FLEXIBLE_ARRAY];
} RTHEAPPAGEBLOCKALLOCCHUNK;
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
/** Pointer to an allocation chunk of RTHEAPPAGEBLOCK structures. */
typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;

/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS    ((ARCH_BITS == 32 ? 28 : 26) * 32)
/** The chunk size for the block allocator. */
#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE    _64K
173
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate (input). */
    size_t      cPages;
    /** Non-null on success (output). */
    void       *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX (input). */
    uint32_t    fFlags;
} RTHEAPPAGEALLOCARGS;
187
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Initialize once structure for the two heaps below (see rtMemPagePosixInitOnce). */
static RTONCE       g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
/** The page heap for regular (non-executable) allocations. */
static RTHEAPPAGE   g_MemPagePosixHeap;
/** The page heap for executable allocations. */
static RTHEAPPAGE   g_MemExecPosixHeap;
197
198
199/**
200 * Native allocation worker for the heap-based RTMemPage implementation.
201 */
202DECLHIDDEN(int) rtMemPageNativeAlloc(size_t cb, uint32_t fFlags, void **ppvRet)
203{
204#ifdef RT_OS_OS2
205 ULONG fAlloc = OBJ_ANY | PAG_COMMIT | PAG_READ | PAG_WRITE;
206 if (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE)
207 fAlloc |= PAG_EXECUTE;
208 APIRET rc = DosAllocMem(ppvRet, cb, fAlloc);
209 if (rc == NO_ERROR)
210 return VINF_SUCCESS;
211 return RTErrConvertFromOS2(rc);
212
213#else
214 void *pvRet = mmap(NULL, cb,
215 PROT_READ | PROT_WRITE | (fFlags & RTMEMPAGEALLOC_F_EXECUTABLE ? PROT_EXEC : 0),
216 MAP_PRIVATE | MAP_ANONYMOUS,
217 -1, 0);
218 if (pvRet != MAP_FAILED)
219 {
220 *ppvRet = pvRet;
221 return VINF_SUCCESS;
222 }
223 *ppvRet = NULL;
224 return RTErrConvertFromErrno(errno);
225#endif
226}
227
228
/**
 * Native freeing worker for the heap-based RTMemPage implementation.
 *
 * @returns IPRT status code.
 * @param   pv      Start of the allocation (as returned by
 *                  rtMemPageNativeAlloc).
 * @param   cb      Size of the allocation.
 */
DECLHIDDEN(int) rtMemPageNativeFree(void *pv, size_t cb)
{
#ifdef RT_OS_OS2
    APIRET rc = DosFreeMem(pv);
    AssertMsgReturn(rc == NO_ERROR, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb), RTErrConvertFromOS2(rc));
    RT_NOREF(cb); /* size is not passed to DosFreeMem */
#else
    int rc = munmap(pv, cb);
    AssertMsgReturn(rc == 0, ("rc=%d pv=%p cb=%#zx errno=%d\n", rc, pv, cb, errno), RTErrConvertFromErrno(errno));
#endif
    return VINF_SUCCESS;
}
244
245
/**
 * Applies the RTMEMPAGEALLOC_F_ADVISE_XXX flags to an allocation using native
 * means (mlock / madvise).
 *
 * @returns Mask of the flags that were successfully applied and therefore must
 *          be reverted (rtMemPageNativeRevertFlags) before the memory is freed.
 * @param   pv      Start of the allocation.
 * @param   cb      Size of the allocation in bytes.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 */
DECLHIDDEN(uint32_t) rtMemPageNativeApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
    uint32_t fRet = 0;
#ifdef RT_OS_OS2
    RT_NOREF(pv, cb, fFlags); /* no implementation on OS/2 */
#else /* !RT_OS_OS2 */
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    {
        int rc = mlock(pv, cb);
# ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
        AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
# endif
        /* Only report the flag when the call actually succeeded. */
        if (rc == 0)
            fRet |= RTMEMPAGEALLOC_F_ADVISE_LOCKED;
    }

# ifdef MADV_DONTDUMP
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    {
        int rc = madvise(pv, cb, MADV_DONTDUMP);
        AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
        if (rc == 0)
            fRet |= RTMEMPAGEALLOC_F_ADVISE_NO_DUMP;
    }
# endif
#endif /* !RT_OS_OS2 */
    return fRet;
}
274
275
/**
 * Reverts flags previously applied by rtMemPageNativeApplyFlags.
 *
 * @param   pv      Start of the allocation.
 * @param   cb      Size of the allocation in bytes.
 * @param   fFlags  The RTMEMPAGEALLOC_F_ADVISE_XXX flags that were reported as
 *                  successfully applied and now must be undone.
 */
DECLHIDDEN(void) rtMemPageNativeRevertFlags(void *pv, size_t cb, uint32_t fFlags)
{
#ifdef RT_OS_OS2
    RT_NOREF(pv, cb, fFlags); /* nothing was applied on OS/2 */
#else /* !RT_OS_OS2 */
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
    {
        int rc = munlock(pv, cb);
        AssertMsg(rc == 0, ("munlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
        RT_NOREF(rc);
    }

# if defined(MADV_DONTDUMP) && defined(MADV_DODUMP)
    if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
    {
        int rc = madvise(pv, cb, MADV_DODUMP);
        AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DODUMP -> %d errno=%d\n", pv, cb, rc, errno));
        RT_NOREF(rc);
    }
# endif
#endif /* !RT_OS_OS2 */
}
298
299
/**
 * Initializes the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap to initialize.
 * @param   fExec   Whether the heap memory should be marked as
 *                  executable or not.
 */
static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
{
    /* Initialize the critsect without lock validation or nesting support. */
    int rc = RTCritSectInitEx(&pHeap->CritSect,
                              RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
    if (RT_SUCCESS(rc))
    {
        pHeap->cHeapPages = 0;
        pHeap->cFreePages = 0;
        pHeap->cAllocCalls = 0;
        pHeap->cFreeCalls = 0;
        pHeap->uLastMinimizeCall = 0;
        pHeap->BlockTree = NULL;
        pHeap->fExec = fExec;
        RTListInit(&pHeap->BlockAllocatorChunks);
        /* Set the magic last, after the rest of the structure is in order. */
        pHeap->u32Magic = RTHEAPPAGE_MAGIC;
    }
    return rc;
}
327
328
329/**
330 * Deletes the heap and all the memory it tracks.
331 *
332 * @returns IPRT status code.
333 * @param pHeap The page heap to delete.
334 */
335static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
336{
337 NOREF(pHeap);
338 pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
339 return VINF_SUCCESS;
340}
341
342
/**
 * Allocates a RTHEAPPAGEBLOCK.
 *
 * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
 * @param   pHeap   The heap this is for.  Caller owns CritSect.
 */
static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
{
    /*
     * Locate a chunk with space and grab a block from it.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if (pChunk->cFree > 0)
        {
            /* Only scan as many bits as there are actual block structures. */
            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks));
            if (idxBlock >= 0)
            {
                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
                pChunk->cFree -= 1;
                return &pChunk->aBlocks[idxBlock];
            }
            AssertFailed(); /* cFree and the bitmap are out of sync */
        }
    }

    /*
     * Allocate a new chunk and return the first block in it.
     */
    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    AssertRCReturn(rc, NULL);
    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
                    / sizeof(pChunk->aBlocks[0]);
    /* NOTE(review): the clamp condition uses '<'; if cBlocks computes to exactly
       MAX_BLOCKS the assertion fires (harmlessly) - '<=' may have been intended. */
    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    pChunk->cFree = pChunk->cBlocks;

    RT_ZERO(pChunk->bmAlloc);
    /* Mark the bitmap tail beyond cBlocks as permanently allocated. */
    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);

    /*
     * Allocate the first one.
     */
    ASMBitSet(pChunk->bmAlloc, 0);
    pChunk->cFree -= 1;

    return &pChunk->aBlocks[0];
}
392
393
/**
 * Frees a RTHEAPPAGEBLOCK.
 *
 * @param   pHeap   The heap this is for.
 * @param   pBlock  The block to free.
 */
static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
{
    /*
     * Locate the chunk the block belongs to and mark it as freed.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        /* Single unsigned comparison covers both below-start and past-end. */
        if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
        {
            uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
            if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
                pChunk->cFree++;
            else
                AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock));
            return;
        }
    }
    AssertFailed(); /* pBlock does not belong to any known chunk */
}
420
421
422/**
423 * Applies flags to an allocation.
424 *
425 * @return Flags that eeds to be reverted upon free.
426 * @param pv The allocation.
427 * @param cb The size of the allocation (page aligned).
428 * @param fFlags RTMEMPAGEALLOC_F_XXX.
429 */
430DECLINLINE(uint32_t) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
431{
432 uint32_t fHandled = 0;
433 if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
434 fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
435 if (fFlags & RTMEMPAGEALLOC_F_ZERO)
436 RT_BZERO(pv, cb);
437 return fHandled;
438}
439
440
/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * The caller has already set the bmAlloc bits for the range; this marks the
 * allocation start, updates statistics and hints, and applies the flags.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    /* Record the allocation start so RTHeapPageFree can validate the size. */
    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= cPages;
    pHeap->cFreePages -= cPages;
    /* Update hint 2 if this block now has more free pages than the current hint. */
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
    {
        uint32_t fHandled = rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
        /* Remember which advice actually took, so it can be undone at free time. */
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    }

    return VINF_SUCCESS;
}
477
478
479/**
480 * Checks if a page range is free in the specified block.
481 *
482 * @returns @c true if the range is free, @c false if not.
483 * @param pBlock The block.
484 * @param iFirst The first page to check.
485 * @param cPages The number of pages to check.
486 */
487DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
488{
489 uint32_t i = iFirst + cPages;
490 while (i-- > iFirst)
491 {
492 if (ASMBitTest(&pBlock->bmAlloc[0], i))
493 return false;
494 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
495 }
496 return true;
497}
498
499
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0); /* cFreePages > 0, so a clear bit must exist */

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Scan for a run of cPages clear bits, restarting at each free gap. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip past the current allocated stretch and continue at
               the next free page. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
543
544
545/**
546 * RTAvlrPVDoWithAll callback.
547 *
548 * @returns 0 to continue the enum, non-zero to quit it.
549 * @param pNode The node.
550 * @param pvUser The user argument.
551 */
552static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
553{
554 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
555 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
556 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
557 return RT_SUCCESS(rc) ? 1 : 0;
558}
559
560
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked.
 * @param   cPages  The page count.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    /* Drop the lock while doing the native allocation; pBlock is not reachable
       by others until it is inserted into the tree below. */
    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGEPOSIX_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        /* Return the unused block structure to the block allocator. */
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
649
650
/**
 * Allocates one or more pages off the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap.
 * @param   cPages  The number of pages to allocate.
 * @param   pszTag  The allocation tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the pointer to the pages.
 */
static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    /*
     * Validate input.
     */
    AssertPtr(ppv);
    *ppv = NULL;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
    /* NOTE(review): this compares a page count against a byte size (_2M); a
       tighter bound would be RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT.  Harmless in
       practice since rtMemPagePosixAlloc only uses the heap below 1MB. */
    AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);

    /*
     * Grab the lock and call a worker with many returns.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
684
685
686/**
687 * RTAvlrPVDoWithAll callback.
688 *
689 * @returns 0 to continue the enum, non-zero to quit it.
690 * @param pNode The node.
691 * @param pvUser Pointer to a block pointer variable. For returning
692 * the address of the block to be freed.
693 */
694static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
695{
696 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
697 if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
698 {
699 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
700 return 1;
701 }
702 return 0;
703}
704
705
/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.
 * @param   cPages  The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages.
               NOTE(review): the loop stops before iLastPage, so the final page's
               bmAlloc bit is never verified - confirm that leniency is intended. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                /* Testing the first page suffices: the advice bitmaps were set
                   for the whole range at allocation time. */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages += cPages;
                pHeap->cFreeCalls++;
                /* Update hint 1 if this block now has more free pages than the hint. */
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        /* Find a fully free block, preferring high addresses. */
                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Detach the block while holding the lock, then release
                           the memory to the OS without it. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1 = NULL;
                        pHeap->pHint2 = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        /* NOTE(review): this walks BlockAllocatorChunks without
                           holding CritSect - looks racy, confirm. */
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so rtMemPagePosixFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
823
824
/**
 * RTOnce callback that initializes the two global heaps.
 *
 * @returns IPRT status code
 * @param   pvUser  Unused.
 */
static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser)
{
    NOREF(pvUser);
    int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
    if (RT_SUCCESS(rc))
    {
        rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
        if (RT_SUCCESS(rc))
            return rc;
        /* Tear the first heap down again if the second one failed. */
        RTHeapPageDelete(&g_MemPagePosixHeap);
    }
    return rc;
}
844
845
/**
 * Allocates memory from the specified heap.
 *
 * @returns Address of the allocated memory, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   pHeap   The heap to use.
 */
static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
{
    /*
     * Validate & adjust the input.
     */
    Assert(cb > 0);
    NOREF(pszTag);
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
     */
    void *pv = NULL; /* shut up gcc */
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
    {
        int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
        if (RT_SUCCESS(rc))
        {
            AssertPtr(pv);

            /* The heap path applies the flags internally; the direct path has
               to do it here. */
            if (fFlags)
                rtMemPagePosixApplyFlags(pv, cb, fFlags);
        }
        else
            pv = NULL;
    }
    else
    {
        /* Lazily initialize the global heaps on first use. */
        int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL);
        if (RT_SUCCESS(rc))
            rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
        if (RT_FAILURE(rc))
            pv = NULL;
    }

    return pv;
}
892
893
/**
 * Free memory allocated by rtMemPagePosixAlloc.
 *
 * @param   pv      The address of the memory to free.
 * @param   cb      The size.
 * @param   pHeap1  The most probable heap.
 * @param   pHeap2  The less probable heap.
 */
static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap1, PRTHEAPPAGE pHeap2)
{
    /*
     * Validate & adjust the input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
     */
    if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
        rtMemPageNativeFree(pv, cb);
    else
    {
        /* VERR_NOT_FOUND means the range isn't tracked by the first heap, so
           retry with the other one. */
        int rc = RTHeapPageFree(pHeap1, pv, cb >> PAGE_SHIFT);
        if (rc == VERR_NOT_FOUND)
            rc = RTHeapPageFree(pHeap2, pv, cb >> PAGE_SHIFT);
        AssertRC(rc);
    }
}
927
928
929
930
931
/** Allocates page-aligned memory from the regular (non-executable) heap. */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemPagePosixHeap);
}
936
937
/** Allocates zeroed page-aligned memory from the regular (non-executable) heap. */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPagePosixHeap);
}
942
943
/** Allocates page-aligned memory with RTMEMPAGEALLOC_F_XXX flags, selecting the
 *  executable heap when RTMEMPAGEALLOC_F_EXECUTABLE is given. */
RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPagePosixAlloc(cb, pszTag, fFlags,
                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPagePosixHeap : &g_MemExecPosixHeap);
}
950
951
/** Frees memory returned by the RTMemPageAlloc*Tag APIs, trying the regular
 *  heap first and the exec heap second. */
RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap, &g_MemExecPosixHeap);
}
956
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette