VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/mempage-heap.cpp@ 101162

Last change on this file since 101162 was 101162, checked in by vboxsync, 17 months ago

IPRT/mem: Use mempage w/ heap code everywhere all the time. bugref:10370

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.4 KB
Line 
1/* $Id: mempage-heap.cpp 101162 2023-09-18 20:03:52Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#include "internal/iprt.h"
42#include <iprt/mem.h>
43
44#include <iprt/asm.h>
45#include <iprt/assert.h>
46#include <iprt/avl.h>
47#include <iprt/critsect.h>
48#include <iprt/errcore.h>
49#include <iprt/list.h>
50#include <iprt/once.h>
51#include <iprt/param.h>
52#include <iprt/string.h>
53#include "internal/mem.h"
54
55
56
/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling the native allocator directly. */
#define RTMEMPAGE_NATIVE_THRESHOLD      _1M
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGE_BLOCK_SIZE            _4M

/** The number of pages per heap block. */
#define RTMEMPAGE_BLOCK_PAGE_COUNT      (RTMEMPAGE_BLOCK_SIZE / PAGE_SIZE)
AssertCompile(RTMEMPAGE_BLOCK_SIZE == RTMEMPAGE_BLOCK_PAGE_COUNT * PAGE_SIZE);
68
69
70/*********************************************************************************************************************************
71* Structures and Typedefs *
72*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 *
 * Tracks RTMEMPAGE_BLOCK_SIZE blocks of pages in an AVL tree keyed by address
 * range, with two hint pointers to avoid scanning the whole tree on each call.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks, keyed by address range. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** The allocation chunks for the RTHEAPPAGEBLOCK allocator
     * (RTHEAPPAGEBLOCKALLOCCHUNK). */
    RTLISTANCHOR        BlockAllocatorChunks;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
/** Magic value for RTHEAPPAGE::u32Magic. */
#define RTHEAPPAGE_MAGIC        UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
110
111
/**
 * Describes a page heap block.
 *
 * Each block covers RTMEMPAGE_BLOCK_PAGE_COUNT pages of memory whose state is
 * tracked by the bitmaps below (one bit per page in each).
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range covering the block's memory). */
    AVLRPVNODECORE      Core;
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_LOCKED has been
     * successfully applied. */
    uint32_t            bmLockedAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
    /** Bitmap tracking pages where RTMEMPAGEALLOC_F_ADVISE_NO_DUMP has been
     * successfully applied. */
    uint32_t            bmNoDumpAdviced[RTMEMPAGE_BLOCK_PAGE_COUNT / 32];
} RTHEAPPAGEBLOCK;
136
/**
 * Allocation chunk of RTHEAPPAGEBLOCK structures.
 *
 * This is backed by a 64KB allocation and non-present blocks will be marked as
 * allocated in bmAlloc.
 */
typedef struct RTHEAPPAGEBLOCKALLOCCHUNK
{
    /** List entry (RTHEAPPAGE::BlockAllocatorChunks). */
    RTLISTNODE      ListEntry;
    /** Number of free RTHEAPPAGEBLOCK structures here. */
    uint32_t        cFree;
    /** Number of blocks in aBlocks. */
    uint32_t        cBlocks;
    /** Allocation bitmap.  Sized so that aBlocks ends up 64 byte aligned
     * (see the AssertCompileMemberAlignment below). */
    uint32_t        bmAlloc[ARCH_BITS == 32 ? 28 : 26];
    /** Block array. */
    RT_FLEXIBLE_ARRAY_EXTENSION
    RTHEAPPAGEBLOCK aBlocks[RT_FLEXIBLE_ARRAY];
} RTHEAPPAGEBLOCKALLOCCHUNK;
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, bmAlloc, 8);
AssertCompileMemberAlignment(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks, 64);
/** Pointer to an allocation chunk of RTHEAPPAGEBLOCKALLOCCHUNK structures. */
typedef RTHEAPPAGEBLOCKALLOCCHUNK *PRTHEAPPAGEBLOCKALLOCCHUNK;

/** Max number of blocks one RTHEAPPAGEBLOCKALLOCCHUNK can track (896/832). */
#define RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS    ((ARCH_BITS == 32 ? 28 : 26) * 32)
/** The chunk size for the block allocator. */
#define RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE    _64K
166
167
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t      cPages;
    /** Non-null on success (set by the callback). */
    void       *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t    fFlags;
} RTHEAPPAGEALLOCARGS;
180
181
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Initialize once structure guarding the two heaps below. */
static RTONCE     g_MemPageHeapInitOnce = RTONCE_INITIALIZER;
/** The page heap (non-executable memory). */
static RTHEAPPAGE g_MemPageHeap;
/** The exec page heap (RTMEMPAGEALLOC_F_EXECUTABLE allocations). */
static RTHEAPPAGE g_MemExecHeap;
192
193/**
194 * Initializes the heap.
195 *
196 * @returns IPRT status code.
197 * @param pHeap The page heap to initialize.
198 * @param fExec Whether the heap memory should be marked as
199 * executable or not.
200 */
201static int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
202{
203 int rc = RTCritSectInitEx(&pHeap->CritSect,
204 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
205 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
206 if (RT_SUCCESS(rc))
207 {
208 pHeap->cHeapPages = 0;
209 pHeap->cFreePages = 0;
210 pHeap->cAllocCalls = 0;
211 pHeap->cFreeCalls = 0;
212 pHeap->uLastMinimizeCall = 0;
213 pHeap->BlockTree = NULL;
214 pHeap->fExec = fExec;
215 RTListInit(&pHeap->BlockAllocatorChunks);
216 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
217 }
218 return rc;
219}
220
221
222/**
223 * Deletes the heap and all the memory it tracks.
224 *
225 * @returns IPRT status code.
226 * @param pHeap The page heap to delete.
227 */
228static int RTHeapPageDelete(PRTHEAPPAGE pHeap)
229{
230 NOREF(pHeap);
231 pHeap->u32Magic = ~RTHEAPPAGE_MAGIC;
232 return VINF_SUCCESS;
233}
234
235
/**
 * Allocates a RTHEAPPAGEBLOCK.
 *
 * @returns Pointer to RTHEAPPAGEBLOCK on success, NULL on failure.
 * @param   pHeap   The heap this is for.
 */
static PRTHEAPPAGEBLOCK rtHeapPageIntBlockAllocatorAlloc(PRTHEAPPAGE pHeap)
{
    /*
     * Locate a chunk with space and grab a block from it.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        if (pChunk->cFree > 0)
        {
            /* A clear bit is a free slot; never scan past the real block count. */
            int idxBlock = ASMBitFirstClear(&pChunk->bmAlloc[0], RT_MIN(RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks));
            if (idxBlock >= 0)
            {
                ASMBitSet(&pChunk->bmAlloc[0], idxBlock);
                pChunk->cFree -= 1;
                return &pChunk->aBlocks[idxBlock];
            }
            AssertFailed(); /* cFree and the bitmap disagree. */
        }
    }

    /*
     * Allocate a new chunk and return the first block in it.
     */
    int rc = rtMemPageNativeAlloc(RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE, 0, (void **)&pChunk);
    AssertRCReturn(rc, NULL);
    /* How many whole RTHEAPPAGEBLOCK structures fit after the chunk header. */
    pChunk->cBlocks = (RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE - RT_UOFFSETOF(RTHEAPPAGEBLOCKALLOCCHUNK, aBlocks))
                    / sizeof(pChunk->aBlocks[0]);
    /* NOTE(review): '<' here looks like it was meant to be '<='; the clamp is
       harmless, but the assertion would trigger on an exact fit -- confirm. */
    AssertStmt(pChunk->cBlocks < RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS, pChunk->cBlocks = RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    pChunk->cFree = pChunk->cBlocks;

    /* Mark the bitmap tail (slots beyond cBlocks) as permanently allocated. */
    RT_ZERO(pChunk->bmAlloc);
    ASMBitSetRange(pChunk->bmAlloc, pChunk->cBlocks, RTHEAPPAGEBLOCKALLOCCHUNK_MAX_BLOCKS);
    RTListPrepend(&pHeap->BlockAllocatorChunks, &pChunk->ListEntry);

    /*
     * Allocate the first one.
     */
    ASMBitSet(pChunk->bmAlloc, 0);
    pChunk->cFree -= 1;

    return &pChunk->aBlocks[0];
}
285
286
/**
 * Frees a RTHEAPPAGEBLOCK.
 *
 * @param   pHeap   The heap this is for.
 * @param   pBlock  The block to free.
 */
static void rtHeapPageIntBlockAllocatorFree(PRTHEAPPAGE pHeap, PRTHEAPPAGEBLOCK pBlock)
{
    /*
     * Locate the chunk the block belongs to and mark it as freed.
     */
    PRTHEAPPAGEBLOCKALLOCCHUNK pChunk;
    RTListForEach(&pHeap->BlockAllocatorChunks, pChunk, RTHEAPPAGEBLOCKALLOCCHUNK, ListEntry)
    {
        /* Unsigned wrap turns this into a single-compare range check:
           true iff pChunk <= pBlock < pChunk + chunk size. */
        if ((uintptr_t)pBlock - (uintptr_t)pChunk < RTHEAPPAGEBLOCKALLOCCHUNK_ALLOC_SIZE)
        {
            uintptr_t const idxBlock = (uintptr_t)(pBlock - &pChunk->aBlocks[0]);
            if (ASMBitTestAndClear(&pChunk->bmAlloc[0], idxBlock))
                pChunk->cFree++;
            else
                AssertMsgFailed(("pBlock=%p idxBlock=%#zx\n", pBlock, idxBlock)); /* double free or bogus pointer */
            return;
        }
    }
    AssertFailed(); /* pBlock does not belong to any known chunk. */
}
313
314
/**
 * Applies flags to an allocation.
 *
 * @return  Flags that need to be reverted upon free (a subset of
 *          RTMEMPAGEALLOC_F_ADVISE_LOCKED / RTMEMPAGEALLOC_F_ADVISE_NO_DUMP).
 * @param   pv      The allocation.
 * @param   cb      The size of the allocation (page aligned).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 */
DECLINLINE(uint32_t) rtMemPageApplyFlags(void *pv, size_t cb, uint32_t fFlags)
{
    uint32_t fHandled = 0;
    /* Only the advice flags involve the native layer; zeroing is done here. */
    if (fFlags & (RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP))
        fHandled = rtMemPageNativeApplyFlags(pv, cb, fFlags);
    if (fFlags & RTMEMPAGEALLOC_F_ZERO)
        RT_BZERO(pv, cb);
    return fHandled;
}
332
333
/**
 * Avoids some gotos in rtHeapPageAllocFromBlock.
 *
 * Finishes off a successful allocation: updates the bookkeeping, applies the
 * allocation flags and returns the resulting address.  The caller has already
 * set the pages in bmAlloc.
 *
 * @returns VINF_SUCCESS.
 * @param   pBlock  The block.
 * @param   iPage   The page to start allocating at.
 * @param   cPages  The number of pages.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address.
 */
DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
{
    PRTHEAPPAGE pHeap = pBlock->pHeap;

    /* Mark the first page of the allocation and update the free counters. */
    ASMBitSet(&pBlock->bmFirst[0], iPage);
    pBlock->cFreePages -= (uint32_t)cPages;
    pHeap->cFreePages  -= (uint32_t)cPages;
    /* Keep pHint2 pointing at the block with the most free pages seen. */
    if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
        pHeap->pHint2 = pBlock;
    pHeap->cAllocCalls++;

    void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
    *ppv = pv;

    if (fFlags)
    {
        /* Record which advice actually took, so it can be reverted on free. */
        uint32_t fHandled = rtMemPageApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
        Assert(!(fHandled & ~(RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)));
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
            ASMBitSetRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
        if (fHandled & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
            ASMBitSetRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
    }

    return VINF_SUCCESS;
}
370
371
372/**
373 * Checks if a page range is free in the specified block.
374 *
375 * @returns @c true if the range is free, @c false if not.
376 * @param pBlock The block.
377 * @param iFirst The first page to check.
378 * @param cPages The number of pages to check.
379 */
380DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
381{
382 uint32_t i = iFirst + cPages;
383 while (i-- > iFirst)
384 {
385 if (ASMBitTest(&pBlock->bmAlloc[0], i))
386 return false;
387 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
388 }
389 return true;
390}
391
392
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock  The block to allocate from.
 * @param   cPages  The size of the allocation.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0); /* cFreePages > 0, so a clear bit must exist. */

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Multi-page: first-fit scan over the runs of clear bits. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGE_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, (uint32_t)cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip past the current allocated run, then position on the
               first clear bit of the next free run. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || (unsigned)iPage >= RTMEMPAGE_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGE_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
436
437
438/**
439 * RTAvlrPVDoWithAll callback.
440 *
441 * @returns 0 to continue the enum, non-zero to quit it.
442 * @param pNode The node.
443 * @param pvUser The user argument.
444 */
445static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
446{
447 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
448 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
449 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
450 return RT_SUCCESS(rc) ? 1 : 0;
451}
452
453
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap   The heap - locked.
 * @param   cPages  The page count.
 * @param   pszTag  The tag.
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   ppv     Where to return the address of the allocation
 *                  on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     */
    PRTHEAPPAGEBLOCK const pBlock = rtHeapPageIntBlockAllocatorAlloc(pHeap);
    AssertReturn(pBlock, VERR_NO_MEMORY);

    /* Drop the lock while doing the (potentially slow) native allocation. */
    RTCritSectLeave(&pHeap->CritSect);

    void *pvPages = NULL;
    rc = rtMemPageNativeAlloc(RTMEMPAGE_BLOCK_SIZE, pHeap->fExec ? RTMEMPAGEALLOC_F_EXECUTABLE : 0, &pvPages);

    RTCritSectEnter(&pHeap->CritSect);
    if (RT_FAILURE(rc))
    {
        /* Return the unused block structure to its allocator. */
        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);
        return rc;
    }

    /* Initialize the new block and link it into the tree. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGE_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGE_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGE_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGE_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
542
543
544/**
545 * Allocates one or more pages off the heap.
546 *
547 * @returns IPRT status code.
548 * @param pHeap The page heap.
549 * @param cPages The number of pages to allocate.
550 * @param pszTag The allocation tag.
551 * @param fFlags RTMEMPAGEALLOC_F_XXX.
552 * @param ppv Where to return the pointer to the pages.
553 */
554static int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
555{
556 /*
557 * Validate input.
558 */
559 AssertPtr(ppv);
560 *ppv = NULL;
561 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
562 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
563 AssertMsgReturn(cPages < RTMEMPAGE_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
564
565 /*
566 * Grab the lock and call a worker with many returns.
567 */
568 int rc = RTCritSectEnter(&pHeap->CritSect);
569 if (RT_SUCCESS(rc))
570 {
571 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
572 RTCritSectLeave(&pHeap->CritSect);
573 }
574
575 return rc;
576}
577
578
579/**
580 * RTAvlrPVDoWithAll callback.
581 *
582 * @returns 0 to continue the enum, non-zero to quit it.
583 * @param pNode The node.
584 * @param pvUser Pointer to a block pointer variable. For returning
585 * the address of the block to be freed.
586 */
587static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
588{
589 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
590 if (pBlock->cFreePages == RTMEMPAGE_BLOCK_PAGE_COUNT)
591 {
592 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
593 return 1;
594 }
595 return 0;
596}
597
598
/**
 * Frees an allocation.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_FOUND if pv isn't within any of the memory blocks in the
 *          heap.
 * @retval  VERR_INVALID_POINTER if the given memory range isn't exactly one
 *          allocation block.
 * @param   pHeap   The page heap.
 * @param   pv      Pointer to what RTHeapPageAlloc returned.
 * @param   cPages  The number of pages that was allocated.
 */
static int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGE_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary: the end of
               the block, the start of another allocation, or a free page. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGE_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + (uint32_t)cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + (uint32_t)cPages));
            /* Check the other pages: allocated, but not allocation starts. */
            uint32_t const iLastPage = iPage + (uint32_t)cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                /* Revert the advice flags that were applied at allocation time. */
                uint32_t fRevert = (ASMBitTest(&pBlock->bmLockedAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_LOCKED : 0)
                                 | (ASMBitTest(&pBlock->bmNoDumpAdviced[0], iPage) ? RTMEMPAGEALLOC_F_ADVISE_NO_DUMP : 0);
                if (fRevert)
                {
                    rtMemPageNativeRevertFlags(pv, cPages << PAGE_SHIFT, fRevert);
                    ASMBitClearRange(&pBlock->bmLockedAdviced[0], iPage, iPage + cPages);
                    ASMBitClearRange(&pBlock->bmNoDumpAdviced[0], iPage, iPage + cPages);
                }
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += (uint32_t)cPages;
                pHeap->cFreePages  += (uint32_t)cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /** @todo Add bitmaps for tracking madvice and mlock so we can undo those. */

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGE_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGE_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        /* Find a completely unused block (right-to-left search). */
                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink it and invalidate the hints before dropping
                           the lock for the native free call. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGE_BLOCK_PAGE_COUNT;
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        /* NOTE(review): the block allocator's chunk list is
                           walked here without the critsect held -- confirm
                           that this is safe against concurrent growth. */
                        rtMemPageNativeFree(pBlock->Core.Key, RTMEMPAGE_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtHeapPageIntBlockAllocatorFree(pHeap, pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_NOT_FOUND; /* Distinct return code for this so RTMemPageFree and others can try alternative heaps. */

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
716
717
718/**
719 * Initializes the heap.
720 *
721 * @returns IPRT status code
722 * @param pvUser Unused.
723 */
724static DECLCALLBACK(int) rtMemPageInitOnce(void *pvUser)
725{
726 NOREF(pvUser);
727 int rc = RTHeapPageInit(&g_MemPageHeap, false /*fExec*/);
728 if (RT_SUCCESS(rc))
729 {
730 rc = RTHeapPageInit(&g_MemExecHeap, true /*fExec*/);
731 if (RT_SUCCESS(rc))
732 return rc;
733 RTHeapPageDelete(&g_MemPageHeap);
734 }
735 return rc;
736}
737
738
739/**
740 * Allocates memory from the specified heap.
741 *
742 * @returns Address of the allocated memory.
743 * @param cb The number of bytes to allocate.
744 * @param pszTag The tag.
745 * @param fFlags RTMEMPAGEALLOC_F_XXX.
746 * @param pHeap The heap to use.
747 */
748static void *rtMemPageAllocInner(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
749{
750 /*
751 * Validate & adjust the input.
752 */
753 Assert(cb > 0);
754 NOREF(pszTag);
755 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
756
757 /*
758 * If the allocation is relatively large, we use mmap/VirtualAlloc/DosAllocMem directly.
759 */
760 void *pv = NULL; /* shut up gcc */
761 if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
762 {
763 int rc = rtMemPageNativeAlloc(cb, fFlags, &pv);
764 if (RT_SUCCESS(rc))
765 {
766 AssertPtr(pv);
767
768 if (fFlags)
769 rtMemPageApplyFlags(pv, cb, fFlags);
770 }
771 else
772 pv = NULL;
773 }
774 else
775 {
776 int rc = RTOnce(&g_MemPageHeapInitOnce, rtMemPageInitOnce, NULL);
777 if (RT_SUCCESS(rc))
778 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
779 if (RT_FAILURE(rc))
780 pv = NULL;
781 }
782
783 return pv;
784}
785
786
/**
 * Allocates page-aligned memory from the default (non-exec) page heap.
 *
 * @returns Pointer to the allocated memory, NULL on failure.
 * @param   cb      Number of bytes to allocate (rounded up to page size).
 * @param   pszTag  Allocation tag (currently unused by the inner worker).
 */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, 0, &g_MemPageHeap);
}
791
792
/**
 * Same as RTMemPageAllocTag, but the returned memory is zeroed
 * (RTMEMPAGEALLOC_F_ZERO).
 *
 * @returns Pointer to the allocated memory, NULL on failure.
 * @param   cb      Number of bytes to allocate (rounded up to page size).
 * @param   pszTag  Allocation tag (currently unused by the inner worker).
 */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPageAllocInner(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPageHeap);
}
797
798
/**
 * Extended page allocation; RTMEMPAGEALLOC_F_EXECUTABLE selects the exec
 * heap, the other flags are passed through to the inner worker.
 *
 * @returns Pointer to the allocated memory; NULL on failure or if fFlags
 *          contains bits outside RTMEMPAGEALLOC_F_VALID_MASK.
 * @param   cb      Number of bytes to allocate (rounded up to page size).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   pszTag  Allocation tag (currently unused by the inner worker).
 */
RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPageAllocInner(cb, pszTag, fFlags,
                               !(fFlags & RTMEMPAGEALLOC_F_EXECUTABLE) ? &g_MemPageHeap : &g_MemExecHeap);
}
805
806
/**
 * Frees memory allocated by the RTMemPageAlloc* functions.
 *
 * @param   pv  Start of the allocation; NULL is quietly ignored.
 * @param   cb  The size that was passed to the allocation call (it is
 *              re-rounded to page size here, mirroring the alloc path).
 */
RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    /*
     * Validate & adjust the input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(cb > 0);
    Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);

    /*
     * If the allocation is relatively large, we used mmap/VirtualAlloc/DosAllocMem directly.
     */
    if (cb >= RTMEMPAGE_NATIVE_THRESHOLD)
        rtMemPageNativeFree(pv, cb);
    else
    {
        /* Try the regular heap first; VERR_NOT_FOUND means the allocation may
           live in the exec heap instead, so retry there. */
        int rc = RTHeapPageFree(&g_MemPageHeap, pv, cb >> PAGE_SHIFT);
        if (rc == VERR_NOT_FOUND)
            rc = RTHeapPageFree(&g_MemExecHeap, pv, cb >> PAGE_SHIFT);
        AssertRC(rc);
    }
}
832
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette