VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 86186

Last change on this file since 86186 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.2 KB
Line 
1/* $Id: rtmempage-exec-mmap-heap-posix.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "internal/iprt.h"
32#include <iprt/mem.h>
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/avl.h>
37#include <iprt/critsect.h>
38#include <iprt/errcore.h>
39#include <iprt/once.h>
40#include <iprt/param.h>
41#include <iprt/string.h>
42#include "internal/mem.h"
43#include "../alloc-ef.h"
44
45#include <stdlib.h>
46#include <errno.h>
47#include <sys/mman.h>
48#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
49# define MAP_ANONYMOUS MAP_ANON
50#endif
51
52
53/*********************************************************************************************************************************
54* Defined Constants And Macros *
55*********************************************************************************************************************************/
/** Threshold at which we switch to simply calling mmap directly. */
#define RTMEMPAGEPOSIX_MMAP_THRESHOLD _128K
/** The size of a heap block (power of two) - in bytes. */
#define RTMEMPAGEPOSIX_BLOCK_SIZE _2M
AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
/** The number of pages per heap block. */
#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
63
64
65/*********************************************************************************************************************************
66* Structures and Typedefs *
67*********************************************************************************************************************************/
/** Pointer to a page heap block. */
typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;

/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC).  Set last during initialization. */
    uint32_t u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t cHeapPages;
    /** The number of currently free pages. */
    uint32_t cFreePages;
    /** Number of successful allocation calls. */
    uint32_t cAllocCalls;
    /** Number of successful free calls. */
    uint32_t cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t uLastMinimizeCall;
    /** Tree of heap blocks, keyed by their address range. */
    AVLRPVTREE BlockTree;
    /** Allocation hint no 1 (last freed). */
    PRTHEAPPAGEBLOCK pHint1;
    /** Allocation hint no 2 (last alloc). */
    PRTHEAPPAGEBLOCK pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool fExec;
} RTHEAPPAGE;
/** Magic value for RTHEAPPAGE::u32Magic. */
#define RTHEAPPAGE_MAGIC UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
102
103
/**
 * Describes a page heap block.
 *
 * Each block covers RTMEMPAGEPOSIX_BLOCK_SIZE bytes of mmap'ed memory and is
 * tracked by two parallel bitmaps (one bit per page).
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range covering the block). */
    AVLRPVNODECORE Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE pHeap;
} RTHEAPPAGEBLOCK;
121
122
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t cPages;
    /** The resulting address; non-NULL on success.  The caller initializes
     * this to NULL before the enumeration. */
    void *pvAlloc;
    /** RTMEMPAGEALLOC_F_XXX. */
    uint32_t fFlags;
} RTHEAPPAGEALLOCARGS;
135
136
/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Initialize once structure (lazily initializes both heaps on first
 * sub-threshold allocation, see rtMemPagePosixInitOnce). */
static RTONCE g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
/** The page heap (read+write memory). */
static RTHEAPPAGE g_MemPagePosixHeap;
/** The exec page heap (read+write+exec memory). */
static RTHEAPPAGE g_MemExecPosixHeap;
146
147
#ifdef RT_OS_OS2
/*
 * A quick mmap/munmap mockup to avoid duplicating lots of good code.
 * The flags used by this file are neutralized (OS/2 DosAllocMem always
 * behaves like private anonymous memory), and the real names are remapped
 * so the POSIX code below compiles unchanged.
 */
# define INCL_BASE
# include <os2.h>
# undef MAP_PRIVATE
# define MAP_PRIVATE 0
# undef MAP_ANONYMOUS
# define MAP_ANONYMOUS 0
# undef MAP_FAILED
# define MAP_FAILED (void *)-1
# undef mmap
# define mmap iprt_mmap
# undef munmap
# define munmap iprt_munmap
165static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
166{
167 NOREF(pvWhere); NOREF(fd); NOREF(off);
168 void *pv = NULL;
169 ULONG fAlloc = OBJ_ANY | PAG_COMMIT;
170 if (fProt & PROT_EXEC)
171 fAlloc |= PAG_EXECUTE;
172 if (fProt & PROT_READ)
173 fAlloc |= PAG_READ;
174 if (fProt & PROT_WRITE)
175 fAlloc |= PAG_WRITE;
176 APIRET rc = DosAllocMem(&pv, cb, fAlloc);
177 if (rc == NO_ERROR)
178 return pv;
179 errno = ENOMEM;
180 return MAP_FAILED;
181}
182
183static int munmap(void *pv, size_t cb)
184{
185 APIRET rc = DosFreeMem(pv);
186 if (rc == NO_ERROR)
187 return 0;
188 errno = EINVAL;
189 return -1;
190}
191
192#endif
193
194/**
195 * Initializes the heap.
196 *
197 * @returns IPRT status code.
198 * @param pHeap The page heap to initialize.
199 * @param fExec Whether the heap memory should be marked as
200 * executable or not.
201 */
202int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
203{
204 int rc = RTCritSectInitEx(&pHeap->CritSect,
205 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
206 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
207 if (RT_SUCCESS(rc))
208 {
209 pHeap->cHeapPages = 0;
210 pHeap->cFreePages = 0;
211 pHeap->cAllocCalls = 0;
212 pHeap->cFreeCalls = 0;
213 pHeap->uLastMinimizeCall = 0;
214 pHeap->BlockTree = NULL;
215 pHeap->fExec = fExec;
216 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
217 }
218 return rc;
219}
220
221
/**
 * Deletes the heap and all the memory it tracks.
 *
 * @returns IPRT status code.
 * @param   pHeap   The page heap to delete.
 *
 * @note    Not implemented.  The two heaps in this file are process-global
 *          singletons that live until process termination, so nothing
 *          currently needs this (see rtMemPagePosixInitOnce, which only
 *          calls it on a heap that never received any allocations).
 */
int RTHeapPageDelete(PRTHEAPPAGE pHeap)
{
    NOREF(pHeap);
    return VERR_NOT_IMPLEMENTED;
}
233
234
235/**
236 * Applies flags to an allocation.
237 *
238 * @param pv The allocation.
239 * @param cb The size of the allocation (page aligned).
240 * @param fFlags RTMEMPAGEALLOC_F_XXX.
241 */
242DECLINLINE(void) rtMemPagePosixApplyFlags(void *pv, size_t cb, uint32_t fFlags)
243{
244#ifndef RT_OS_OS2
245 if (fFlags & RTMEMPAGEALLOC_F_ADVISE_LOCKED)
246 {
247 int rc = mlock(pv, cb);
248# ifndef RT_OS_SOLARIS /* mlock(3C) on Solaris requires the priv_lock_memory privilege */
249 AssertMsg(rc == 0, ("mlock %p LB %#zx -> %d errno=%d\n", pv, cb, rc, errno));
250# endif
251 NOREF(rc);
252 }
253
254# ifdef MADV_DONTDUMP
255 if (fFlags & RTMEMPAGEALLOC_F_ADVISE_NO_DUMP)
256 {
257 int rc = madvise(pv, cb, MADV_DONTDUMP);
258 AssertMsg(rc == 0, ("madvice %p LB %#zx MADV_DONTDUMP -> %d errno=%d\n", pv, cb, rc, errno));
259 NOREF(rc);
260 }
261# endif
262#endif
263
264 if (fFlags & RTMEMPAGEALLOC_F_ZERO)
265 RT_BZERO(pv, cb);
266}
267
268
269/**
270 * Avoids some gotos in rtHeapPageAllocFromBlock.
271 *
272 * @returns VINF_SUCCESS.
273 * @param pBlock The block.
274 * @param iPage The page to start allocating at.
275 * @param cPages The number of pages.
276 * @param fFlags RTMEMPAGEALLOC_F_XXX.
277 * @param ppv Where to return the allocation address.
278 */
279DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, uint32_t fFlags, void **ppv)
280{
281 PRTHEAPPAGE pHeap = pBlock->pHeap;
282
283 ASMBitSet(&pBlock->bmFirst[0], iPage);
284 pBlock->cFreePages -= cPages;
285 pHeap->cFreePages -= cPages;
286 if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
287 pHeap->pHint2 = pBlock;
288 pHeap->cAllocCalls++;
289
290 void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
291 *ppv = pv;
292
293 if (fFlags)
294 rtMemPagePosixApplyFlags(pv, cPages << PAGE_SHIFT, fFlags);
295
296 return VINF_SUCCESS;
297}
298
299
300/**
301 * Checks if a page range is free in the specified block.
302 *
303 * @returns @c true if the range is free, @c false if not.
304 * @param pBlock The block.
305 * @param iFirst The first page to check.
306 * @param cPages The number of pages to check.
307 */
308DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
309{
310 uint32_t i = iFirst + cPages;
311 while (i-- > iFirst)
312 {
313 if (ASMBitTest(&pBlock->bmAlloc[0], i))
314 return false;
315 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
316 }
317 return true;
318}
319
320
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock      The block to allocate from.
 * @param   cPages      The size of the allocation.
 * @param   fFlags      RTMEMPAGEALLOC_F_XXX.
 * @param   ppv         Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, uint32_t fFlags, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        /* Find the first free page; one must exist since cFreePages >= cPages > 0. */
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
        }

        /* Multi-page: walk the free runs looking for one that is long enough.
           iPage is the first page of a free run at the top of each iteration. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fFlags, ppv);
            }

            /* next: skip past the allocated run that terminated this free run,
               then position at the start of the following free run. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
364
365
366/**
367 * RTAvlrPVDoWithAll callback.
368 *
369 * @returns 0 to continue the enum, non-zero to quit it.
370 * @param pNode The node.
371 * @param pvUser The user argument.
372 */
373static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
374{
375 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
376 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
377 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fFlags, &pArgs->pvAlloc);
378 return RT_SUCCESS(rc) ? 1 : 0;
379}
380
381
/**
 * Worker for RTHeapPageAlloc.
 *
 * Called with the heap critical section owned.  The lock is temporarily
 * dropped around the mmap/munmap system calls when growing the heap and is
 * always owned again on return.
 *
 * @returns IPRT status code
 * @param   pHeap       The heap - locked.
 * @param   cPages      The page count.
 * @param   pszTag      The tag.
 * @param   fFlags      RTMEMPAGEALLOC_F_XXX.
 * @param   ppv         Where to return the address of the allocation
 *                      on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fFlags, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fFlags  = fFlags;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     * The lock is dropped while in the kernel; it is retaken before the heap
     * state is touched again below.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);
    }
    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock;
#ifdef RTALLOC_REPLACE_MALLOC
    if (g_pfnOrgMalloc)
        pBlock = (PRTHEAPPAGEBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
    else
#endif
        pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    /* Describe the new block: all pages free, both bitmaps zeroed by RT_ZERO. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    /* Link it into the tree and account for the new pages. */
    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fFlags, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
484
485
486/**
487 * Allocates one or more pages off the heap.
488 *
489 * @returns IPRT status code.
490 * @param pHeap The page heap.
491 * @param cPages The number of pages to allocate.
492 * @param pszTag The allocation tag.
493 * @param fFlags RTMEMPAGEALLOC_F_XXX.
494 * @param ppv Where to return the pointer to the pages.
495 */
496int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, uint32_t fFlags, void **ppv)
497{
498 /*
499 * Validate input.
500 */
501 AssertPtr(ppv);
502 *ppv = NULL;
503 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
504 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
505 AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
506
507 /*
508 * Grab the lock and call a worker with many returns.
509 */
510 int rc = RTCritSectEnter(&pHeap->CritSect);
511 if (RT_SUCCESS(rc))
512 {
513 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fFlags, ppv);
514 RTCritSectLeave(&pHeap->CritSect);
515 }
516
517 return rc;
518}
519
520
521/**
522 * RTAvlrPVDoWithAll callback.
523 *
524 * @returns 0 to continue the enum, non-zero to quit it.
525 * @param pNode The node.
526 * @param pvUser Pointer to a block pointer variable. For returning
527 * the address of the block to be freed.
528 */
529static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
530{
531 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
532 if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
533 {
534 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
535 return 1;
536 }
537 return 0;
538}
539
540
/**
 * Frees one or more pages previously allocated by RTHeapPageAlloc.
 *
 * @returns IPRT status code.
 * @param   pHeap       The page heap.
 * @param   pv          Pointer to what RTHeapPageAlloc returned.  NULL is
 *                      quietly ignored (VINF_SUCCESS).
 * @param   cPages      The number of pages that was allocated.
 */
int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range against the block bitmaps.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages: allocated but not allocation starts.
               NOTE(review): the loop stops before iLastPage, so the final
               page's bmAlloc bit is not verified here - presumably deemed
               covered by the boundary check above; confirm. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /** @todo Add bitmaps for tracking madvise and mlock so we can undo those. */

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        /* Find any completely unused block (right to left). */
                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        /* Unlink it and drop the lock around the munmap call;
                           the hints are invalidated since they may point to it. */
                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->pHint1 = NULL;
                        pHeap->pHint2 = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
#ifdef RTALLOC_REPLACE_MALLOC
                        if (g_pfnOrgFree)
                            g_pfnOrgFree(pBlock);
                        else
#endif
                            rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
651
652
653/**
654 * Initializes the heap.
655 *
656 * @returns IPRT status code
657 * @param pvUser Unused.
658 */
659static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser)
660{
661 NOREF(pvUser);
662 int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
663 if (RT_SUCCESS(rc))
664 {
665 rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
666 if (RT_SUCCESS(rc))
667 return rc;
668 RTHeapPageDelete(&g_MemPagePosixHeap);
669 }
670 return rc;
671}
672
673
674/**
675 * Allocates memory from the specified heap.
676 *
677 * @returns Address of the allocated memory.
678 * @param cb The number of bytes to allocate.
679 * @param pszTag The tag.
680 * @param fFlags RTMEMPAGEALLOC_F_XXX.
681 * @param pHeap The heap to use.
682 */
683static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, uint32_t fFlags, PRTHEAPPAGE pHeap)
684{
685 /*
686 * Validate & adjust the input.
687 */
688 Assert(cb > 0);
689 NOREF(pszTag);
690 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
691
692 /*
693 * If the allocation is relatively large, we use mmap/munmap directly.
694 */
695 void *pv;
696 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
697 {
698
699 pv = mmap(NULL, cb,
700 PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
701 MAP_PRIVATE | MAP_ANONYMOUS,
702 -1, 0);
703 if (pv != MAP_FAILED)
704 {
705 AssertPtr(pv);
706
707 if (fFlags)
708 rtMemPagePosixApplyFlags(pv, cb, fFlags);
709 }
710 else
711 pv = NULL;
712 }
713 else
714 {
715 int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL);
716 if (RT_SUCCESS(rc))
717 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fFlags, &pv);
718 if (RT_FAILURE(rc))
719 pv = NULL;
720 }
721
722 return pv;
723}
724
725
726/**
727 * Free memory allocated by rtMemPagePosixAlloc.
728 *
729 * @param pv The address of the memory to free.
730 * @param cb The size.
731 * @param pHeap The heap.
732 */
733static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
734{
735 /*
736 * Validate & adjust the input.
737 */
738 if (!pv)
739 return;
740 AssertPtr(pv);
741 Assert(cb > 0);
742 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
743 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
744
745 /*
746 * If the allocation is relatively large, we use mmap/munmap directly.
747 */
748 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
749 {
750 int rc = munmap(pv, cb);
751 AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
752 }
753 else
754 {
755 int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
756 AssertRC(rc);
757 }
758}
759
760
761
762
763
/** Allocates page-aligned memory from the regular (non-exec) page heap.
 * @returns Pointer to the pages on success, NULL on failure.
 * @param   cb      Number of bytes (rounded up to whole pages internally).
 * @param   pszTag  Allocation tag. */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemPagePosixHeap);
}
768
769
/** Allocates zero-initialized page-aligned memory from the regular page heap.
 * @returns Pointer to the zeroed pages on success, NULL on failure.
 * @param   cb      Number of bytes (rounded up to whole pages internally).
 * @param   pszTag  Allocation tag. */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, RTMEMPAGEALLOC_F_ZERO, &g_MemPagePosixHeap);
}
774
775
/** Allocates page-aligned memory from the regular page heap with flags.
 * @returns Pointer to the pages on success, NULL on failure or if fFlags
 *          contains bits outside RTMEMPAGEALLOC_F_VALID_MASK.
 * @param   cb      Number of bytes (rounded up to whole pages internally).
 * @param   fFlags  RTMEMPAGEALLOC_F_XXX.
 * @param   pszTag  Allocation tag. */
RTDECL(void *) RTMemPageAllocExTag(size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    AssertReturn(!(fFlags & ~RTMEMPAGEALLOC_F_VALID_MASK), NULL);
    return rtMemPagePosixAlloc(cb, pszTag, fFlags, &g_MemPagePosixHeap);
}
781
782
/** Frees memory allocated by the RTMemPageAlloc*Tag APIs.
 * @param   pv  The allocation address (NULL is ignored).
 * @param   cb  The size passed to the allocation call. */
RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
}
787
788
789
790
791
/** Allocates page-aligned memory from the executable page heap.
 * @returns Pointer to the executable pages on success, NULL on failure.
 * @param   cb      Number of bytes (rounded up to whole pages internally).
 * @param   pszTag  Allocation tag. */
RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    return rtMemPagePosixAlloc(cb, pszTag, 0, &g_MemExecPosixHeap);
}
796
797
/** Frees memory allocated by RTMemExecAllocTag.
 * @param   pv  The allocation address (NULL is ignored).
 * @param   cb  The size passed to the allocation call. */
RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    return rtMemPagePosixFree(pv, cb, &g_MemExecPosixHeap);
}
802
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette