VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/rtmempage-exec-mmap-heap-posix.cpp@ 40305

Last change on this file since 40305 was 40305, checked in by vboxsync, 13 years ago

IPRT: Fixed unresolved symbols on OS/2.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.0 KB
Line 
1/* $Id: rtmempage-exec-mmap-heap-posix.cpp 40305 2012-02-29 21:34:36Z vboxsync $ */
2/** @file
3 * IPRT - RTMemPage*, POSIX with heap.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#include "internal/iprt.h"
32#include <iprt/mem.h>
33
34#include <iprt/asm.h>
35#include <iprt/assert.h>
36#include <iprt/avl.h>
37#include <iprt/critsect.h>
38#include <iprt/err.h>
39#include <iprt/once.h>
40#include <iprt/param.h>
41#include <iprt/string.h>
42#include "internal/mem.h"
43
44#include <stdlib.h>
45#include <errno.h>
46#include <sys/mman.h>
47#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
48# define MAP_ANONYMOUS MAP_ANON
49#endif
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
/** Threshold at which we switch to simply calling mmap. */
56#define RTMEMPAGEPOSIX_MMAP_THRESHOLD _128K
57/** The size of a heap block (power of two) - in bytes. */
58#define RTMEMPAGEPOSIX_BLOCK_SIZE _2M
59AssertCompile(RTMEMPAGEPOSIX_BLOCK_SIZE == (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE) * PAGE_SIZE);
60/** The number of pages per heap block. */
61#define RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT (RTMEMPAGEPOSIX_BLOCK_SIZE / PAGE_SIZE)
62
63
64/*******************************************************************************
65* Structures and Typedefs *
66*******************************************************************************/
67/** Pointer to a page heap block. */
68typedef struct RTHEAPPAGEBLOCK *PRTHEAPPAGEBLOCK;
69
/**
 * A simple page heap.
 */
typedef struct RTHEAPPAGE
{
    /** Magic number (RTHEAPPAGE_MAGIC). */
    uint32_t            u32Magic;
    /** The number of pages in the heap (in BlockTree). */
    uint32_t            cHeapPages;
    /** The number of currently free pages. */
    uint32_t            cFreePages;
    /** Number of successful allocation calls. */
    uint32_t            cAllocCalls;
    /** Number of successful free calls. */
    uint32_t            cFreeCalls;
    /** The free call number at which we last tried to minimize the heap. */
    uint32_t            uLastMinimizeCall;
    /** Tree of heap blocks, keyed by their address ranges. */
    AVLRPVTREE          BlockTree;
    /** Allocation hint no 1 (the block involved in the last free). */
    PRTHEAPPAGEBLOCK    pHint1;
    /** Allocation hint no 2 (the block involved in the last allocation). */
    PRTHEAPPAGEBLOCK    pHint2;
    /** Critical section protecting the heap. */
    RTCRITSECT          CritSect;
    /** Set if the memory must be allocated with execute access. */
    bool                fExec;
} RTHEAPPAGE;
/** Magic value for RTHEAPPAGE::u32Magic. */
#define RTHEAPPAGE_MAGIC     UINT32_C(0xfeedface)
/** Pointer to a page heap. */
typedef RTHEAPPAGE *PRTHEAPPAGE;
101
102
/**
 * Describes a page heap block.
 *
 * Each block is a fixed-size (RTMEMPAGEPOSIX_BLOCK_SIZE) anonymous mapping
 * carved into pages that are handed out by the allocator.
 */
typedef struct RTHEAPPAGEBLOCK
{
    /** The AVL tree node core (void pointer range).  Core.Key is the address
     * of the first page of the block and Core.KeyLast its last byte. */
    AVLRPVNODECORE      Core;
    /** Allocation bitmap.  Set bits mark allocated pages. */
    uint32_t            bmAlloc[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** Allocation boundary bitmap.  Set bits mark the start of
     * allocations. */
    uint32_t            bmFirst[RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT / 32];
    /** The number of free pages. */
    uint32_t            cFreePages;
    /** Pointer back to the heap. */
    PRTHEAPPAGE         pHeap;
} RTHEAPPAGEBLOCK;
120
121
/**
 * Argument package for rtHeapPageAllocCallback.
 */
typedef struct RTHEAPPAGEALLOCARGS
{
    /** The number of pages to allocate. */
    size_t          cPages;
    /** Non-null on success; set by the callback when a block satisfies the
     *  request. */
    void           *pvAlloc;
    /** Whether the pages should be zeroed or not. */
    bool            fZero;
} RTHEAPPAGEALLOCARGS;
134
135
136/*******************************************************************************
137* Global Variables *
138*******************************************************************************/
139/** Initialize once structure. */
140static RTONCE g_MemPagePosixInitOnce = RTONCE_INITIALIZER;
141/** The page heap. */
142static RTHEAPPAGE g_MemPagePosixHeap;
143/** The exec page heap. */
144static RTHEAPPAGE g_MemExecPosixHeap;
145
146
#ifdef RT_OS_OS2
/*
 * A quick mmap/munmap mockup to avoid duplicating lots of good code.
 */
# define INCL_BASE
# include <os2.h>
# define MAP_PRIVATE    0
# define MAP_ANONYMOUS  0
# define MAP_FAILED     (void *)-1

/**
 * Minimal mmap replacement backed by DosAllocMem.
 *
 * Only supports anonymous private mappings; the address hint, flags, file
 * descriptor and offset are ignored.
 */
static void *mmap(void *pvWhere, size_t cb, int fProt, int fFlags, int fd, off_t off)
{
    NOREF(pvWhere); NOREF(fFlags); NOREF(fd); NOREF(off);
    void *pv = NULL;
    ULONG fAlloc = OBJ_ANY | PAG_COMMIT;
    if (fProt & PROT_EXEC)
        fAlloc |= PAG_EXECUTE;
    if (fProt & PROT_READ)
        fAlloc |= PAG_READ;
    if (fProt & PROT_WRITE)
        fAlloc |= PAG_WRITE;
    APIRET rc = DosAllocMem(&pv, cb, fAlloc);
    if (rc == NO_ERROR)
        return pv;
    errno = ENOMEM;
    return MAP_FAILED;
}

/**
 * Minimal munmap replacement backed by DosFreeMem.
 *
 * DosFreeMem always releases the whole object, so the size is ignored.
 */
static int munmap(void *pv, size_t cb)
{
    NOREF(cb);
    APIRET rc = DosFreeMem(pv);
    if (rc == NO_ERROR)
        return 0;
    errno = EINVAL;
    return -1;
}

#endif
185
186/**
187 * Initializes the heap.
188 *
189 * @returns IPRT status code.
190 * @param pHeap The page heap to initialize.
191 * @param fExec Whether the heap memory should be marked as
192 * executable or not.
193 */
194int RTHeapPageInit(PRTHEAPPAGE pHeap, bool fExec)
195{
196 int rc = RTCritSectInitEx(&pHeap->CritSect,
197 RTCRITSECT_FLAGS_NO_LOCK_VAL | RTCRITSECT_FLAGS_NO_NESTING | RTCRITSECT_FLAGS_BOOTSTRAP_HACK,
198 NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, NULL);
199 if (RT_SUCCESS(rc))
200 {
201 pHeap->cHeapPages = 0;
202 pHeap->cFreePages = 0;
203 pHeap->cAllocCalls = 0;
204 pHeap->cFreeCalls = 0;
205 pHeap->uLastMinimizeCall = 0;
206 pHeap->BlockTree = NULL;
207 pHeap->fExec = fExec;
208 pHeap->u32Magic = RTHEAPPAGE_MAGIC;
209 }
210 return rc;
211}
212
213
214/**
215 * Deletes the heap and all the memory it tracks.
216 *
217 * @returns IPRT status code.
218 * @param pHeap The page heap to delete.
219 */
220int RTHeapPageDelete(PRTHEAPPAGE pHeap)
221{
222 NOREF(pHeap);
223 return VERR_NOT_IMPLEMENTED;
224}
225
226
227/**
228 * Avoids some gotos in rtHeapPageAllocFromBlock.
229 *
230 * @returns VINF_SUCCESS.
231 * @param pBlock The block.
232 * @param iPage The page to start allocating at.
233 * @param cPages The number of pages.
234 * @param fZero Whether to clear them.
235 * @param ppv Where to return the allocation address.
236 */
237DECLINLINE(int) rtHeapPageAllocFromBlockSuccess(PRTHEAPPAGEBLOCK pBlock, uint32_t iPage, size_t cPages, bool fZero, void **ppv)
238{
239 PRTHEAPPAGE pHeap = pBlock->pHeap;
240
241 ASMBitSet(&pBlock->bmFirst[0], iPage);
242 pBlock->cFreePages -= cPages;
243 pHeap->cFreePages -= cPages;
244 if (!pHeap->pHint2 || pHeap->pHint2->cFreePages < pBlock->cFreePages)
245 pHeap->pHint2 = pBlock;
246 pHeap->cAllocCalls++;
247
248 void *pv = (uint8_t *)pBlock->Core.Key + (iPage << PAGE_SHIFT);
249 *ppv = pv;
250 if (fZero)
251 RT_BZERO(pv, cPages << PAGE_SHIFT);
252
253 return VINF_SUCCESS;
254}
255
256
257/**
258 * Checks if a page range is free in the specified block.
259 *
260 * @returns @c true if the range is free, @c false if not.
261 * @param pBlock The block.
262 * @param iFirst The first page to check.
263 * @param cPages The number of pages to check.
264 */
265DECLINLINE(bool) rtHeapPageIsPageRangeFree(PRTHEAPPAGEBLOCK pBlock, uint32_t iFirst, uint32_t cPages)
266{
267 uint32_t i = iFirst + cPages;
268 while (i-- > iFirst)
269 {
270 if (ASMBitTest(&pBlock->bmAlloc[0], i))
271 return false;
272 Assert(!ASMBitTest(&pBlock->bmFirst[0], i));
273 }
274 return true;
275}
276
277
/**
 * Tries to allocate a chunk of pages from a heap block.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_NO_MEMORY if the allocation failed.
 * @param   pBlock          The block to allocate from.
 * @param   cPages          The size of the allocation.
 * @param   fZero           Whether it should be zeroed or not.
 * @param   ppv             Where to return the allocation address on success.
 */
DECLINLINE(int) rtHeapPageAllocFromBlock(PRTHEAPPAGEBLOCK pBlock, size_t cPages, bool fZero, void **ppv)
{
    if (pBlock->cFreePages >= cPages)
    {
        /* Find the first free page; guaranteed to exist by the check above. */
        int iPage = ASMBitFirstClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT);
        Assert(iPage >= 0);

        /* special case: single page. */
        if (cPages == 1)
        {
            ASMBitSet(&pBlock->bmAlloc[0], iPage);
            return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
        }

        /* Scan for a run of cPages clear bits, starting at each free page
           that leaves enough room before the end of the block. */
        while (   iPage >= 0
               && (unsigned)iPage <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - cPages)
        {
            /* iPage itself is known to be free, so only the cPages - 1
               pages after it need checking. */
            if (rtHeapPageIsPageRangeFree(pBlock, iPage + 1, cPages - 1))
            {
                ASMBitSetRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                return rtHeapPageAllocFromBlockSuccess(pBlock, iPage, cPages, fZero, ppv);
            }

            /* next: skip past the allocated stretch that blocked us, then
               to the next free page after it. */
            iPage = ASMBitNextSet(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
            if (iPage < 0 || iPage >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT - 1)
                break;
            iPage = ASMBitNextClear(&pBlock->bmAlloc[0], RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT, iPage);
        }
    }

    return VERR_NO_MEMORY;
}
321
322
323/**
324 * RTAvlrPVDoWithAll callback.
325 *
326 * @returns 0 to continue the enum, non-zero to quit it.
327 * @param pNode The node.
328 * @param pvUser The user argument.
329 */
330static DECLCALLBACK(int) rtHeapPageAllocCallback(PAVLRPVNODECORE pNode, void *pvUser)
331{
332 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
333 RTHEAPPAGEALLOCARGS *pArgs = (RTHEAPPAGEALLOCARGS *)pvUser;
334 int rc = rtHeapPageAllocFromBlock(pBlock, pArgs->cPages, pArgs->fZero, &pArgs->pvAlloc);
335 return RT_SUCCESS(rc) ? 1 : 0;
336}
337
338
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap           The heap - locked.
 * @param   cPages          The page count.
 * @param   pszTag          The tag.
 * @param   fZero           Whether to zero the memory.
 * @param   ppv             Where to return the address of the allocation
 *                          on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
{
    int rc;
    NOREF(pszTag);

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fZero   = fZero;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     *
     * Note! The critsect is dropped while calling out to mmap and
     *       rtMemBaseAlloc, and reacquired before touching the heap again.
     *       Other threads may therefore also expand the heap concurrently;
     *       each will simply insert its own block.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS,
                   -1, 0);
    if (pvPages == MAP_FAILED)
    {
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(errno);
    }
    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    /* Initialize the block tracking structure; all pages start out free. */
    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core); Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fZero, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
435
436
437/**
438 * Allocates one or more pages off the heap.
439 *
440 * @returns IPRT status code.
441 * @param pHeap The page heap.
442 * @param cPages The number of pages to allocate.
443 * @param pszTag The allocation tag.
444 * @param fZero Set if the pages should be zeroed or not.
445 * @param ppv Where to return the pointer to the pages.
446 */
447int RTHeapPageAlloc(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
448{
449 /*
450 * Validate input.
451 */
452 AssertPtr(ppv);
453 *ppv = NULL;
454 AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
455 AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);
456 AssertMsgReturn(cPages < RTMEMPAGEPOSIX_BLOCK_SIZE, ("%#zx\n", cPages), VERR_OUT_OF_RANGE);
457
458 /*
459 * Grab the lock and call a worker with many returns.
460 */
461 int rc = RTCritSectEnter(&pHeap->CritSect);
462 if (RT_SUCCESS(rc))
463 {
464 rc = rtHeapPageAllocLocked(pHeap, cPages, pszTag, fZero, ppv);
465 RTCritSectLeave(&pHeap->CritSect);
466 }
467
468 return rc;
469}
470
471
472/**
473 * RTAvlrPVDoWithAll callback.
474 *
475 * @returns 0 to continue the enum, non-zero to quit it.
476 * @param pNode The node.
477 * @param pvUser Pointer to a block pointer variable. For returning
478 * the address of the block to be freed.
479 */
480static DECLCALLBACK(int) rtHeapPageFindUnusedBlockCallback(PAVLRPVNODECORE pNode, void *pvUser)
481{
482 PRTHEAPPAGEBLOCK pBlock = RT_FROM_MEMBER(pNode, RTHEAPPAGEBLOCK, Core);
483 if (pBlock->cFreePages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT)
484 {
485 *(PRTHEAPPAGEBLOCK *)pvUser = pBlock;
486 return 1;
487 }
488 return 0;
489}
490
491
/**
 * Frees one or more pages previously allocated off the heap.
 *
 * @returns IPRT status code.
 * @param   pHeap           The page heap.
 * @param   pv              Pointer to what RTHeapPageAlloc returned.
 * @param   cPages          The number of pages that was allocated.
 */
int RTHeapPageFree(PRTHEAPPAGE pHeap, void *pv, size_t cPages)
{
    /*
     * Validate input.
     */
    if (!pv)
        return VINF_SUCCESS;
    AssertPtrReturn(pHeap, VERR_INVALID_HANDLE);
    AssertReturn(pHeap->u32Magic == RTHEAPPAGE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Grab the lock and look up the page.
     */
    int rc = RTCritSectEnter(&pHeap->CritSect);
    if (RT_SUCCESS(rc))
    {
        PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)RTAvlrPVRangeGet(&pHeap->BlockTree, pv);
        if (pBlock)
        {
            /*
             * Validate the specified address range.
             */
            uint32_t const iPage = (uint32_t)(((uintptr_t)pv - (uintptr_t)pBlock->Core.Key) >> PAGE_SHIFT);
            /* Check the range is within the block. */
            bool fOk = iPage + cPages <= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
            /* Check that it's the start of an allocation. */
            fOk = fOk && ASMBitTest(&pBlock->bmFirst[0], iPage);
            /* Check that the range ends at an allocation boundary: either the
               end of the block, the start of another allocation, or a free
               page. */
            fOk = fOk && (   iPage + cPages == RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                          || ASMBitTest(&pBlock->bmFirst[0], iPage + cPages)
                          || !ASMBitTest(&pBlock->bmAlloc[0], iPage + cPages));
            /* Check the other pages: all allocated, none starting a new
               allocation. */
            uint32_t const iLastPage = iPage + cPages - 1;
            for (uint32_t i = iPage + 1; i < iLastPage && fOk; i++)
                fOk = ASMBitTest(&pBlock->bmAlloc[0], i)
                   && !ASMBitTest(&pBlock->bmFirst[0], i);
            if (fOk)
            {
                /*
                 * Free the memory.
                 */
                ASMBitClearRange(&pBlock->bmAlloc[0], iPage, iPage + cPages);
                ASMBitClear(&pBlock->bmFirst[0], iPage);
                pBlock->cFreePages += cPages;
                pHeap->cFreePages  += cPages;
                pHeap->cFreeCalls++;
                /* Favor the block with the most free pages as the "last free"
                   allocation hint. */
                if (!pHeap->pHint1 || pHeap->pHint1->cFreePages < pBlock->cFreePages)
                    pHeap->pHint1 = pBlock;

                /*
                 * Shrink the heap. Not very efficient because of the AVL tree.
                 * Only triggered when at least 3 blocks worth of pages are free,
                 * half the heap is free, and we haven't minimized recently.
                 */
                if (   pHeap->cFreePages >= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT * 3
                    && pHeap->cFreePages >= pHeap->cHeapPages / 2 /* 50% free */
                    && pHeap->cFreeCalls - pHeap->uLastMinimizeCall > RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT
                   )
                {
                    uint32_t cFreePageTarget = pHeap->cHeapPages / 4; /* 25% free */
                    while (pHeap->cFreePages > cFreePageTarget)
                    {
                        pHeap->uLastMinimizeCall = pHeap->cFreeCalls;

                        pBlock = NULL;
                        RTAvlrPVDoWithAll(&pHeap->BlockTree, false /*fFromLeft*/,
                                          rtHeapPageFindUnusedBlockCallback, &pBlock);
                        if (!pBlock)
                            break;

                        void *pv2 = RTAvlrPVRemove(&pHeap->BlockTree, pBlock->Core.Key); Assert(pv2); NOREF(pv2);
                        pHeap->cHeapPages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        pHeap->cFreePages -= RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
                        /* The hints may point at the block being freed; drop
                           both before leaving the lock. */
                        pHeap->pHint1      = NULL;
                        pHeap->pHint2      = NULL;
                        RTCritSectLeave(&pHeap->CritSect);

                        /* Unmap and free the tracking structure outside the
                           lock; the block is no longer reachable via the tree. */
                        munmap(pBlock->Core.Key, RTMEMPAGEPOSIX_BLOCK_SIZE);
                        pBlock->Core.Key = pBlock->Core.KeyLast = NULL;
                        pBlock->cFreePages = 0;
                        rtMemBaseFree(pBlock);

                        RTCritSectEnter(&pHeap->CritSect);
                    }
                }
            }
            else
                rc = VERR_INVALID_POINTER;
        }
        else
            rc = VERR_INVALID_POINTER;

        RTCritSectLeave(&pHeap->CritSect);
    }

    return rc;
}
595
596
597/**
598 * Initializes the heap.
599 *
600 * @returns IPRT status code
601 * @param pvUser1 Unused.
602 * @param pvUser2 Unused.
603 */
604static DECLCALLBACK(int) rtMemPagePosixInitOnce(void *pvUser1, void *pvUser2)
605{
606 NOREF(pvUser1); NOREF(pvUser2);
607 int rc = RTHeapPageInit(&g_MemPagePosixHeap, false /*fExec*/);
608 if (RT_SUCCESS(rc))
609 {
610 rc = RTHeapPageInit(&g_MemExecPosixHeap, true /*fExec*/);
611 if (RT_SUCCESS(rc))
612 return rc;
613 RTHeapPageDelete(&g_MemPagePosixHeap);
614 }
615 return rc;
616}
617
618
619/**
620 * Allocates memory from the specified heap.
621 *
622 * @returns Address of the allocated memory.
623 * @param cb The number of bytes to allocate.
624 * @param pszTag The tag.
625 * @param fZero Whether to zero the memory or not.
626 * @param pHeap The heap to use.
627 */
628static void *rtMemPagePosixAlloc(size_t cb, const char *pszTag, bool fZero, PRTHEAPPAGE pHeap)
629{
630 /*
631 * Validate & adjust the input.
632 */
633 Assert(cb > 0);
634 NOREF(pszTag);
635 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
636
637 /*
638 * If the allocation is relatively large, we use mmap/munmap directly.
639 */
640 void *pv;
641 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
642 {
643
644 pv = mmap(NULL, cb,
645 PROT_READ | PROT_WRITE | (pHeap == &g_MemExecPosixHeap ? PROT_EXEC : 0),
646 MAP_PRIVATE | MAP_ANONYMOUS,
647 -1, 0);
648 if (pv != MAP_FAILED)
649 {
650 AssertPtr(pv);
651 if (fZero)
652 RT_BZERO(pv, cb);
653 }
654 else
655 pv = NULL;
656 }
657 else
658 {
659 int rc = RTOnce(&g_MemPagePosixInitOnce, rtMemPagePosixInitOnce, NULL, NULL);
660 if (RT_SUCCESS(rc))
661 rc = RTHeapPageAlloc(pHeap, cb >> PAGE_SHIFT, pszTag, fZero, &pv);
662 if (RT_FAILURE(rc))
663 pv = NULL;
664 }
665
666 return pv;
667}
668
669
670/**
671 * Free memory allocated by rtMemPagePosixAlloc.
672 *
673 * @param pv The address of the memory to free.
674 * @param cb The size.
675 * @param pHeap The heap.
676 */
677static void rtMemPagePosixFree(void *pv, size_t cb, PRTHEAPPAGE pHeap)
678{
679 /*
680 * Validate & adjust the input.
681 */
682 if (!pv)
683 return;
684 AssertPtr(pv);
685 Assert(cb > 0);
686 Assert(!((uintptr_t)pv & PAGE_OFFSET_MASK));
687 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
688
689 /*
690 * If the allocation is relatively large, we use mmap/munmap directly.
691 */
692 if (cb >= RTMEMPAGEPOSIX_MMAP_THRESHOLD)
693 {
694 int rc = munmap(pv, cb);
695 AssertMsg(rc == 0, ("rc=%d pv=%p cb=%#zx\n", rc, pv, cb)); NOREF(rc);
696 }
697 else
698 {
699 int rc = RTHeapPageFree(pHeap, pv, cb >> PAGE_SHIFT);
700 AssertRC(rc);
701 }
702}
703
704
705
706
707
/** Allocates pages from the regular (non-exec) heap without zeroing them. */
RTDECL(void *) RTMemPageAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemPagePosixHeap);
}
712
713
/** Allocates zeroed pages from the regular (non-exec) heap. */
RTDECL(void *) RTMemPageAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, true /*fZero*/, &g_MemPagePosixHeap);
}
718
719
/** Frees pages allocated by RTMemPageAllocTag / RTMemPageAllocZTag. */
RTDECL(void) RTMemPageFree(void *pv, size_t cb) RT_NO_THROW
{
    return rtMemPagePosixFree(pv, cb, &g_MemPagePosixHeap);
}
724
725
726
727
728
/** Allocates executable pages from the exec heap without zeroing them. */
RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return rtMemPagePosixAlloc(cb, pszTag, false /*fZero*/, &g_MemExecPosixHeap);
}
733
734
/** Frees pages allocated by RTMemExecAllocTag. */
RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW
{
    return rtMemPagePosixFree(pv, cb, &g_MemExecPosixHeap);
}
739
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette