VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/alloc/heapoffset.cpp@ 25059

Last change on this file since 25059 was 25059, checked in by vboxsync, 15 years ago

RTHeapOffset: Initial conversion of RTHeapSimple.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.1 KB
Line 
1/* $Id: heapoffset.cpp 25059 2009-11-27 18:17:44Z vboxsync $ */
2/** @file
3 * IPRT - An Offset Based Heap.
4 */
5
6/*
7 * Copyright (C) 2006-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define LOG_GROUP RTLOGGROUP_DEFAULT
36#include <iprt/heap.h>
37#include "internal/iprt.h"
38
39#include <iprt/assert.h>
40#include <iprt/asm.h>
41#include <iprt/string.h>
42#include <iprt/err.h>
43#include <iprt/log.h>
44#include <iprt/param.h>
45
46#include "internal/magics.h"
47
48
49/*******************************************************************************
50* Structures and Typedefs *
51*******************************************************************************/
52/** Pointer to the heap anchor block. */
53typedef struct RTHEAPOFFSETINTERNAL *PRTHEAPOFFSETINTERNAL;
54/** Pointer to a heap block. */
55typedef struct RTHEAPOFFSETBLOCK *PRTHEAPOFFSETBLOCK;
56/** Pointer to a free heap block. */
57typedef struct RTHEAPOFFSETFREE *PRTHEAPOFFSETFREE;
58
59/**
60 * Structure describing a block in an offset based heap.
61 *
62 * If this block is allocated, it is followed by the user data.
63 * If this block is free, see RTHEAPOFFSETFREE.
64 */
/**
 * Structure describing a block in an offset based heap.
 *
 * If this block is allocated, it is followed by the user data.
 * If this block is free, see RTHEAPOFFSETFREE.
 *
 * All off* members are byte offsets relative to the start of the heap
 * (the anchor block), with 0 serving as the NIL value.
 */
typedef struct RTHEAPOFFSETBLOCK
{
    /** The next block in the global block list; 0 if this is the last block. */
    uint32_t /*PRTHEAPOFFSETBLOCK*/ offNext;
    /** The previous block in the global block list; 0 if this is the first block. */
    uint32_t /*PRTHEAPOFFSETBLOCK*/ offPrev;
    /** Offset into the heap of this block. Used to locate the anchor block. */
    uint32_t /*PRTHEAPOFFSETINTERNAL*/ offSelf;
    /** Flags + magic (RTHEAPOFFSETBLOCK_FLAGS_*). */
    uint32_t fFlags;
} RTHEAPOFFSETBLOCK;
AssertCompileSizeAlignment(RTHEAPOFFSETBLOCK, 16);
77
78/** The block is free if this flag is set. When cleared it's allocated. */
79#define RTHEAPOFFSETBLOCK_FLAGS_FREE (RT_BIT_32(0))
80/** The magic value. */
81#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC (UINT32_C(0xabcdef00))
82/** The mask that needs to be applied to RTHEAPOFFSETBLOCK::fFlags to obtain the magic value. */
83#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK (~RT_BIT_32(0))
84
85/**
86 * Checks if the specified block is valid or not.
87 * @returns boolean answer.
88 * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
89 */
90#define RTHEAPOFFSETBLOCK_IS_VALID(pBlock) \
91 ( ((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK) == RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
92
93/**
94 * Checks if the specified block is valid and in use.
95 * @returns boolean answer.
96 * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
97 */
98#define RTHEAPOFFSETBLOCK_IS_VALID_USED(pBlock) \
99 ( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
100 == RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
101
102/**
103 * Checks if the specified block is valid and free.
104 * @returns boolean answer.
105 * @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
106 */
107#define RTHEAPOFFSETBLOCK_IS_VALID_FREE(pBlock) \
108 ( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
109 == (RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE) )
110
111/**
112 * Checks if the specified block is free or not.
113 * @returns boolean answer.
114 * @param pBlock Pointer to a valid RTHEAPOFFSETBLOCK structure.
115 */
116#define RTHEAPOFFSETBLOCK_IS_FREE(pBlock) (!!((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_FREE))
117
118/**
119 * A free heap block.
120 * This is an extended version of RTHEAPOFFSETBLOCK that takes the unused
121 * user data to store free list pointers and a cached size value.
122 */
/**
 * A free heap block.
 * This is an extended version of RTHEAPOFFSETBLOCK that takes the unused
 * user data to store free list pointers and a cached size value.
 */
typedef struct RTHEAPOFFSETFREE
{
    /** Core stuff (shared with used blocks). */
    RTHEAPOFFSETBLOCK Core;
    /** The next free block (free list link); 0 if this is the free tail. */
    uint32_t /*PRTHEAPOFFSETFREE*/ offNext;
    /** The previous free block (free list link); 0 if this is the free head. */
    uint32_t /*PRTHEAPOFFSETFREE*/ offPrev;
    /** The size of the block (excluding the RTHEAPOFFSETBLOCK part). */
    uint32_t cb;
    /** An alignment filler to make it a multiple of 16 bytes. */
    uint32_t Alignment;
} RTHEAPOFFSETFREE;
AssertCompileSizeAlignment(RTHEAPOFFSETFREE, 16);
137
138
139/**
140 * The heap anchor block.
141 * This structure is placed at the head of the memory block specified to RTHeapOffsetInit(),
142 * which means that the first RTHEAPOFFSETBLOCK appears immediately after this structure.
143 */
/**
 * The heap anchor block.
 * This structure is placed at the head of the memory block specified to RTHeapOffsetInit(),
 * which means that the first RTHEAPOFFSETBLOCK appears immediately after this structure.
 */
typedef struct RTHEAPOFFSETINTERNAL
{
    /** The typical magic (RTHEAPOFFSET_MAGIC). */
    uint32_t u32Magic;
    /** The heap size. (This structure is included!) */
    uint32_t cbHeap;
    /** The amount of free memory in the heap. */
    uint32_t cbFree;
    /** Offset of the head of the free block list; 0 if the list is empty. */
    uint32_t /*PRTHEAPOFFSETFREE*/ offFreeHead;
    /** Offset of the tail of the free block list; 0 if the list is empty. */
    uint32_t /*PRTHEAPOFFSETFREE*/ offFreeTail;
    /** Padding making the size of this structure a multiple of 32. */
    uint32_t au32Alignment[3];
} RTHEAPOFFSETINTERNAL;
AssertCompileSizeAlignment(RTHEAPOFFSETINTERNAL, 32);
160
161
162/** The minimum allocation size. */
163#define RTHEAPOFFSET_MIN_BLOCK (sizeof(RTHEAPOFFSETBLOCK))
164AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETBLOCK));
165AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETFREE) - sizeof(RTHEAPOFFSETBLOCK));
166
167/** The minimum and default alignment. */
168#define RTHEAPOFFSET_ALIGNMENT (sizeof(RTHEAPOFFSETBLOCK))
169
170
171/*******************************************************************************
172* Defined Constants And Macros *
173*******************************************************************************/
174#ifdef RT_STRICT
175# define RTHEAPOFFSET_STRICT 1
176#endif
177
178/**
179 * Converts RTHEAPOFFSETBLOCK::offSelf into a heap anchor block pointer.
180 *
181 * @returns Pointer of given type.
182 * @param pBlock The block to find the heap anchor block for.
183 */
184#define RTHEAPOFF_GET_ANCHOR(pBlock) ( (PRTHEAPOFFSETINTERNAL)((uint8_t *)(pBlock) - (pBlock)->offSelf ) )
185
186
187/**
188 * Converts an offset to a pointer.
189 *
190 * All offsets are relative to the heap to make life simple.
191 *
192 * @returns Pointer of given type.
193 * @param pHeapInt Pointer to the heap anchor block.
194 * @param off The offset to convert.
195 * @param type The desired type.
196 */
197#ifdef RTHEAPOFFSET_STRICT
198# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)rtHeapOffCheckedOffToPtr(pHeapInt, off) )
199#else
200# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)((off) ? (uint8_t *)(pHeapInt) + (off) : NULL) )
201#endif
202
203/**
204 * Converts a pointer to an offset.
205 *
206 * All offsets are relative to the heap to make life simple.
207 *
208 * @returns Offset into the heap.
209 * @param pHeapInt Pointer to the heap anchor block.
210 * @param ptr The pointer to convert.
211 */
212#ifdef RTHEAPOFFSET_STRICT
213# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) rtHeapOffCheckedPtrToOff(pHeapInt, ptr)
214#else
215# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) ( (uint32_t)((ptr) ? (uintptr_t)(ptr) - (uintptr_t)(pHeapInt) : UINT32_C(0)) )
216#endif
217
/* Relational assertion helpers used by the block validation macros below. */
#define ASSERT_L(a, b)  AssertMsg((a) <  (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_LE(a, b) AssertMsg((a) <= (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_G(a, b)  AssertMsg((a) >  (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_GE(a, b) AssertMsg((a) >= (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_ALIGN(a) AssertMsg(!((uintptr_t)(a) & (RTHEAPOFFSET_ALIGNMENT - 1)), ("a=%p\n", (uintptr_t)(a)))

/** Asserts that the global-list back link of pBlock is sane: either it points
 *  below us but after the anchor, or pBlock is the very first block. */
#define ASSERT_PREV(pHeapInt, pBlock) \
    do { ASSERT_ALIGN((pBlock)->offPrev); \
        if ((pBlock)->offPrev) \
        { \
            ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
            ASSERT_GE((pBlock)->offPrev, sizeof(RTHEAPOFFSETINTERNAL)); \
        } \
        else \
            Assert((pBlock) == (PRTHEAPOFFSETBLOCK)((pHeapInt) + 1)); \
    } while (0)

/** Asserts that the global-list forward link of pBlock is sane.
 *  Note: the first parameter was previously named pHeap while the body
 *  referenced pHeapInt; it only worked because all callers happened to use
 *  that variable name. The parameter is now named consistently. */
#define ASSERT_NEXT(pHeapInt, pBlock) \
    do { ASSERT_ALIGN((pBlock)->offNext); \
        if ((pBlock)->offNext) \
        { \
            ASSERT_L((pBlock)->offNext, (pHeapInt)->cbHeap); \
            ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
        } \
    } while (0)

/** Asserts that a block (used or free) is valid: magic, anchor back-reference,
 *  position within the heap, and both global list links. */
#define ASSERT_BLOCK(pHeapInt, pBlock) \
    do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID(pBlock), ("%#x\n", (pBlock)->fFlags)); \
        AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
        ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
        ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
        ASSERT_NEXT(pHeapInt, pBlock); \
        ASSERT_PREV(pHeapInt, pBlock); \
    } while (0)

/** Same as ASSERT_BLOCK, but additionally requires the block to be in use. */
#define ASSERT_BLOCK_USED(pHeapInt, pBlock) \
    do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID_USED((pBlock)), ("%#x\n", (pBlock)->fFlags)); \
        AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
        ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
        ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
        ASSERT_NEXT(pHeapInt, pBlock); \
        ASSERT_PREV(pHeapInt, pBlock); \
    } while (0)

/** Asserts the free-list back link of pBlock (free list is sorted by address,
 *  so the free-list predecessor must lie between the free head and us). */
#define ASSERT_FREE_PREV(pHeapInt, pBlock) \
    do { ASSERT_ALIGN((pBlock)->offPrev); \
        if ((pBlock)->offPrev) \
        { \
            ASSERT_GE((pBlock)->offPrev, (pHeapInt)->offFreeHead); \
            ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
            ASSERT_LE((pBlock)->offPrev, (pBlock)->Core.offPrev); \
        } \
        else \
            Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeHead, PRTHEAPOFFSETFREE) ); \
    } while (0)

/** Asserts the free-list forward link of pBlock. */
#define ASSERT_FREE_NEXT(pHeapInt, pBlock) \
    do { ASSERT_ALIGN((pBlock)->offNext); \
        if ((pBlock)->offNext) \
        { \
            ASSERT_LE((pBlock)->offNext, (pHeapInt)->offFreeTail); \
            ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
            ASSERT_GE((pBlock)->offNext, (pBlock)->Core.offNext); \
        } \
        else \
            Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeTail, PRTHEAPOFFSETFREE)); \
    } while (0)

#ifdef RTHEAPOFFSET_STRICT
/** Asserts that the cached size of a free block matches the distance to the
 *  next block (or the heap end), minus the block header. */
# define ASSERT_FREE_CB(pHeapInt, pBlock) \
    do { size_t cbCalc = ((pBlock)->Core.offNext ? (pBlock)->Core.offNext : (pHeapInt)->cbHeap) \
                       - RTHEAPOFF_TO_OFF((pHeapInt), (pBlock)) - sizeof(RTHEAPOFFSETBLOCK); \
        AssertMsg((pBlock)->cb == cbCalc, ("cb=%#zx cbCalc=%#zx\n", (pBlock)->cb, cbCalc)); \
    } while (0)
#else
# define ASSERT_FREE_CB(pHeapInt, pBlock) do {} while (0)
#endif

/** Asserts that a free block is valid. */
#define ASSERT_BLOCK_FREE(pHeapInt, pBlock) \
    do { ASSERT_BLOCK(pHeapInt, &(pBlock)->Core); \
        Assert(RTHEAPOFFSETBLOCK_IS_VALID_FREE(&(pBlock)->Core)); \
        ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeHead); \
        ASSERT_LE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeTail); \
        ASSERT_FREE_NEXT(pHeapInt, pBlock); \
        ASSERT_FREE_PREV(pHeapInt, pBlock); \
        ASSERT_FREE_CB(pHeapInt, pBlock); \
    } while (0)

/** Asserts that the heap anchor block is ok. */
#define ASSERT_ANCHOR(pHeapInt) \
    do { AssertPtr(pHeapInt);\
        Assert((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC); \
    } while (0)
312
313
314/*******************************************************************************
315* Internal Functions *
316*******************************************************************************/
317#ifdef RTHEAPOFFSET_STRICT
318static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt);
319#endif
320static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment);
321static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock);
322
323#ifdef RTHEAPOFFSET_STRICT
324
325/** Checked version of RTHEAPOFF_TO_PTR. */
326DECLINLINE(void *) rtHeapOffCheckedOffToPtr(PRTHEAPOFFSETINTERNAL pHeapInt, uint32_t off)
327{
328 if (!off)
329 return NULL;
330 AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
331 AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
332 return (uint8_t *)pHeapInt + off;
333}
334
335/** Checked version of RTHEAPOFF_TO_OFF. */
336DECLINLINE(uint32_t) rtHeapOffCheckedPtrToOff(PRTHEAPOFFSETINTERNAL pHeapInt, void *pv)
337{
338 if (!pv)
339 return 0;
340 uintptr_t off = (uintptr_t)pv - (uintptr_t)pHeapInt;
341 AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
342 AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
343 return (uint32_t)off;
344}
345
346#endif /* RTHEAPOFFSET_STRICT */
347
348
349
350RTDECL(int) RTHeapOffsetInit(PRTHEAPOFFSET phHeap, void *pvMemory, size_t cbMemory)
351{
352 PRTHEAPOFFSETINTERNAL pHeapInt;
353 PRTHEAPOFFSETFREE pFree;
354 unsigned i;
355
356 /*
357 * Validate input. The imposed minimum heap size is just a convenient value.
358 */
359 AssertReturn(cbMemory >= PAGE_SIZE, VERR_INVALID_PARAMETER);
360 AssertPtrReturn(pvMemory, VERR_INVALID_POINTER);
361 AssertReturn((uintptr_t)pvMemory + (cbMemory - 1) > (uintptr_t)cbMemory, VERR_INVALID_PARAMETER);
362
363 /*
364 * Place the heap anchor block at the start of the heap memory,
365 * enforce 32 byte alignment of it. Also align the heap size correctly.
366 */
367 pHeapInt = (PRTHEAPOFFSETINTERNAL)pvMemory;
368 if ((uintptr_t)pvMemory & 31)
369 {
370 const uintptr_t off = 32 - ((uintptr_t)pvMemory & 31);
371 cbMemory -= off;
372 pHeapInt = (PRTHEAPOFFSETINTERNAL)((uintptr_t)pvMemory + off);
373 }
374 cbMemory &= ~(RTHEAPOFFSET_ALIGNMENT - 1);
375
376
377 /* Init the heap anchor block. */
378 pHeapInt->u32Magic = RTHEAPOFFSET_MAGIC;
379 pHeapInt->cbHeap = cbMemory;
380 pHeapInt->cbFree = cbMemory
381 - sizeof(RTHEAPOFFSETBLOCK)
382 - sizeof(RTHEAPOFFSETINTERNAL);
383 pHeapInt->offFreeTail = pHeapInt->offFreeHead = sizeof(*pHeapInt);
384 for (i = 0; i < RT_ELEMENTS(pHeapInt->au32Alignment); i++)
385 pHeapInt->au32Alignment[i] = UINT32_MAX;
386
387 /* Init the single free block. */
388 pFree = RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
389 pFree->Core.offNext = 0;
390 pFree->Core.offPrev = 0;
391 pFree->Core.offSelf = pHeapInt->offFreeHead;
392 pFree->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
393 pFree->offNext = 0;
394 pFree->offPrev = 0;
395 pFree->cb = pHeapInt->cbFree;
396
397 *phHeap = pHeapInt;
398
399#ifdef RTHEAPOFFSET_STRICT
400 rtHeapOffsetAssertAll(pHeapInt);
401#endif
402 return VINF_SUCCESS;
403}
404RT_EXPORT_SYMBOL(RTHeapOffsetInit);
405
406
407RTDECL(void *) RTHeapOffsetAlloc(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
408{
409 PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
410 PRTHEAPOFFSETBLOCK pBlock;
411
412 /*
413 * Validate and adjust the input.
414 */
415 AssertPtrReturn(pHeapInt, NULL);
416 if (cb < RTHEAPOFFSET_MIN_BLOCK)
417 cb = RTHEAPOFFSET_MIN_BLOCK;
418 else
419 cb = RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);
420 if (!cbAlignment)
421 cbAlignment = RTHEAPOFFSET_ALIGNMENT;
422 else
423 {
424 Assert(!(cbAlignment & (cbAlignment - 1)));
425 Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
426 if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
427 cbAlignment = RTHEAPOFFSET_ALIGNMENT;
428 }
429
430 /*
431 * Do the allocation.
432 */
433 pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
434 if (RT_LIKELY(pBlock))
435 {
436 void *pv = pBlock + 1;
437 return pv;
438 }
439 return NULL;
440}
441RT_EXPORT_SYMBOL(RTHeapOffsetAlloc);
442
443
444RTDECL(void *) RTHeapOffsetAllocZ(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
445{
446 PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
447 PRTHEAPOFFSETBLOCK pBlock;
448
449 /*
450 * Validate and adjust the input.
451 */
452 AssertPtrReturn(pHeapInt, NULL);
453 if (cb < RTHEAPOFFSET_MIN_BLOCK)
454 cb = RTHEAPOFFSET_MIN_BLOCK;
455 else
456 cb = RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);
457 if (!cbAlignment)
458 cbAlignment = RTHEAPOFFSET_ALIGNMENT;
459 else
460 {
461 Assert(!(cbAlignment & (cbAlignment - 1)));
462 Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
463 if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
464 cbAlignment = RTHEAPOFFSET_ALIGNMENT;
465 }
466
467 /*
468 * Do the allocation.
469 */
470 pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
471 if (RT_LIKELY(pBlock))
472 {
473 void *pv = pBlock + 1;
474 memset(pv, 0, cb);
475 return pv;
476 }
477 return NULL;
478}
479RT_EXPORT_SYMBOL(RTHeapOffsetAllocZ);
480
481
/**
 * Allocates a block of memory from the specified heap.
 *
 * No parameter validation or adjustment is performed.
 *
 * Strategy: first-fit scan of the address-sorted free list. If alignment
 * requires it, the block start is shifted up and the skipped bytes are
 * donated to the preceding block. If the fit leaves enough room for another
 * free block, the remainder is split off; otherwise the whole free block is
 * converted to a used one.
 *
 * @returns Pointer to the allocated block.
 * @returns NULL on failure.
 *
 * @param pHeapInt The heap.
 * @param cb Size of the memory block to allocate (already adjusted by caller).
 * @param uAlignment The alignment specifications for the allocated block
 *                   (power of two, >= RTHEAPOFFSET_ALIGNMENT).
 */
static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment)
{
    PRTHEAPOFFSETBLOCK pRet = NULL;
    PRTHEAPOFFSETFREE pFree;

    AssertReturn((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC, NULL);
#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif

    /*
     * Search for a fitting block from the lower end of the heap.
     */
    for (pFree = RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
         pFree;
         pFree = RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE))
    {
        uintptr_t offAlign;
        ASSERT_BLOCK_FREE(pHeapInt, pFree);

        /*
         * Match for size and alignment.
         */
        if (pFree->cb < cb)
            continue;
        /* Misalignment of the would-be user data (the bytes after Core). */
        offAlign = (uintptr_t)(&pFree->Core + 1) & (uAlignment - 1);
        if (offAlign)
        {
            RTHEAPOFFSETFREE Free;
            PRTHEAPOFFSETBLOCK pPrev;

            offAlign = uAlignment - offAlign;
            if (pFree->cb - offAlign < cb)
                continue;

            /*
             * Make a stack copy of the free block header and adjust the pointer.
             */
            Free = *pFree;
            pFree = (PRTHEAPOFFSETFREE)((uintptr_t)pFree + offAlign);

            /*
             * Donate offAlign bytes to the node in front of us.
             * If we're the head node, we'll have to create a fake node. We'll
             * mark it USED for simplicity.
             *
             * (Should this policy of donating memory to the guy in front of us
             * cause big 'leaks', we could create a new free node if there is room
             * for that.)
             */
            pPrev = RTHEAPOFF_TO_PTR(pHeapInt, Free.Core.offPrev, PRTHEAPOFFSETBLOCK);
            if (pPrev)
            {
                AssertMsg(!RTHEAPOFFSETBLOCK_IS_FREE(pPrev), ("Impossible!\n"));
                pPrev->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            }
            else
            {
                /* No predecessor: fabricate a used block at the heap start
                   covering the donated gap. */
                pPrev = (PRTHEAPOFFSETBLOCK)(pHeapInt + 1);
                Assert(pPrev == &pFree->Core);
                pPrev->offPrev = 0;
                pPrev->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
                pPrev->offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pPrev);
                pPrev->fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC;
            }
            pHeapInt->cbFree -= offAlign;

            /*
             * Recreate pFree in the new position and adjust the neighbors.
             */
            *pFree = Free;
            pFree->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pFree);

            /* the core (global block list) */
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pFree->Core.offSelf;
            pFree->Core.offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pPrev);

            /* the free part (free list links and cached size) */
            pFree->cb -= offAlign;
            if (pFree->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->Core.offSelf;
            else
                pHeapInt->offFreeTail = pFree->Core.offSelf;
            if (pFree->offPrev)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offPrev, PRTHEAPOFFSETFREE)->offNext = pFree->Core.offSelf;
            else
                pHeapInt->offFreeHead = pFree->Core.offSelf;
            ASSERT_BLOCK_FREE(pHeapInt, pFree);
            ASSERT_BLOCK_USED(pHeapInt, pPrev);
        }

        /*
         * Split off a new FREE block?
         */
        if (pFree->cb >= cb + RT_ALIGN_Z(sizeof(RTHEAPOFFSETFREE), RTHEAPOFFSET_ALIGNMENT))
        {
            /*
             * Move the FREE block up to make room for the new USED block.
             */
            PRTHEAPOFFSETFREE pNew = (PRTHEAPOFFSETFREE)((uintptr_t)&pFree->Core + cb + sizeof(RTHEAPOFFSETBLOCK));

            pNew->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pNew);
            pNew->Core.offNext = pFree->Core.offNext;
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pNew->Core.offSelf;
            pNew->Core.offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            pNew->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;

            /* pNew replaces pFree in the free list at the same position. */
            pNew->offNext = pFree->offNext;
            if (pNew->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pNew->offNext, PRTHEAPOFFSETFREE)->offPrev = pNew->Core.offSelf;
            else
                pHeapInt->offFreeTail = pNew->Core.offSelf;
            pNew->offPrev = pFree->offPrev;
            if (pNew->offPrev)
                RTHEAPOFF_TO_PTR(pHeapInt, pNew->offPrev, PRTHEAPOFFSETFREE)->offNext = pNew->Core.offSelf;
            else
                pHeapInt->offFreeHead = pNew->Core.offSelf;
            pNew->cb = (pNew->Core.offNext ? pNew->Core.offNext : pHeapInt->cbHeap)
                     - pNew->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);
            ASSERT_BLOCK_FREE(pHeapInt, pNew);

            /*
             * Update the old FREE node making it a USED node.
             */
            pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pFree->Core.offNext = pNew->Core.offSelf;
            pHeapInt->cbFree -= pFree->cb;
            pHeapInt->cbFree += pNew->cb;
            pRet = &pFree->Core;
            ASSERT_BLOCK_USED(pHeapInt, pRet);
        }
        else
        {
            /*
             * Link it out of the free list.
             */
            if (pFree->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->offPrev;
            else
                pHeapInt->offFreeTail = pFree->offPrev;
            if (pFree->offPrev)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offPrev, PRTHEAPOFFSETFREE)->offNext = pFree->offNext;
            else
                pHeapInt->offFreeHead = pFree->offNext;

            /*
             * Convert it to a used block.
             */
            pHeapInt->cbFree -= pFree->cb;
            pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pRet = &pFree->Core;
            ASSERT_BLOCK_USED(pHeapInt, pRet);
        }
        break;
    }

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif
    return pRet;
}
657
658
659RTDECL(void) RTHeapOffsetFree(RTHEAPOFFSET hHeap, void *pv)
660{
661 PRTHEAPOFFSETINTERNAL pHeapInt;
662 PRTHEAPOFFSETBLOCK pBlock;
663
664 /*
665 * Validate input.
666 */
667 if (!pv)
668 return;
669 AssertPtr(pv);
670 Assert(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv);
671
672 /*
673 * Get the block and heap. If in strict mode, validate these.
674 */
675 pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
676 pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
677 ASSERT_BLOCK_USED(pHeapInt, pBlock);
678 ASSERT_ANCHOR(pHeapInt);
679 Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);
680
681#ifdef RTHEAPOFFSET_FREE_POISON
682 /*
683 * Poison the block.
684 */
685 const size_t cbBlock = (pBlock->pNext ? (uintptr_t)pBlock->pNext : (uintptr_t)pHeapInt->pvEnd)
686 - (uintptr_t)pBlock - sizeof(RTHEAPOFFSETBLOCK);
687 memset(pBlock + 1, RTHEAPOFFSET_FREE_POISON, cbBlock);
688#endif
689
690 /*
691 * Call worker which does the actual job.
692 */
693 rtHeapOffsetFreeBlock(pHeapInt, pBlock);
694}
695RT_EXPORT_SYMBOL(RTHeapOffsetFree);
696
697
/**
 * Free a memory block.
 *
 * Links the block back into the address-sorted free list, merging it with
 * the adjacent left and/or right free blocks when they touch, and updates
 * the heap free-byte accounting.
 *
 * @param pHeapInt The heap.
 * @param pBlock The memory block to free (must be a valid USED block).
 */
static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock)
{
    PRTHEAPOFFSETFREE pFree = (PRTHEAPOFFSETFREE)pBlock;
    PRTHEAPOFFSETFREE pLeft;
    PRTHEAPOFFSETFREE pRight;

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif

    /*
     * Look for the closest free list blocks by walking the blocks right
     * of us (both lists are sorted by address).
     */
    pLeft = NULL;
    pRight = NULL;
    if (pHeapInt->offFreeTail)
    {
        /* Scan forward through the global list for the first FREE block. */
        pRight = RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETFREE);
        while (pRight && !RTHEAPOFFSETBLOCK_IS_FREE(&pRight->Core))
        {
            ASSERT_BLOCK(pHeapInt, &pRight->Core);
            pRight = RTHEAPOFF_TO_PTR(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETFREE);
        }
        /* No free block to our right: left neighbor is the free tail;
           otherwise it is the free-list predecessor of pRight. */
        if (!pRight)
            pLeft = RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeTail, PRTHEAPOFFSETFREE);
        else
        {
            ASSERT_BLOCK_FREE(pHeapInt, pRight);
            pLeft = RTHEAPOFF_TO_PTR(pHeapInt, pRight->offPrev, PRTHEAPOFFSETFREE);
        }
        if (pLeft)
            ASSERT_BLOCK_FREE(pHeapInt, pLeft);
    }
    AssertMsgReturnVoid(pLeft != pFree, ("Freed twice! pv=%p (pBlock=%p)\n", pBlock + 1, pBlock));
    ASSERT_L(RTHEAPOFF_TO_OFF(pHeapInt, pLeft), RTHEAPOFF_TO_OFF(pHeapInt, pFree));
    Assert(!pRight || (uintptr_t)pRight > (uintptr_t)pFree);
    Assert(!pLeft || RTHEAPOFF_TO_PTR(pHeapInt, pLeft->offNext, PRTHEAPOFFSETFREE) == pRight);

    /*
     * Insert at the head of the free block list?
     */
    if (!pLeft)
    {
        Assert(pRight == RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE));
        pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
        pFree->offPrev = 0;
        pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
        if (pRight)
            pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        else
            pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        pHeapInt->offFreeHead = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
    }
    else
    {
        /*
         * Can we merge with left hand free block?
         * (pLeft is immediately adjacent when its global offNext is us.)
         */
        if (pLeft->Core.offNext == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
        {
            pLeft->Core.offNext = pFree->Core.offNext;
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
            /* pLeft->cb is stale now; subtract it and re-add after resize below. */
            pHeapInt->cbFree -= pLeft->cb;
            pFree = pLeft;
        }
        /*
         * No, just link it into the free list then.
         */
        else
        {
            pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
            pFree->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
            pLeft->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            if (pRight)
                pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            else
                pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        }
    }

    /*
     * Can we merge with right hand free block?
     */
    if ( pRight
        && pRight->Core.offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
    {
        /* core (global block list) */
        pFree->Core.offNext = pRight->Core.offNext;
        if (pRight->Core.offNext)
            RTHEAPOFF_TO_PTR(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);

        /* free list */
        pFree->offNext = pRight->offNext;
        if (pRight->offNext)
            RTHEAPOFF_TO_PTR(pHeapInt, pRight->offNext, PRTHEAPOFFSETFREE)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        else
            pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        /* pRight->cb is absorbed; subtracted here, re-added in the total below. */
        pHeapInt->cbFree -= pRight->cb;
    }

    /*
     * Calculate the size and update free stats.
     */
    pFree->cb = (pFree->Core.offNext ? pFree->Core.offNext : pHeapInt->cbHeap)
              - RTHEAPOFF_TO_OFF(pHeapInt, pFree) - sizeof(RTHEAPOFFSETBLOCK);
    pHeapInt->cbFree += pFree->cb;
    ASSERT_BLOCK_FREE(pHeapInt, pFree);

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif
}
819
820
821#ifdef RTHEAPOFFSET_STRICT
/**
 * Internal consistency check (relying on assertions).
 *
 * Walks the whole global block list, validating each block and checking
 * that the free list links agree with the order in which free blocks are
 * encountered.
 *
 * @param pHeapInt The heap anchor block to check.
 */
static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt)
{
    PRTHEAPOFFSETFREE pPrev = NULL;
    PRTHEAPOFFSETFREE pPrevFree = NULL;
    PRTHEAPOFFSETFREE pBlock;
    /* The first block sits immediately after the anchor structure. */
    for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
         pBlock;
         pBlock = RTHEAPOFF_TO_PTR(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
    {
        if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
        {
            ASSERT_BLOCK_FREE(pHeapInt, pBlock);
            /* Free-list back link must point at the previous free block seen,
               and the first free block must be the list head. */
            Assert(pBlock->offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
            Assert(pPrevFree || pHeapInt->offFreeHead == RTHEAPOFF_TO_OFF(pHeapInt, pBlock));
            pPrevFree = pBlock;
        }
        else
            ASSERT_BLOCK_USED(pHeapInt, &pBlock->Core);
        /* Global-list back link must point at the previous block seen. */
        Assert(!pPrev || RTHEAPOFF_TO_OFF(pHeapInt, pPrev) == pBlock->Core.offPrev);
        pPrev = pBlock;
    }
    /* The last free block seen must be the free tail (0/NULL if none). */
    Assert(pHeapInt->offFreeTail == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
}
849#endif
850
851
852RTDECL(size_t) RTHeapOffsetSize(RTHEAPOFFSET hHeap, void *pv)
853{
854 PRTHEAPOFFSETINTERNAL pHeapInt;
855 PRTHEAPOFFSETBLOCK pBlock;
856 size_t cbBlock;
857
858 /*
859 * Validate input.
860 */
861 if (!pv)
862 return 0;
863 AssertPtrReturn(pv, 0);
864 AssertReturn(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv, 0);
865
866 /*
867 * Get the block and heap. If in strict mode, validate these.
868 */
869 pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
870 pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
871 ASSERT_BLOCK_USED(pHeapInt, pBlock);
872 ASSERT_ANCHOR(pHeapInt);
873 Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);
874
875 /*
876 * Calculate the block size.
877 */
878 cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
879 - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
880 return cbBlock;
881}
882RT_EXPORT_SYMBOL(RTHeapOffsetSize);
883
884
885RTDECL(size_t) RTHeapOffsetGetHeapSize(RTHEAPOFFSET hHeap)
886{
887 PRTHEAPOFFSETINTERNAL pHeapInt;
888
889 if (hHeap == NIL_RTHEAPOFFSET)
890 return 0;
891
892 pHeapInt = hHeap;
893 AssertPtrReturn(pHeapInt, 0);
894 ASSERT_ANCHOR(pHeapInt);
895 return pHeapInt->cbHeap;
896}
897RT_EXPORT_SYMBOL(RTHeapOffsetGetHeapSize);
898
899
900RTDECL(size_t) RTHeapOffsetGetFreeSize(RTHEAPOFFSET hHeap)
901{
902 PRTHEAPOFFSETINTERNAL pHeapInt;
903
904 if (hHeap == NIL_RTHEAPOFFSET)
905 return 0;
906
907 pHeapInt = hHeap;
908 AssertPtrReturn(pHeapInt, 0);
909 ASSERT_ANCHOR(pHeapInt);
910 return pHeapInt->cbFree;
911}
912RT_EXPORT_SYMBOL(RTHeapOffsetGetFreeSize);
913
914
915RTDECL(void) RTHeapOffsetDump(RTHEAPOFFSET hHeap, PFNRTHEAPOFFSETPRINTF pfnPrintf)
916{
917 PRTHEAPOFFSETINTERNAL pHeapInt = (PRTHEAPOFFSETINTERNAL)hHeap;
918 PRTHEAPOFFSETFREE pBlock;
919
920 pfnPrintf("**** Dumping Heap %p - cbHeap=%x cbFree=%x ****\n",
921 hHeap, pHeapInt->cbHeap, pHeapInt->cbFree);
922
923 for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
924 pBlock;
925 pBlock = RTHEAPOFF_TO_PTR(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
926 {
927 size_t cb = (pBlock->offNext ? pBlock->Core.offNext : pHeapInt->cbHeap)
928 - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
929 if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
930 pfnPrintf("%p %06x FREE offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x : cb=%#06x offNext=%06x offPrev=%06x\n",
931 pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb,
932 pBlock->cb, pBlock->offNext, pBlock->offPrev);
933 else
934 pfnPrintf("%p %06x USED offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x\n",
935 pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb);
936 }
937 pfnPrintf("**** Done dumping Heap %p ****\n", hHeap);
938}
939RT_EXPORT_SYMBOL(RTHeapOffsetDump);
940
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette