VirtualBox

Ignore:
Timestamp:
Jan 2, 2023 3:08:43 PM (2 years ago)
Author:
vboxsync
Message:

Add/VBoxGuestR0LibPhysHeap.cpp: Reworked the block management so we can merge with both the preceding and succeeding blocks when freeing, rather than having expensive hacks for doing this. This raises the minimum allocation to sizeof(uintptr_t) * 2, which makes no difference to the heap users, which have a minimum allocation size of 24 bytes.

Location:
trunk/src/VBox/Additions/common/VBoxGuest/lib
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibInternal.h

    r97930 r97937  
    107107struct VBGLPHYSHEAPBLOCK;
    108108typedef struct VBGLPHYSHEAPBLOCK VBGLPHYSHEAPBLOCK;
    109 struct _VBGLPHYSHEAPCHUNK;
     109struct VBGLPHYSHEAPFREEBLOCK;
     110typedef struct VBGLPHYSHEAPFREEBLOCK VBGLPHYSHEAPFREEBLOCK;
     111struct VBGLPHYSHEAPCHUNK;
    110112typedef struct VBGLPHYSHEAPCHUNK VBGLPHYSHEAPCHUNK;
    111113
     
    132134     * @{
    133135     */
    134     RTSEMFASTMUTEX     mutexHeap;
    135     /** Block list heads. */
    136     union
    137     {
    138         struct
    139         {
    140             VBGLPHYSHEAPBLOCK *pFreeBlocksHead;
    141             VBGLPHYSHEAPBLOCK *pAllocBlocksHead;
    142         } s;
    143         /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones
    144          * and one the allocated ones. */
    145         VBGLPHYSHEAPBLOCK *apHeads[2];
    146     } u;
    147     /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
    148      * one the allocated ones. */
    149     int32_t            acBlocks[2];
    150     /** Chunk    */
    151     VBGLPHYSHEAPCHUNK *pChunkHead;
     136    RTSEMFASTMUTEX          mutexHeap;
     137    /** Head of the block list (both free and allocated blocks).
     138     * This runs parallel to the chunk list and is sorted by address within each
     139     * chunk.  This allows merging with blocks both after and before when
     140     * freeing. */
     141    VBGLPHYSHEAPBLOCK      *pBlockHead;
     142    /** Head of the free list.
     143     *  This is not sorted; it follows a most-recently-freed-first approach. */
     144    VBGLPHYSHEAPFREEBLOCK  *pFreeHead;
     145    /** Number of blocks of any kind. */
     146    int32_t                 cBlocks;
     147    /** Number of free blocks. */
     148    int32_t                 cFreeBlocks;
     149    /** Head of the chunk list. */
     150    VBGLPHYSHEAPCHUNK      *pChunkHead;
    152151    /** @} */
    153152
  • trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp

    r97930 r97937  
    2929 */
    3030
     31/** @page pg_vbglr0_phys_heap   VBoxGuestLibR0 - Physical memory heap.
     32 *
     33 * Traditional heap implementation keeping all blocks in a ordered list and
     34 * keeping free blocks on additional list via pointers in the user area.  This
     35 * is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
     36 * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code handles
     37 * multiple chunks and has a physical address associated with each chunk and
     38 * block.
     39 *
     40 * When allocating memory, a free block is found that satisfies the request,
     41 * extending the heap with another chunk if needed.  The block is split if it's
     42 * too large, and the tail end is put on the free list.
     43 *
     44 * When freeing memory, the block being freed is put back on the free list and
     45 * we use the block list to check whether it can be merged with adjacent blocks.
     46 *
     47 * @note The original code managed the blocks in two separate lists for free and
     48 *       allocated blocks, which had the disadvantage only allowing merging with
     49 *       the block after the block being freed.  On the plus side, it had the
     50 *       potential for slightly better locality when examining the free list,
     51 *       since the next pointer and block size members were closer to one
     52 *       another.
     53 */
     54
    3155
    3256/*********************************************************************************************************************************
     
    3963#include <iprt/mem.h>
    4064#include <iprt/semaphore.h>
    41 
    42 /** @page pg_vbglr0_phys_heap   VBoxGuestLibR0 - Physical memory heap.
    43  *
    44  * The physical memory heap consists of a doubly linked list of large chunks
    45  * (VBGLDATA::pChunkHead), memory blocks are allocated within these chunks and
    46  * are members of allocated (VBGLDATA::u.s.pAllocBlocksHead) and free
    47  * (VBGLDATA::u.s.pFreeBlocksHead) doubly linked lists.
    48  *
    49  * When allocating a block, we search the free list for a suitable free block.
    50  * If there is no such block, a new chunk is allocated and the new block is
    51  * taken from the new chunk as the only chunk-sized free block. The allocated
    52  * block is unlinked from the free list and goes to alloc list.
    53  *
    54  * When freeing block, we check the pointer and then unlink the block from the
    55  * alloc list and move it to the free list.
    56  *
    57  * For each chunk we maintain the allocated blocks counter (as well as a count
    58  * of free blocks).  If 2 (or more) entire chunks are free they are immediately
    59  * deallocated, so we always have at most 1 free chunk.
    60  *
    61  * When freeing blocks, two subsequent free blocks are always merged together.
    62  *
    63  * Since the free list isn't sorted by address or anything, it is expensive to
    64  * determine whether the block before us is free and we should merge with it.
    65  * The current implementation merges blocks only when there is a free block
    66  * after the just freed one, never when there is one before it as that's too
    67  * expensive.  So, to counter this free list fragmentation, we will occasionally
    68  * scan chunks with a majority of free blocks and merge adjacent ones.
    69  */
    7065
    7166
     
    7671#define VBGL_PH_ASSERT_MSG  AssertMsg
    7772
    78 // #define DUMPHEAP
     73/*#define DUMPHEAP*/
    7974
    8075#ifdef DUMPHEAP
     
    8479#endif
    8580
    86 /* Heap block signature */
    87 #define VBGL_PH_BLOCKSIGNATURE          UINT32_C(0xADDBBBBB)
    88 
    8981/* Heap chunk signature */
    9082#define VBGL_PH_CHUNKSIGNATURE          UINT32_C(0xADDCCCCC)
     
    9284#define VBGL_PH_CHUNKSIZE               (0x10000)
    9385
     86/* Heap block signature */
     87#define VBGL_PH_BLOCKSIGNATURE          UINT32_C(0xADDBBBBB)
     88
    9489/** The allocation block alignment.
    9590 *
    96  * This is also the the minimum block size.
     91 * This cannot be larger than VBGLPHYSHEAPBLOCK.
    9792 */
    9893#define VBGL_PH_ALLOC_ALIGN             (sizeof(void *))
     
    135130 */
    136131#define VBGL_PH_MIN_SPLIT_FREE_BLOCK    32
     132
     133
     134/** The smallest amount of user data that can be allocated.
     135 *
     136 * This is to ensure that the block can be converted into a
     137 * VBGLPHYSHEAPFREEBLOCK structure when freed.  This must be smaller or equal
     138 * to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
     139 */
     140#define VBGL_PH_SMALLEST_ALLOC_SIZE     16
     141
     142/** The maximum allocation request size. */
     143#define VBGL_PH_LARGEST_ALLOC_SIZE      RT_ALIGN_32(  _128M  \
     144                                                    - sizeof(VBGLPHYSHEAPBLOCK) \
     145                                                    - sizeof(VBGLPHYSHEAPCHUNK) \
     146                                                    - VBGL_PH_ALLOC_ALIGN, \
     147                                                    VBGL_PH_ALLOC_ALIGN)
    137148
    138149
     
    165176
    166177/**
     178 * A free block.
     179 */
     180struct VBGLPHYSHEAPFREEBLOCK
     181{
     182    /** Core block data. */
     183    VBGLPHYSHEAPBLOCK       Core;
     184    /** Pointer to the next free list entry. */
     185    VBGLPHYSHEAPFREEBLOCK  *pNextFree;
     186    /** Pointer to the previous free list entry. */
     187    VBGLPHYSHEAPFREEBLOCK  *pPrevFree;
     188};
     189AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE  >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
     190AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
     191AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);
     192
     193/**
    167194 * A chunk of memory used by the heap for sub-allocations.
    168195 *
     
    182209    uint32_t uPadding1;
    183210
    184     /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
    185      *  one the allocated ones. */
    186     int32_t acBlocks[2];
     211    /** Number of blocks of any kind. */
     212    int32_t  cBlocks;
     213    /** Number of free blocks. */
     214    int32_t  cFreeBlocks;
    187215
    188216    /** Pointer to the next chunk. */
     
    208236
    209237   VBGL_PH_dprintf(("Chunks:\n"));
    210 
    211    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;
    212 
    213    while (pChunk)
    214    {
    215        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
    216                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));
    217 
    218        pChunk = pChunk->pNext;
    219    }
     238   for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
     239       VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
     240                        pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize,
     241                        pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));
    220242
    221243   VBGL_PH_dprintf(("Allocated blocks:\n"));
    222 
    223    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.u.s.pAllocBlocksHead;
    224 
    225    while (pBlock)
    226    {
    227        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
    228                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
    229                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));
    230 
    231        pBlock = pBlock->pNext;
    232    }
     244   for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
     245       VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
     246                        pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbDataSize,
     247                        pBlock->u32Signature,  pBlock->fAllocated ? "allocated" : "     free", pBlock->pChunk));
    233248
    234249   VBGL_PH_dprintf(("Free blocks:\n"));
    235 
    236    pBlock = g_vbgldata.u.s.pFreeBlocksHead;
    237 
    238    while (pBlock)
    239    {
    240        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
    241                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
    242                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));
    243 
    244        pBlock = pBlock->pNext;
    245    }
     250   for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
     251       VBGL_PH_dprintf(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
     252                        pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbDataSize,
     253                        pBlock->Core.u32Signature, pBlock->Core.pChunk,
     254                        !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));
    246255
    247256   VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
     
    250259
    251260
    252 static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
     261static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
    253262{
    254263    VBGL_PH_ASSERT(pBlock != NULL);
    255264    VBGL_PH_ASSERT(pChunk != NULL);
    256265
    257     pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
    258     pBlock->cbDataSize   = cbDataSize;
    259     pBlock->fAllocated   = false;
    260     pBlock->pNext        = NULL;
    261     pBlock->pPrev        = NULL;
    262     pBlock->pChunk       = pChunk;
    263 }
    264 
    265 
    266 /**
    267  * Links @a pBlock onto the appropriate chain accoring to @a pInsertAfter or @a
    268  * pBlock->fAllocated.
     266    pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
     267    pBlock->Core.cbDataSize   = cbDataSize;
     268    pBlock->Core.fAllocated   = false;
     269    pBlock->Core.pNext        = NULL;
     270    pBlock->Core.pPrev        = NULL;
     271    pBlock->Core.pChunk       = pChunk;
     272    pBlock->pNextFree         = NULL;
     273    pBlock->pPrevFree         = NULL;
     274}
     275
     276
     277/**
     278 * Updates block statistics when a block is added.
     279 */
     280DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
     281{
     282    g_vbgldata.cBlocks      += 1;
     283    pBlock->pChunk->cBlocks += 1;
     284    AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbSize / sizeof(VBGLPHYSHEAPFREEBLOCK),
     285              ("pChunk=%p: cbSize=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbSize, pBlock->pChunk->cBlocks));
     286}
     287
     288
     289/**
     290 * Links @a pBlock onto the head of block list.
    269291 *
    270292 * This also update the per-chunk block counts.
    271293 */
    272 static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
    273 {
    274     bool const fAllocated = pBlock->fAllocated;
    275 
     294static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
     295{
    276296    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    277297    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
    278298
    279     if (pInsertAfter)
    280     {
    281         pBlock->pNext = pInsertAfter->pNext;
    282         pBlock->pPrev = pInsertAfter;
    283 
    284         if (pInsertAfter->pNext)
    285             pInsertAfter->pNext->pPrev = pBlock;
    286 
    287         pInsertAfter->pNext = pBlock;
    288     }
    289     else
    290     {
    291         /* inserting to head of list */
    292         VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.u.apHeads[fAllocated];
    293 
    294         pBlock->pNext = pOldHead;
    295         pBlock->pPrev = NULL;
    296 
    297         if (pOldHead)
    298             pOldHead->pPrev = pBlock;
    299         g_vbgldata.u.apHeads[fAllocated] = pBlock;
    300     }
    301 
    302     /* Update the block counts: */
    303     g_vbgldata.acBlocks[fAllocated]      += 1;
    304     pBlock->pChunk->acBlocks[fAllocated] += 1;
    305     AssertMsg(   (uint32_t)pBlock->pChunk->acBlocks[fAllocated]
    306               <= pBlock->pChunk->cbSize / (sizeof(*pBlock) + VBGL_PH_ALLOC_ALIGN),
    307               ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
    308                pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
    309 }
    310 
    311 
    312 /**
    313  * Unlinks @a pBlock from the chain it's on.
     299    /* inserting to head of list */
     300    VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;
     301
     302    pBlock->pNext = pOldHead;
     303    pBlock->pPrev = NULL;
     304
     305    if (pOldHead)
     306        pOldHead->pPrev = pBlock;
     307    g_vbgldata.pBlockHead = pBlock;
     308
     309    /* Update the stats: */
     310    vbglPhysHeapStatsBlockAdded(pBlock);
     311}
     312
     313
     314/**
     315 * Links @a pBlock onto the block list after @a pInsertAfter.
    314316 *
    315317 * This also update the per-chunk block counts.
    316318 */
     319static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
     320{
     321    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
     322    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
     323
     324    pBlock->pNext = pInsertAfter->pNext;
     325    pBlock->pPrev = pInsertAfter;
     326
     327    if (pInsertAfter->pNext)
     328        pInsertAfter->pNext->pPrev = pBlock;
     329
     330    pInsertAfter->pNext = pBlock;
     331
     332    /* Update the stats: */
     333    vbglPhysHeapStatsBlockAdded(pBlock);
     334}
     335
     336
     337/**
     338 * Unlinks @a pBlock from the block list.
     339 *
     340 * This also update the per-chunk block counts.
     341 */
    317342static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
    318343{
    319     bool const fAllocated = pBlock->fAllocated;
    320 
    321344    VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
    322345    if (pOtherBlock)
     
    329352    else
    330353    {
    331         Assert(g_vbgldata.u.apHeads[fAllocated] == pBlock);
    332         g_vbgldata.u.apHeads[fAllocated] = pBlock->pNext;
     354        Assert(g_vbgldata.pBlockHead == pBlock);
     355        g_vbgldata.pBlockHead = pBlock->pNext;
    333356    }
    334357
     
    336359    pBlock->pPrev = NULL;
    337360
    338     /* Update the per-chunk counts: */
    339     g_vbgldata.acBlocks[fAllocated]      -= 1;
    340     pBlock->pChunk->acBlocks[fAllocated] -= 1;
    341     AssertMsg(pBlock->pChunk->acBlocks[fAllocated] >= 0,
    342               ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
    343                pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
    344     Assert(g_vbgldata.acBlocks[fAllocated] >= 0);
    345 }
    346 
    347 
    348 static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
     361    /* Update the stats: */
     362    g_vbgldata.cBlocks      -= 1;
     363    pBlock->pChunk->cBlocks -= 1;
     364    AssertMsg(pBlock->pChunk->cBlocks >= 0,
     365              ("pChunk=%p: cbSize=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbSize, pBlock->pChunk->cBlocks));
     366    Assert(g_vbgldata.cBlocks >= 0);
     367}
     368
     369
     370
     371/**
     372 * Updates statistics after adding a free block.
     373 */
     374DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
     375{
     376    g_vbgldata.cFreeBlocks           += 1;
     377    pBlock->Core.pChunk->cFreeBlocks += 1;
     378}
     379
     380
     381/**
     382 * Links @a pBlock onto head of the free chain.
     383 *
     384 * This is used during block freeing and when adding a new chunk.
     385 *
     386 * This also update the per-chunk block counts.
     387 */
     388static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
     389{
     390    Assert(!pBlock->Core.fAllocated);
     391    VBGL_PH_ASSERT_MSG(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
     392    VBGL_PH_ASSERT_MSG(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
     393
     394    /* inserting to head of list */
     395    VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;
     396
     397    pBlock->pNextFree = pOldHead;
     398    pBlock->pPrevFree = NULL;
     399
     400    if (pOldHead)
     401        pOldHead->pPrevFree = pBlock;
     402    g_vbgldata.pFreeHead = pBlock;
     403
     404    /* Update the stats: */
     405    vbglPhysHeapStatsFreeBlockAdded(pBlock);
     406}
     407
     408
     409/**
     410 * Links @a pBlock after @a pInsertAfter.
     411 *
     412 * This is used when splitting a free block during allocation to preserve the
     413 * place in the free list.
     414 *
     415 * This also update the per-chunk block counts.
     416 */
     417static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
     418{
     419    Assert(!pBlock->Core.fAllocated);
     420    VBGL_PH_ASSERT_MSG(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
     421    VBGL_PH_ASSERT_MSG(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
     422
     423    /* inserting after the given node */
     424    pBlock->pNextFree = pInsertAfter->pNextFree;
     425    pBlock->pPrevFree = pInsertAfter;
     426
     427    if (pInsertAfter->pNextFree)
     428        pInsertAfter->pNextFree->pPrevFree = pBlock;
     429
     430    pInsertAfter->pNextFree = pBlock;
     431
     432    /* Update the stats: */
     433    vbglPhysHeapStatsFreeBlockAdded(pBlock);
     434}
     435
     436
     437/**
     438 * Unlinks @a pBlock from the free list.
     439 *
     440 * This also update the per-chunk block counts.
     441 */
     442static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
     443{
     444    Assert(!pBlock->Core.fAllocated);
     445
     446    VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
     447    if (pOtherBlock)
     448        pOtherBlock->pPrevFree = pBlock->pPrevFree;
     449    /* else: this is tail of list but we do not maintain tails of block lists. so nothing to do. */
     450
     451    pOtherBlock = pBlock->pPrevFree;
     452    if (pOtherBlock)
     453        pOtherBlock->pNextFree = pBlock->pNextFree;
     454    else
     455    {
     456        Assert(g_vbgldata.pFreeHead == pBlock);
     457        g_vbgldata.pFreeHead = pBlock->pNextFree;
     458    }
     459
     460    pBlock->pNextFree = NULL;
     461    pBlock->pPrevFree = NULL;
     462
     463    /* Update the stats: */
     464    g_vbgldata.cFreeBlocks      -= 1;
     465    pBlock->Core.pChunk->cFreeBlocks -= 1;
     466    AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
     467              ("pChunk=%p: cbSize=%#x cFreeBlocks=%d\n",
     468               pBlock->Core.pChunk, pBlock->Core.pChunk->cbSize, pBlock->Core.pChunk->cFreeBlocks));
     469    Assert(g_vbgldata.cFreeBlocks >= 0);
     470}
     471
     472
     473/**
     474 * Allocate another chunk and add it to the heap.
     475 *
     476 * @returns Pointer to the free block in the new chunk on success, NULL on
     477 *          allocation failure.
     478 * @param   cbMinBlock  The size of the user block we need this chunk for.
     479 */
     480static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
    349481{
    350482    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
     
    352484    uint32_t           cbChunk;
    353485    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    354     AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */
    355 
    356     /* Compute the size of the new chunk, rounding up to next chunk size,
    357        which must be power of 2. */
     486    AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */
     487
     488    /*
     489     * Compute the size of the new chunk, rounding up to next chunk size,
     490     * which must be power of 2.
     491     *
     492     * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
     493     *       8 or 16 bytes too high, but safer this way since cbMinBlock is
     494     *       zero during the init code call.
     495     */
    358496    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    359     cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
     497    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
    360498    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
    361499
    362     /* This function allocates physical contiguous memory below 4 GB.  This 4GB
    363        limitation stems from using a 32-bit OUT instruction to pass a block
    364        physical address to the host. */
     500    /*
     501     * This function allocates physical contiguous memory below 4 GB.  This 4GB
     502     * limitation stems from using a 32-bit OUT instruction to pass a block
     503     * physical address to the host.
     504     */
    365505    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    366     /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
    367      *        a guideline rather than absolute minimum size. */
     506    if (!pChunk)
     507    {
        /* If the allocation fails, halve the size and try again. */
     509        uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
     510        cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
     511        if (cbChunk > cbMinChunk)
     512            do
     513            {
     514                cbChunk >>= 2;
     515                cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
     516                pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
     517            } while (!pChunk && cbChunk > cbMinChunk);
     518    }
    368519    if (pChunk)
    369520    {
    370         VBGLPHYSHEAPCHUNK *pOldHeadChunk;
    371         VBGLPHYSHEAPBLOCK *pBlock;
     521        VBGLPHYSHEAPCHUNK     *pOldHeadChunk;
     522        VBGLPHYSHEAPFREEBLOCK *pBlock;
    372523        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);
    373524
    374         /* Init the new chunk. */
     525        /*
     526         * Init the new chunk.
     527         */
    375528        pChunk->u32Signature     = VBGL_PH_CHUNKSIGNATURE;
    376529        pChunk->cbSize           = cbChunk;
    377530        pChunk->physAddr         = (uint32_t)PhysAddr;
    378         pChunk->acBlocks[0]      = 0;
    379         pChunk->acBlocks[1]      = 0;
     531        pChunk->cBlocks          = 0;
     532        pChunk->cFreeBlocks      = 0;
    380533        pChunk->pNext            = NULL;
    381534        pChunk->pPrev            = NULL;
     
    387540#endif
    388541
    389         /* Initialize the free block, which now occupies entire chunk. */
    390         pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
     542        /*
     543         * Initialize the free block, which now occupies entire chunk.
     544         */
     545        pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
    391546        vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
    392         vbglPhysHeapInsertBlock(NULL, pBlock);
    393 
    394         /* Add the chunk to the list. */
     547        vbglPhysHeapInsertBlock(&pBlock->Core);
     548        vbglPhysHeapInsertFreeBlock(pBlock);
     549
     550        /*
     551         * Add the chunk to the list.
     552         */
    395553        pOldHeadChunk = g_vbgldata.pChunkHead;
    396554        pChunk->pNext = pOldHeadChunk;
     
    399557        g_vbgldata.pChunkHead    = pChunk;
    400558
    401         VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
     559        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbDataSize));
    402560        return pBlock;
    403561    }
     
    407565
    408566
     567/**
     568 * Deletes a chunk: Unlinking all its blocks and freeing its memory.
     569 */
    409570static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
    410571{
     
    415576    VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));
    416577
    417     /* first scan the chunk and unlink all blocks from the lists */
    418 
     578    /*
     579     * First scan the chunk and unlink all blocks from the lists.
     580     *
     581     * Note! We could do this by finding the first and last block list entries
     582     *       and just drop the whole chain relating to this chunk, rather than
     583     *       doing it one by one.  But doing it one by one is simpler and will
     584     *       continue to work if the block list ends in an unsorted state.
     585     */
    419586    uEnd = (uintptr_t)pChunk + pChunk->cbSize;
    420587    uCur = (uintptr_t)(pChunk + 1);
     
    423590    {
    424591        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
     592        Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
     593        Assert(pBlock->pChunk == pChunk);
    425594
    426595        uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
    427 
     596        Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);
     597
     598        if (!pBlock->fAllocated)
     599            vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
    428600        vbglPhysHeapUnlinkBlock(pBlock);
    429601    }
     
    431603    VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));
    432604
    433     /* Unlink the chunk from the chunk list. */
     605    /*
     606     * Unlink the chunk from the chunk list.
     607     */
    434608    if (pChunk->pNext)
    435609        pChunk->pNext->pPrev = pChunk->pPrev;
     
    444618    }
    445619
     620    /*
     621     * Finally, free the chunk memory.
     622     */
    446623    RTMemContFree(pChunk, pChunk->cbSize);
    447624}
    448625
    449626
    450 /**
    451  * Worker for VbglR0PhysHeapFree that merges adjacent free nodes in @a pChunk.
    452  *
    453  * The caller has check that there must be 1 or more such.
    454  */
    455 static void vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(VBGLPHYSHEAPCHUNK *pChunk)
    456 {
    457     uintptr_t const    uEnd       = (uintptr_t)pChunk + pChunk->cbSize;
    458     VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
    459     VBGLPHYSHEAPBLOCK *pCurBlock  = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
    460     while ((uintptr_t)pCurBlock < uEnd)
    461     {
    462         VBGLPHYSHEAPBLOCK * const pNextBlock = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
    463 
    464         AssertReturnVoid(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
    465         AssertReturnVoid(pCurBlock->pChunk == pChunk);
    466         if (   !pCurBlock->fAllocated
    467             && pPrevBlock
    468             && !pPrevBlock->fAllocated)
    469         {
    470             /* Adjust size of the previous memory block. */
    471             pPrevBlock->cbDataSize += sizeof(*pCurBlock) + pCurBlock->cbDataSize;
    472 
    473             /* Unlink the current block and invalid it. */
    474             vbglPhysHeapUnlinkBlock(pCurBlock);
    475 
    476             pCurBlock->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
    477             pCurBlock->cbDataSize   = UINT32_MAX / 4;
    478 
    479             /* The previous node remains the same as we advance to the next one. */
    480         }
    481         else
    482             pPrevBlock = pCurBlock;
    483         pCurBlock = pNextBlock;
    484     }
    485 }
    486 
    487 
    488627DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
    489628{
    490     VBGLPHYSHEAPBLOCK *pBlock;
    491     VBGLPHYSHEAPBLOCK *pIter;
    492     int32_t            cLeft;
     629    VBGLPHYSHEAPFREEBLOCK *pBlock;
     630    VBGLPHYSHEAPFREEBLOCK *pIter;
     631    int32_t                 cLeft;
    493632#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    494     uint32_t           cbAlwaysSplit;
     633    uint32_t                cbAlwaysSplit;
    495634#endif
    496     int                rc;
     635    int                     rc;
    497636
    498637    /*
    499      * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
     638     * Make sure we don't allocate anything too small to turn into a free node
     639     * and align the size to prevent pointer misalignment and whatnot.
    500640     */
     641    cbSize = RT_MAX(cbSize, VBGL_PH_SMALLEST_ALLOC_SIZE);
    501642    cbSize = RT_ALIGN_32(cbSize, VBGL_PH_ALLOC_ALIGN);
    502     AssertStmt(cbSize > 0, cbSize = VBGL_PH_ALLOC_ALIGN); /* avoid allocating zero bytes */
     643    AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));
    503644
    504645    rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
     
    520661        /* Smaller than 3/4 page:  Prefer a free block that can keep the request within a single page,
    521662           so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
    522         VBGLPHYSHEAPBLOCK *pFallback = NULL;
    523         for (pIter = g_vbgldata.u.s.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext, cLeft--)
     663        VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
     664        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
    524665        {
    525             AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
    526             if (pIter->cbDataSize >= cbSize)
     666            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
     667            if (pIter->Core.cbDataSize >= cbSize)
    527668            {
    528                 if (pIter->cbDataSize == cbSize)
     669                if (pIter->Core.cbDataSize == cbSize)
    529670                {
    530671                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
     
    537678                else
    538679                {
    539                     if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
     680                    if (!pFallback || pIter->Core.cbDataSize < pFallback->Core.cbDataSize)
    540681                        pFallback = pIter;
    541682                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
    542683                    {
    543                         if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
     684                        if (!pBlock || pIter->Core.cbDataSize < pBlock->Core.cbDataSize)
    544685                            pBlock = pIter;
    545686#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    546                         else if (pIter->cbDataSize >= cbAlwaysSplit)
     687                        else if (pIter->Core.cbDataSize >= cbAlwaysSplit)
    547688                        {
    548689                            pBlock = pIter;
     
    566707    {
    567708        /* Larger than 3/4 page:  Find closest free list match. */
    568         for (pIter = g_vbgldata.u.s.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext, cLeft--)
     709        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
    569710        {
    570             AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
    571             if (pIter->cbDataSize >= cbSize)
     711            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
     712            if (pIter->Core.cbDataSize >= cbSize)
    572713            {
    573                 if (pIter->cbDataSize == cbSize)
     714                if (pIter->Core.cbDataSize == cbSize)
    574715                {
    575716                    /* Exact match - we're done! */
     
    579720
    580721#ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
    581                 if (pIter->cbDataSize >= cbAlwaysSplit)
     722                if (pIter->Core.cbDataSize >= cbAlwaysSplit)
    582723                {
    583724                    /* Really big block - no point continuing the search! */
     
    587728#endif
    588729                /* Looking for a free block with nearest size. */
    589                 if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
     730                if (!pBlock || pIter->Core.cbDataSize < pBlock->Core.cbDataSize)
    590731                    pBlock = pIter;
    591732
     
    608749    {
    609750        /* We have a free block, either found or allocated. */
    610         VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
    611                            ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
    612         VBGL_PH_ASSERT_MSG(!pBlock->fAllocated, ("pBlock = %p\n", pBlock));
     751        VBGL_PH_ASSERT_MSG(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
     752                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
     753        VBGL_PH_ASSERT_MSG(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));
    613754
    614755        /*
     
    617758         * We do this before unlinking the block so we can preserve the location
    618759         * in the free list.
     760         *
     761         * Note! We cannot split off and return the tail end here, because that may
     762         *       violate the same page requirements for requests smaller than 3/4 page.
    619763         */
    620         if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
     764        AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
     765        if (pBlock->Core.cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
    621766        {
    622             pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);
    623             vbglPhysHeapInitFreeBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
    624 
    625             pBlock->cbDataSize = cbSize;
    626 
    627             /* Insert the new 'pIter' block after the 'pBlock' in the free list. */
    628             vbglPhysHeapInsertBlock(pBlock, pIter);
     767            pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cbSize);
     768            vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
     769
     770            pBlock->Core.cbDataSize = cbSize;
     771
     772            /* Insert the new 'pIter' block after the 'pBlock' in the block list
     773               and in the free list. */
     774            vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
     775            vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
    629776        }
    630777
    631778        /*
    632          * Unlink the block from the free list, mark it as allocated and insert
    633          * it in the allocated list.
     779         * Unlink the block from the free list and mark it as allocated.
    634780         */
    635         vbglPhysHeapUnlinkBlock(pBlock);
    636         pBlock->fAllocated = true;
    637         vbglPhysHeapInsertBlock(NULL, pBlock);
     781        vbglPhysHeapUnlinkFreeBlock(pBlock);
     782        pBlock->Core.fAllocated = true;
    638783
    639784        dumpheap("post alloc");
     
    644789        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    645790
    646         VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->cbDataSize));
    647         return pBlock + 1;
     791        VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbDataSize));
     792        return &pBlock->Core + 1;
    648793    }
    649794
     
    673818             * Calculate and return its physical address.
    674819             */
    675             return pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
     820            VBGLPHYSHEAPCHUNK  *pChunk = pBlock->pChunk;
     821            return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
    676822        }
    677823
     
    689835    if (pv != NULL)
    690836    {
    691         VBGLPHYSHEAPBLOCK *pBlock;
     837        VBGLPHYSHEAPFREEBLOCK *pBlock;
    692838
    693839        int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
     
    699845         * Validate the block header.
    700846         */
    701         pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
    702         if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
    703             && pBlock->fAllocated)
     847        pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
     848        if (   pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
     849            && pBlock->Core.fAllocated
     850            && pBlock->Core.cbDataSize >= VBGL_PH_SMALLEST_ALLOC_SIZE)
    704851        {
    705852            VBGLPHYSHEAPCHUNK *pChunk;
     
    707854
    708855            /*
    709              * Move the block from the allocated list to the free list.
     856             * Change the block status to freed.
    710857             */
    711             VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->cbDataSize));
    712             vbglPhysHeapUnlinkBlock(pBlock);
    713 
    714             dumpheap("post unlink");
    715 
    716             pBlock->fAllocated = false;
    717             vbglPhysHeapInsertBlock(NULL, pBlock);
     858            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbDataSize));
     859
     860            pBlock->Core.fAllocated = false;
     861            pBlock->pNextFree = pBlock->pPrevFree = NULL;
     862            vbglPhysHeapInsertFreeBlock(pBlock);
    718863
    719864            dumpheap("post insert");
    720865
    721866            /*
    722              * Check if the block after this one is also free and we can merge it into
    723              * this one.
     867             * Check if the block after this one is also free and we can merge it into this one.
    724868             */
    725             pChunk = pBlock->pChunk;
    726             pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
    727             if (   (uintptr_t)pNeighbour <= (uintptr_t)pChunk + pChunk->cbSize - sizeof(*pNeighbour)
    728                 && !pNeighbour->fAllocated)
     869            pChunk = pBlock->Core.pChunk;
     870
     871            pNeighbour = pBlock->Core.pNext;
     872            if (   pNeighbour
     873                && !pNeighbour->fAllocated
     874                && pNeighbour->pChunk == pChunk)
    729875            {
     876                Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbDataSize == (uintptr_t)pNeighbour);
     877
    730878                /* Adjust size of current memory block */
    731                 pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
     879                pBlock->Core.cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
    732880
    733881                /* Unlink the following node and invalidate it. */
     882                vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
    734883                vbglPhysHeapUnlinkBlock(pNeighbour);
    735884
     
    737886                pNeighbour->cbDataSize   = UINT32_MAX / 4;
    738887
    739                 dumpheap("post merge");
     888                dumpheap("post merge after");
     889            }
     890
     891            /*
     892             * Same check for the block before us.  This invalidates pBlock.
     893             */
     894            pNeighbour = pBlock->Core.pPrev;
     895            if (   pNeighbour
     896                && !pNeighbour->fAllocated
     897                && pNeighbour->pChunk == pChunk)
     898            {
     899                Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbDataSize == (uintptr_t)pBlock);
     900
     901                /* Adjust size of the block before us */
     902                pNeighbour->cbDataSize += pBlock->Core.cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
     903
     904            /* Unlink this node and invalidate it. */
     905                vbglPhysHeapUnlinkFreeBlock(pBlock);
     906                vbglPhysHeapUnlinkBlock(&pBlock->Core);
     907
     908                pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
     909                pBlock->Core.cbDataSize   = UINT32_MAX / 8;
     910
     911                pBlock = NULL; /* invalid */
     912
     913                dumpheap("post merge before");
    740914            }
    741915
     
    744918             * more completely free ones.
    745919             */
    746             if (   pChunk->acBlocks[1 /*allocated*/] == 0
     920            if (   pChunk->cFreeBlocks == pChunk->cBlocks
    747921                && (pChunk->pPrev || pChunk->pNext))
    748922            {
     
    752926                {
    753927                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
    754                     if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
     928                    if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
    755929                    {
    756930                        cUnusedChunks++;
     
    762936                             */
    763937                            vbglPhysHeapChunkDelete(pChunk);
    764                             pNeighbour = pBlock = NULL; /* invalid */
     938                            pBlock = NULL;      /* invalid */
    765939                            pChunk = NULL;
     940                            pNeighbour = NULL;
    766941                            break;
    767942                        }
     
    770945            }
    771946
    772             /*
    773              * Hack to counter the lack of merging with the node before us, scan
    774              * the chunk and merge adjacent free nodes.
    775              */
    776             if (pChunk)
    777             {
    778                 int32_t const cAllocBlocks  = pChunk->acBlocks[1 /*alloc*/];
    779                 int32_t const cApproxExcess = pChunk->acBlocks[0 /*free*/] - cAllocBlocks - 1;
    780                 if (   cApproxExcess > 0 /* there are too many free nodes */
    781                     && cAllocBlocks >= 0 /* sanity check */)
    782                 {
    783                     /* Just do the merging if there aren't that many blocks: */
    784                     if (cAllocBlocks <= 4)
    785                         vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(pChunk);
    786                     /* Do the merging if we're above a 1:2 ratio: */
    787                     else if (cApproxExcess >= cAllocBlocks)
    788                         vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(pChunk);
    789                 }
    790             }
    791 
    792947            dumpheap("post free");
    793948        }
    794949        else
    795950            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
    796                              pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
     951                             pBlock, pBlock->Core.u32Signature, pBlock->Core.cbDataSize, pBlock->Core.fAllocated));
    797952
    798953        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
     
    819974
    820975    size_t cbTotal = 0;
    821     for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.u.s.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
    822     {
    823         Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
    824         cbTotal += pCurBlock->cbDataSize;
     976    for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
     977    {
     978        Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
     979        Assert(!pCurBlock->Core.fAllocated);
     980        cbTotal += pCurBlock->Core.cbDataSize;
    825981    }
    826982
     
    839995{
    840996    /*
    841      * Scan the blocks in each chunk.
     997     * Scan the blocks in each chunk, walking the block list in parallel.
    842998     */
    843     unsigned acTotalBlocks[2] = { 0, 0 };
     999    const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
     1000    const VBGLPHYSHEAPBLOCK *pCurBlockListEntry  = g_vbgldata.pBlockHead;
     1001    unsigned                 acTotalBlocks[2]    = { 0, 0 };
    8441002    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
    8451003    {
     
    8501008                                   "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));
    8511009
     1010        const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
    8521011        uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbSize;
    853         const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
    8541012        unsigned                 acBlocks[2] = { 0, 0 };
    8551013        while ((uintptr_t)pCurBlock < uEnd)
     
    8611019                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
    8621020                                       "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
    863             AssertReturn(   pCurBlock->cbDataSize >= VBGL_PH_ALLOC_ALIGN
    864                          && pCurBlock->cbDataSize < _128M
     1021            AssertReturn(   pCurBlock->cbDataSize >= VBGL_PH_SMALLEST_ALLOC_SIZE
     1022                         && pCurBlock->cbDataSize <= VBGL_PH_LARGEST_ALLOC_SIZE
    8651023                         && RT_ALIGN_32(pCurBlock->cbDataSize, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbDataSize,
    8661024                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
    8671025                                       "pCurBlock=%p: cbDataSize=%#x", pCurBlock, pCurBlock->cbDataSize));
     1026            AssertReturn(pCurBlock == pCurBlockListEntry,
     1027                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
     1028                                       "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
     1029                                       pCurChunk, pCurBlock, pCurBlockListEntry));
     1030            AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
     1031                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
     1032                                       "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
     1033                                       pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));
     1034
    8681035            acBlocks[pCurBlock->fAllocated] += 1;
    8691036
    8701037            /* advance */
     1038            pPrevBlockListEntry = pCurBlock;
     1039            pCurBlockListEntry  = pCurBlock->pNext;
    8711040            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
    8721041        }
     
    8761045
    8771046        acTotalBlocks[1] += acBlocks[1];
    878         AssertReturn(acBlocks[1] == (uint32_t)pCurChunk->acBlocks[1],
     1047        AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
    8791048                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
    880                                    "pCurChunk=%p: cAllocatedBlocks=%u, expected %u",
    881                                    pCurChunk, pCurChunk->acBlocks[1], acBlocks[1]));
     1049                                   "pCurChunk=%p: cBlocks=%u, expected %u",
     1050                                   pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));
    8821051
    8831052        acTotalBlocks[0] += acBlocks[0];
    884         AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->acBlocks[0],
     1053        AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
    8851054                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
    8861055                                   "pCurChunk=%p: cFreeBlocks=%u, expected %u",
    887                                    pCurChunk, pCurChunk->acBlocks[0], acBlocks[0]));
     1056                                   pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));
    8881057
    8891058        pPrevChunk = pCurChunk;
    8901059    }
    8911060
    892     AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.acBlocks[0],
     1061    AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
    8931062                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
    894                                "g_vbgldata.acBlocks[0]=%u, expected %u", g_vbgldata.acBlocks[0], acTotalBlocks[0]));
    895     AssertReturn(acTotalBlocks[1] == (uint32_t)g_vbgldata.acBlocks[1],
     1063                               "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
     1064    AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
    8961065                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
    897                                "g_vbgldata.acBlocks[1]=%u, expected %u", g_vbgldata.acBlocks[1], acTotalBlocks[1]));
     1066                               "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));
    8981067
    8991068    /*
    900      * Count each list and check that they contain the same number of nodes.
     1069     * Check that the free list contains the same number of blocks as we
     1070     * encountered during the above scan.
    9011071     */
    902     for (unsigned iType = 0; iType < RT_ELEMENTS(g_vbgldata.u.apHeads); iType++)
    903     {
    904         unsigned           cBlocks    = 0;
    905         VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
    906         for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.u.apHeads[iType]; pCurBlock; pCurBlock = pCurBlock->pNext)
     1072    {
     1073        unsigned cFreeListBlocks = 0;
     1074        for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
     1075             pCurBlock;
     1076             pCurBlock = pCurBlock->pNextFree)
    9071077        {
    908             AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
     1078            AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
    9091079                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
    910                                        "pCurBlock=%p/%u: magic=%#x", pCurBlock, iType, pCurBlock->u32Signature));
    911             AssertReturn(pCurBlock->pPrev == pPrevBlock,
     1080                                       "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
     1081            AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
    9121082                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
    913                                        "pCurBlock=%p/%u: pPrev=%p, expected %p", pCurBlock, iType, pCurBlock->pPrev, pPrevBlock));
    914             AssertReturn(pCurBlock->pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
    915                          RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/%u: chunk (%p) magic=%#x",
    916                                        pCurBlock, iType, pCurBlock->pChunk, pCurBlock->pChunk->u32Signature));
    917             cBlocks++;
     1083                                       "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
     1084            AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
     1085                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
     1086                                       pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
     1087            cFreeListBlocks++;
    9181088            pPrevBlock = pCurBlock;
    9191089        }
    9201090
    921         AssertReturn(cBlocks == acTotalBlocks[iType],
     1091        AssertReturn(cFreeListBlocks == acTotalBlocks[0],
    9221092                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
    923                                    "iType=%u: Found %u in list, expected %u", iType, cBlocks, acTotalBlocks[iType]));
    924     }
    925 
     1093                                   "Found %u in free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
     1094    }
    9261095    return VINF_SUCCESS;
    9271096}
     
    9521121
    9531122    /* Allocate the first chunk of the heap. */
    954     VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
     1123    VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    9551124    if (pBlock)
    9561125        return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
  • trunk/src/VBox/Additions/common/VBoxGuest/lib/testcase/tstVbglR0PhysHeap-1.cpp

    r97926 r97937  
    181181    if (RT_FAILURE(rc))
    182182        return RTTestSummaryAndDestroy(hTest);
     183    RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
    183184
    184185#define CHECK_PHYS_ADDR(a_pv) do { \
     
    239240
    240241        CHECK_PHYS_ADDR(s_aOps[i].pvAlloc);
     242
     243        /* Check heap integrity: */
     244        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
    241245    }
    242246
     
    251255        VbglR0PhysHeapFree(s_aOps[i].pvAlloc);
    252256        size_t cbAfterSubFree = VbglR0PhysHeapGetFreeSize();
     257        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
    253258
    254259        void *pv;
     
    258263            return RTTestSummaryAndDestroy(hTest);
    259264        CHECK_PHYS_ADDR(pv);
     265        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
    260266
    261267        //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapOffsetSize(Heap, pv),
     
    376382            /* free all */
    377383            RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-pre:  cFreeBlocks=%u cAllocedBlocks=%u in %u chunk(s)\n",
    378                           g_vbgldata.acBlocks[0], g_vbgldata.acBlocks[1], g_cChunks);
     384                          g_vbgldata.cFreeBlocks, g_vbgldata.cBlocks - g_vbgldata.cFreeBlocks, g_cChunks);
    379385            for (i = 0; i < RT_ELEMENTS(s_aHistory); i++)
    380386            {
     
    382388                s_aHistory[i].pv = NULL;
    383389            }
    384             RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-post: cFreeBlocks=%u in %u chunk(s)\n", g_vbgldata.acBlocks[0], g_cChunks);
     390            RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-post: cFreeBlocks=%u in %u chunk(s)\n", g_vbgldata.cFreeBlocks, g_cChunks);
    385391            RTTESTI_CHECK_MSG(g_cChunks == 1, ("g_cChunks=%d\n", g_cChunks));
    386             RTTESTI_CHECK_MSG(g_vbgldata.acBlocks[1] == 0, ("g_vbgldata.acBlocks[1]=%d\n", g_vbgldata.acBlocks[0]));
     392            RTTESTI_CHECK_MSG(g_vbgldata.cFreeBlocks == g_vbgldata.cBlocks,
     393                              ("g_vbgldata.cFreeBlocks=%d cBlocks=%d\n", g_vbgldata.cFreeBlocks, g_vbgldata.cBlocks));
    387394
    388395            //size_t cbAfterRand = VbglR0PhysHeapGetFreeSize();
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette