Changeset 97937 in vbox for trunk/src/VBox/Additions/common/VBoxGuest
- Timestamp: Jan 2, 2023 3:08:43 PM (2 years ago)
- Location: trunk/src/VBox/Additions/common/VBoxGuest/lib
- Files: 3 edited
trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibInternal.h
--- r97930
+++ r97937

 struct VBGLPHYSHEAPBLOCK;
 typedef struct VBGLPHYSHEAPBLOCK VBGLPHYSHEAPBLOCK;
-struct _VBGLPHYSHEAPCHUNK;
+struct VBGLPHYSHEAPFREEBLOCK;
+typedef struct VBGLPHYSHEAPFREEBLOCK VBGLPHYSHEAPFREEBLOCK;
+struct VBGLPHYSHEAPCHUNK;
 typedef struct VBGLPHYSHEAPCHUNK VBGLPHYSHEAPCHUNK;
…
      * @{
      */
-    RTSEMFASTMUTEX mutexHeap;
-    /** Block list heads. */
-    union
-    {
-        struct
-        {
-            VBGLPHYSHEAPBLOCK *pFreeBlocksHead;
-            VBGLPHYSHEAPBLOCK *pAllocBlocksHead;
-        } s;
-        /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones
-         * and one the allocated ones. */
-        VBGLPHYSHEAPBLOCK *apHeads[2];
-    } u;
-    /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
-     * one the allocated ones. */
-    int32_t acBlocks[2];
-    /** Chunk */
-    VBGLPHYSHEAPCHUNK *pChunkHead;
+    RTSEMFASTMUTEX mutexHeap;
+    /** Head of the block list (both free and allocated blocks).
+     * This runs parallel to the chunk list and is sorted by address within each
+     * chunk.  This allows merging with blocks both after and before when
+     * freeing. */
+    VBGLPHYSHEAPBLOCK *pBlockHead;
+    /** Head of the free list.
+     * This is not sorted; it works more on a most-recently-freed basis. */
+    VBGLPHYSHEAPFREEBLOCK *pFreeHead;
+    /** Number of blocks of any kind. */
+    int32_t cBlocks;
+    /** Number of free blocks. */
+    int32_t cFreeBlocks;
+    /** Head of the chunk list. */
+    VBGLPHYSHEAPCHUNK *pChunkHead;
     /** @} */
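The gist of the data-structure change above: the old union of two fAllocated-indexed lists is replaced by a single address-ordered block list (pBlockHead) plus a separate free list (pFreeHead) threaded through the freed blocks' user area. The following is a minimal, self-contained sketch of why the address-ordered list matters: it turns the merge-with-predecessor check on free into an O(1) pointer dereference. This is illustrative only (simplified types, hypothetical names, no chunk or free-list bookkeeping); the real logic is in VbglR0PhysHeapFree in the diff that follows.

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for VBGLPHYSHEAPBLOCK: headers form a doubly linked
       list sorted by address, each header followed by cbData bytes of user data. */
    typedef struct Block
    {
        struct Block *pNext;
        struct Block *pPrev;
        uint32_t      cbData;
        bool          fAllocated;
    } Block;

    /* Free-side merging with both neighbours; adjacency is implied by the
       address-sorted list (the real code additionally checks that both blocks
       belong to the same chunk before merging). */
    static void blockFreeAndMerge(Block *pBlock)
    {
        pBlock->fAllocated = false;

        Block *pNext = pBlock->pNext;           /* merge successor into us */
        if (pNext && !pNext->fAllocated)
        {
            pBlock->cbData += sizeof(Block) + pNext->cbData;
            pBlock->pNext = pNext->pNext;
            if (pNext->pNext)
                pNext->pNext->pPrev = pBlock;
        }

        Block *pPrev = pBlock->pPrev;           /* merge us into predecessor */
        if (pPrev && !pPrev->fAllocated)
        {
            pPrev->cbData += sizeof(Block) + pBlock->cbData;
            pPrev->pNext = pBlock->pNext;
            if (pBlock->pNext)
                pBlock->pNext->pPrev = pPrev;
        }
    }

With the pre-commit layout (separate free and allocated lists, unsorted), the predecessor check would have required scanning the chunk, which is exactly the limitation the removed "merge adjacent free blocks" hack worked around.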
trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp
--- r97930
+++ r97937

     */
 
+/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
+ *
+ * Traditional heap implementation keeping all blocks in an ordered list and
+ * keeping free blocks on an additional list via pointers in the user area.
+ * This is similar to @ref grp_rt_heap_simple "RTHeapSimple" and
+ * @ref grp_rt_heap_offset "RTHeapOffset" in IPRT, except that this code
+ * handles multiple chunks and has a physical address associated with each
+ * chunk and block.
+ *
+ * When allocating memory, a free block is found that satisfies the request,
+ * extending the heap with another chunk if needed.  The block is split if it's
+ * too large, and the tail end is put on the free list.
+ *
+ * When freeing memory, the block being freed is put back on the free list and
+ * we use the block list to check whether it can be merged with adjacent blocks.
+ *
+ * @note The original code managed the blocks in two separate lists for free and
+ *       allocated blocks, which had the disadvantage of only allowing merging
+ *       with the block after the block being freed.  On the plus side, it had
+ *       the potential for slightly better locality when examining the free
+ *       list, since the next pointer and block size members were closer to one
+ *       another.
+ */
+
…
 #include <iprt/mem.h>
 #include <iprt/semaphore.h>
-
-/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
- *
- * The physical memory heap consists of a doubly linked list of large chunks
- * (VBGLDATA::pChunkHead), memory blocks are allocated within these chunks and
- * are members of allocated (VBGLDATA::u.s.pAllocBlocksHead) and free
- * (VBGLDATA::u.s.pFreeBlocksHead) doubly linked lists.
- *
- * When allocating a block, we search the free list for a suitable free block.
- * If there is no such block, a new chunk is allocated and the new block is
- * taken from the new chunk as the only chunk-sized free block.  The allocated
- * block is unlinked from the free list and goes to alloc list.
- *
- * When freeing block, we check the pointer and then unlink the block from the
- * alloc list and move it to the free list.
- *
- * For each chunk we maintain the allocated blocks counter (as well as a count
- * of free blocks).  If 2 (or more) entire chunks are free they are immediately
- * deallocated, so we always have at most 1 free chunk.
- *
- * When freeing blocks, two subsequent free blocks are always merged together.
- *
- * Since the free list isn't sorted by address or anything, it is expensive to
- * determine whether the block before us is free and we should merge with it.
- * The current implementation merges blocks only when there is a free block
- * after the just freed one, never when there is one before it as that's too
- * expensive.  So, to counter this free list fragmentation, we will occasionally
- * scan chunks with a majority of free blocks and merge adjacent ones.
- */
…
 #define VBGL_PH_ASSERT_MSG AssertMsg
 
-// #define DUMPHEAP
+/*#define DUMPHEAP*/
 
 #ifdef DUMPHEAP
…
 #endif
 
-/* Heap block signature */
-#define VBGL_PH_BLOCKSIGNATURE UINT32_C(0xADDBBBBB)
-
 /* Heap chunk signature */
 #define VBGL_PH_CHUNKSIGNATURE UINT32_C(0xADDCCCCC)
…
 #define VBGL_PH_CHUNKSIZE (0x10000)
 
+/* Heap block signature */
+#define VBGL_PH_BLOCKSIGNATURE UINT32_C(0xADDBBBBB)
+
 /** The allocation block alignment.
  *
- * This is also the the minimum block size.
+ * This cannot be larger than VBGLPHYSHEAPBLOCK.
  */
 #define VBGL_PH_ALLOC_ALIGN (sizeof(void *))
…
  */
 #define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
+
+
+/** The smallest amount of user data that can be allocated.
+ *
+ * This is to ensure that the block can be converted into a
+ * VBGLPHYSHEAPFREEBLOCK structure when freed.  This must be smaller or equal
+ * to VBGL_PH_MIN_SPLIT_FREE_BLOCK.
+ */
+#define VBGL_PH_SMALLEST_ALLOC_SIZE 16
+
+/** The maximum allocation request size. */
+#define VBGL_PH_LARGEST_ALLOC_SIZE RT_ALIGN_32(  _128M \
+                                               - sizeof(VBGLPHYSHEAPBLOCK) \
+                                               - sizeof(VBGLPHYSHEAPCHUNK) \
+                                               - VBGL_PH_ALLOC_ALIGN, \
+                                               VBGL_PH_ALLOC_ALIGN)
…
 /**
+ * A free block.
+ */
+struct VBGLPHYSHEAPFREEBLOCK
+{
+    /** Core block data. */
+    VBGLPHYSHEAPBLOCK Core;
+    /** Pointer to the next free list entry. */
+    VBGLPHYSHEAPFREEBLOCK *pNextFree;
+    /** Pointer to the previous free list entry. */
+    VBGLPHYSHEAPFREEBLOCK *pPrevFree;
+};
+AssertCompile(VBGL_PH_SMALLEST_ALLOC_SIZE  >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
+AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(VBGLPHYSHEAPFREEBLOCK) - sizeof(VBGLPHYSHEAPBLOCK));
+AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= VBGL_PH_SMALLEST_ALLOC_SIZE);
+
+/**
  * A chunk of memory used by the heap for sub-allocations.
  *
…
     uint32_t uPadding1;
 
-    /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
-     * one the allocated ones. */
-    int32_t acBlocks[2];
+    /** Number of blocks of any kind. */
+    int32_t cBlocks;
+    /** Number of free blocks. */
+    int32_t cFreeBlocks;
 
     /** Pointer to the next chunk. */
…
 
     VBGL_PH_dprintf(("Chunks:\n"));
-
-    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;
-
-    while (pChunk)
-    {
-        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
-                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));
-
-        pChunk = pChunk->pNext;
-    }
+    for (VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead; pChunk; pChunk = pChunk->pNext)
+        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, cBlocks = %8d, cFreeBlocks=%8d, phys = %08X\n",
+                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize,
+                         pChunk->cBlocks, pChunk->cFreeBlocks, pChunk->physAddr));
 
     VBGL_PH_dprintf(("Allocated blocks:\n"));
-
-    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.u.s.pAllocBlocksHead;
-
-    while (pBlock)
-    {
-        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
-                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
-                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));
-
-        pBlock = pBlock->pNext;
-    }
+    for (VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pBlockHead; pBlock; pBlock = pBlock->pNext)
+        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, size = %05x, sign = %08X, %s, pChunk = %p\n",
+                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->cbDataSize,
+                         pBlock->u32Signature, pBlock->fAllocated ? "allocated" : "     free", pBlock->pChunk));
 
     VBGL_PH_dprintf(("Free blocks:\n"));
-
-    pBlock = g_vbgldata.u.s.pFreeBlocksHead;
-
-    while (pBlock)
-    {
-        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
-                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
-                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));
-
-        pBlock = pBlock->pNext;
-    }
+    for (VBGLPHYSHEAPFREEBLOCK *pBlock = g_vbgldata.pFreeHead; pBlock; pBlock = pBlock->pNextFree)
+        VBGL_PH_dprintf(("%p: pNextFree = %p, pPrevFree = %p, size = %05x, sign = %08X, pChunk = %p%s\n",
+                         pBlock, pBlock->pNextFree, pBlock->pPrevFree, pBlock->Core.cbDataSize,
+                         pBlock->Core.u32Signature, pBlock->Core.pChunk,
+                         !pBlock->Core.fAllocated ? "" : " !!allocated-block-on-freelist!!"));
 
     VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
…
 
-static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
+static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
 {
     VBGL_PH_ASSERT(pBlock != NULL);
     VBGL_PH_ASSERT(pChunk != NULL);
 
-    pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
-    pBlock->cbDataSize   = cbDataSize;
-    pBlock->fAllocated   = false;
-    pBlock->pNext        = NULL;
-    pBlock->pPrev        = NULL;
-    pBlock->pChunk       = pChunk;
+    pBlock->Core.u32Signature = VBGL_PH_BLOCKSIGNATURE;
+    pBlock->Core.cbDataSize   = cbDataSize;
+    pBlock->Core.fAllocated   = false;
+    pBlock->Core.pNext        = NULL;
+    pBlock->Core.pPrev        = NULL;
+    pBlock->Core.pChunk       = pChunk;
+    pBlock->pNextFree         = NULL;
+    pBlock->pPrevFree         = NULL;
 }
 
 
 /**
- * Links @a pBlock onto the appropriate chain accoring to @a pInsertAfter or @a
- * pBlock->fAllocated.
+ * Updates block statistics when a block is added.
+ */
+DECLINLINE(void) vbglPhysHeapStatsBlockAdded(VBGLPHYSHEAPBLOCK *pBlock)
+{
+    g_vbgldata.cBlocks += 1;
+    pBlock->pChunk->cBlocks += 1;
+    AssertMsg((uint32_t)pBlock->pChunk->cBlocks <= pBlock->pChunk->cbSize / sizeof(VBGLPHYSHEAPFREEBLOCK),
+              ("pChunk=%p: cbSize=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbSize, pBlock->pChunk->cBlocks));
+}
+
+
+/**
+ * Links @a pBlock onto the head of the block list.
  *
  * This also updates the per-chunk block counts.
  */
-static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
-{
-    bool const fAllocated = pBlock->fAllocated;
-
+static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pBlock)
+{
     VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
     VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
 
-    if (pInsertAfter)
-    {
-        pBlock->pNext = pInsertAfter->pNext;
-        pBlock->pPrev = pInsertAfter;
-
-        if (pInsertAfter->pNext)
-            pInsertAfter->pNext->pPrev = pBlock;
-
-        pInsertAfter->pNext = pBlock;
-    }
-    else
-    {
-        /* inserting to head of list */
-        VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.u.apHeads[fAllocated];
-
-        pBlock->pNext = pOldHead;
-        pBlock->pPrev = NULL;
-
-        if (pOldHead)
-            pOldHead->pPrev = pBlock;
-        g_vbgldata.u.apHeads[fAllocated] = pBlock;
-    }
-
-    /* Update the block counts: */
-    g_vbgldata.acBlocks[fAllocated] += 1;
-    pBlock->pChunk->acBlocks[fAllocated] += 1;
-    AssertMsg(   (uint32_t)pBlock->pChunk->acBlocks[fAllocated]
-              <= pBlock->pChunk->cbSize / (sizeof(*pBlock) + VBGL_PH_ALLOC_ALIGN),
-              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
-               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
-}
-
-
-/**
- * Unlinks @a pBlock from the chain it's on.
+    /* inserting to head of list */
+    VBGLPHYSHEAPBLOCK *pOldHead = g_vbgldata.pBlockHead;
+
+    pBlock->pNext = pOldHead;
+    pBlock->pPrev = NULL;
+
+    if (pOldHead)
+        pOldHead->pPrev = pBlock;
+    g_vbgldata.pBlockHead = pBlock;
+
+    /* Update the stats: */
+    vbglPhysHeapStatsBlockAdded(pBlock);
+}
+
+
+/**
+ * Links @a pBlock onto the block list after @a pInsertAfter.
  *
  * This also updates the per-chunk block counts.
  */
+static void vbglPhysHeapInsertBlockAfter(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPBLOCK *pInsertAfter)
+{
+    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
+    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
+
+    pBlock->pNext = pInsertAfter->pNext;
+    pBlock->pPrev = pInsertAfter;
+
+    if (pInsertAfter->pNext)
+        pInsertAfter->pNext->pPrev = pBlock;
+
+    pInsertAfter->pNext = pBlock;
+
+    /* Update the stats: */
+    vbglPhysHeapStatsBlockAdded(pBlock);
+}
+
+
+/**
+ * Unlinks @a pBlock from the block list.
+ *
+ * This also updates the per-chunk block counts.
+ */
 static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
 {
-    bool const fAllocated = pBlock->fAllocated;
-
     VBGLPHYSHEAPBLOCK *pOtherBlock = pBlock->pNext;
     if (pOtherBlock)
…
     else
     {
-        Assert(g_vbgldata.u.apHeads[fAllocated] == pBlock);
-        g_vbgldata.u.apHeads[fAllocated] = pBlock->pNext;
+        Assert(g_vbgldata.pBlockHead == pBlock);
+        g_vbgldata.pBlockHead = pBlock->pNext;
     }
…
     pBlock->pPrev = NULL;
 
-    /* Update the per-chunk counts: */
-    g_vbgldata.acBlocks[fAllocated] -= 1;
-    pBlock->pChunk->acBlocks[fAllocated] -= 1;
-    AssertMsg(pBlock->pChunk->acBlocks[fAllocated] >= 0,
-              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
-               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
-    Assert(g_vbgldata.acBlocks[fAllocated] >= 0);
-}
-
-
-static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
+    /* Update the stats: */
+    g_vbgldata.cBlocks -= 1;
+    pBlock->pChunk->cBlocks -= 1;
+    AssertMsg(pBlock->pChunk->cBlocks >= 0,
+              ("pChunk=%p: cbSize=%#x cBlocks=%d\n", pBlock->pChunk, pBlock->pChunk->cbSize, pBlock->pChunk->cBlocks));
+    Assert(g_vbgldata.cBlocks >= 0);
+}
+
+
+
+/**
+ * Updates statistics after adding a free block.
+ */
+DECLINLINE(void) vbglPhysHeapStatsFreeBlockAdded(VBGLPHYSHEAPFREEBLOCK *pBlock)
+{
+    g_vbgldata.cFreeBlocks += 1;
+    pBlock->Core.pChunk->cFreeBlocks += 1;
+}
+
+
+/**
+ * Links @a pBlock onto the head of the free chain.
+ *
+ * This is used during block freeing and when adding a new chunk.
+ *
+ * This also updates the per-chunk block counts.
+ */
+static void vbglPhysHeapInsertFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
+{
+    Assert(!pBlock->Core.fAllocated);
+    VBGL_PH_ASSERT_MSG(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
+    VBGL_PH_ASSERT_MSG(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
+
+    /* inserting to head of list */
+    VBGLPHYSHEAPFREEBLOCK *pOldHead = g_vbgldata.pFreeHead;
+
+    pBlock->pNextFree = pOldHead;
+    pBlock->pPrevFree = NULL;
+
+    if (pOldHead)
+        pOldHead->pPrevFree = pBlock;
+    g_vbgldata.pFreeHead = pBlock;
+
+    /* Update the stats: */
+    vbglPhysHeapStatsFreeBlockAdded(pBlock);
+}
+
+
+/**
+ * Links @a pBlock after @a pInsertAfter.
+ *
+ * This is used when splitting a free block during allocation to preserve the
+ * place in the free list.
+ *
+ * This also updates the per-chunk block counts.
+ */
+static void vbglPhysHeapInsertFreeBlockAfter(VBGLPHYSHEAPFREEBLOCK *pBlock, VBGLPHYSHEAPFREEBLOCK *pInsertAfter)
+{
+    Assert(!pBlock->Core.fAllocated);
+    VBGL_PH_ASSERT_MSG(pBlock->pNextFree == NULL, ("pBlock->pNextFree = %p\n", pBlock->pNextFree));
+    VBGL_PH_ASSERT_MSG(pBlock->pPrevFree == NULL, ("pBlock->pPrevFree = %p\n", pBlock->pPrevFree));
+
+    /* inserting after the given node */
+    pBlock->pNextFree = pInsertAfter->pNextFree;
+    pBlock->pPrevFree = pInsertAfter;
+
+    if (pInsertAfter->pNextFree)
+        pInsertAfter->pNextFree->pPrevFree = pBlock;
+
+    pInsertAfter->pNextFree = pBlock;
+
+    /* Update the stats: */
+    vbglPhysHeapStatsFreeBlockAdded(pBlock);
+}
+
+
+/**
+ * Unlinks @a pBlock from the free list.
+ *
+ * This also updates the per-chunk block counts.
+ */
+static void vbglPhysHeapUnlinkFreeBlock(VBGLPHYSHEAPFREEBLOCK *pBlock)
+{
+    Assert(!pBlock->Core.fAllocated);
+
+    VBGLPHYSHEAPFREEBLOCK *pOtherBlock = pBlock->pNextFree;
+    if (pOtherBlock)
+        pOtherBlock->pPrevFree = pBlock->pPrevFree;
+    /* else: this is tail of list but we do not maintain tails of block lists. so nothing to do. */
+
+    pOtherBlock = pBlock->pPrevFree;
+    if (pOtherBlock)
+        pOtherBlock->pNextFree = pBlock->pNextFree;
+    else
+    {
+        Assert(g_vbgldata.pFreeHead == pBlock);
+        g_vbgldata.pFreeHead = pBlock->pNextFree;
+    }
+
+    pBlock->pNextFree = NULL;
+    pBlock->pPrevFree = NULL;
+
+    /* Update the stats: */
+    g_vbgldata.cFreeBlocks -= 1;
+    pBlock->Core.pChunk->cFreeBlocks -= 1;
+    AssertMsg(pBlock->Core.pChunk->cFreeBlocks >= 0,
+              ("pChunk=%p: cbSize=%#x cFreeBlocks=%d\n",
+               pBlock->Core.pChunk, pBlock->Core.pChunk->cbSize, pBlock->Core.pChunk->cFreeBlocks));
+    Assert(g_vbgldata.cFreeBlocks >= 0);
+}
+
+
+/**
+ * Allocate another chunk and add it to the heap.
+ *
+ * @returns Pointer to the free block in the new chunk on success, NULL on
+ *          allocation failure.
+ * @param   cbMinBlock  The size of the user block we need this chunk for.
+ */
+static VBGLPHYSHEAPFREEBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
 {
     RTCCPHYS PhysAddr = NIL_RTHCPHYS;
…
     uint32_t cbChunk;
     VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
-    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */
-
-    /* Compute the size of the new chunk, rounding up to next chunk size,
-       which must be power of 2. */
+    AssertReturn(cbMinBlock <= VBGL_PH_LARGEST_ALLOC_SIZE, NULL); /* paranoia */
+
+    /*
+     * Compute the size of the new chunk, rounding up to next chunk size,
+     * which must be power of 2.
+     *
+     * Note! Using VBGLPHYSHEAPFREEBLOCK here means the minimum block size is
+     *       8 or 16 bytes too high, but safer this way since cbMinBlock is
+     *       zero during the init code call.
+     */
     Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
-    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
+    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
     cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
 
-    /* This function allocates physical contiguous memory below 4 GB. This 4GB
-       limitation stems from using a 32-bit OUT instruction to pass a block
-       physical address to the host. */
+    /*
+     * This function allocates physical contiguous memory below 4 GB.  This 4GB
+     * limitation stems from using a 32-bit OUT instruction to pass a block
+     * physical address to the host.
+     */
     pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
-    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
-     *        a guideline rather than absolute minimum size. */
+    if (!pChunk)
+    {
+        /* If the allocation fails, halve the size and try again. */
+        uint32_t cbMinChunk = RT_MAX(cbMinBlock, PAGE_SIZE / 2) + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPFREEBLOCK);
+        cbMinChunk = RT_ALIGN_32(cbMinChunk, PAGE_SIZE);
+        if (cbChunk > cbMinChunk)
+            do
+            {
+                cbChunk >>= 2;
+                cbChunk = RT_ALIGN_32(cbChunk, PAGE_SIZE);
+                pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
+            } while (!pChunk && cbChunk > cbMinChunk);
+    }
     if (pChunk)
     {
-        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
-        VBGLPHYSHEAPBLOCK *pBlock;
+        VBGLPHYSHEAPCHUNK     *pOldHeadChunk;
+        VBGLPHYSHEAPFREEBLOCK *pBlock;
         AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);
 
-        /* Init the new chunk. */
+        /*
+         * Init the new chunk.
+         */
         pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
         pChunk->cbSize       = cbChunk;
         pChunk->physAddr     = (uint32_t)PhysAddr;
-        pChunk->acBlocks[0]  = 0;
-        pChunk->acBlocks[1]  = 0;
+        pChunk->cBlocks      = 0;
+        pChunk->cFreeBlocks  = 0;
         pChunk->pNext        = NULL;
         pChunk->pPrev        = NULL;
…
 #endif
 
-        /* Initialize the free block, which now occupies entire chunk. */
-        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
+        /*
+         * Initialize the free block, which now occupies entire chunk.
+         */
+        pBlock = (VBGLPHYSHEAPFREEBLOCK *)(pChunk + 1);
         vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
-        vbglPhysHeapInsertBlock(NULL, pBlock);
-
-        /* Add the chunk to the list. */
+        vbglPhysHeapInsertBlock(&pBlock->Core);
+        vbglPhysHeapInsertFreeBlock(pBlock);
+
+        /*
+         * Add the chunk to the list.
+         */
         pOldHeadChunk = g_vbgldata.pChunkHead;
         pChunk->pNext = pOldHeadChunk;
…
         g_vbgldata.pChunkHead = pChunk;
 
-        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
+        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->Core.cbDataSize));
         return pBlock;
     }
…
 
 
+/**
+ * Deletes a chunk: unlinking all its blocks and freeing its memory.
+ */
 static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
 {
…
     VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));
 
-    /* first scan the chunk and unlink all blocks from the lists */
-
+    /*
+     * First scan the chunk and unlink all blocks from the lists.
+     *
+     * Note! We could do this by finding the first and last block list entries
+     *       and just drop the whole chain relating to this chunk, rather than
+     *       doing it one by one.  But doing it one by one is simpler and will
+     *       continue to work if the block list ends in an unsorted state.
+     */
     uEnd = (uintptr_t)pChunk + pChunk->cbSize;
     uCur = (uintptr_t)(pChunk + 1);
…
     {
         VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
+        Assert(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
+        Assert(pBlock->pChunk == pChunk);
 
         uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
         Assert(uCur == (uintptr_t)pBlock->pNext || uCur >= uEnd);
+
+        if (!pBlock->fAllocated)
+            vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pBlock);
         vbglPhysHeapUnlinkBlock(pBlock);
     }
…
     VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));
 
-    /* Unlink the chunk from the chunk list. */
+    /*
+     * Unlink the chunk from the chunk list.
+     */
     if (pChunk->pNext)
         pChunk->pNext->pPrev = pChunk->pPrev;
…
     }
 
+    /*
+     * Finally, free the chunk memory.
+     */
     RTMemContFree(pChunk, pChunk->cbSize);
 }
 
 
-/**
- * Worker for VbglR0PhysHeapFree that merges adjacent free nodes in @a pChunk.
- *
- * The caller has check that there must be 1 or more such.
- */
-static void vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(VBGLPHYSHEAPCHUNK *pChunk)
-{
-    uintptr_t const    uEnd       = (uintptr_t)pChunk + pChunk->cbSize;
-    VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
-    VBGLPHYSHEAPBLOCK *pCurBlock  = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
-    while ((uintptr_t)pCurBlock < uEnd)
-    {
-        VBGLPHYSHEAPBLOCK * const pNextBlock = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
-
-        AssertReturnVoid(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
-        AssertReturnVoid(pCurBlock->pChunk == pChunk);
-        if (   !pCurBlock->fAllocated
-            && pPrevBlock
-            && !pPrevBlock->fAllocated)
-        {
-            /* Adjust size of the previous memory block. */
-            pPrevBlock->cbDataSize += sizeof(*pCurBlock) + pCurBlock->cbDataSize;
-
-            /* Unlink the current block and invalid it. */
-            vbglPhysHeapUnlinkBlock(pCurBlock);
-
-            pCurBlock->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
-            pCurBlock->cbDataSize   = UINT32_MAX / 4;
-
-            /* The previous node remains the same as we advance to the next one. */
-        }
-        else
-            pPrevBlock = pCurBlock;
-        pCurBlock = pNextBlock;
-    }
-}
-
-
 DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
 {
-    VBGLPHYSHEAPBLOCK *pBlock;
-    VBGLPHYSHEAPBLOCK *pIter;
-    int32_t cLeft;
+    VBGLPHYSHEAPFREEBLOCK *pBlock;
+    VBGLPHYSHEAPFREEBLOCK *pIter;
+    int32_t                cLeft;
 #ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
-    uint32_t cbAlwaysSplit;
+    uint32_t               cbAlwaysSplit;
 #endif
-    int rc;
+    int                    rc;
 
     /*
-     * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
+     * Make sure we don't allocate anything too small to turn into a free node
+     * and align the size to prevent pointer misalignment and whatnot.
      */
+    cbSize = RT_MAX(cbSize, VBGL_PH_SMALLEST_ALLOC_SIZE);
     cbSize = RT_ALIGN_32(cbSize, VBGL_PH_ALLOC_ALIGN);
-    AssertStmt(cbSize > 0, cbSize = VBGL_PH_ALLOC_ALIGN); /* avoid allocating zero bytes */
+    AssertCompile(VBGL_PH_ALLOC_ALIGN <= sizeof(pBlock->Core));
 
     rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
…
         /* Smaller than 3/4 page:  Prefer a free block that can keep the request within a single page,
            so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
-        VBGLPHYSHEAPBLOCK *pFallback = NULL;
-        for (pIter = g_vbgldata.u.s.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext, cLeft--)
+        VBGLPHYSHEAPFREEBLOCK *pFallback = NULL;
+        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
         {
-            AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
-            if (pIter->cbDataSize >= cbSize)
+            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
+            if (pIter->Core.cbDataSize >= cbSize)
             {
-                if (pIter->cbDataSize == cbSize)
+                if (pIter->Core.cbDataSize == cbSize)
                 {
                     if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
…
                 else
                 {
-                    if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
+                    if (!pFallback || pIter->Core.cbDataSize < pFallback->Core.cbDataSize)
                         pFallback = pIter;
                     if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
                     {
-                        if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+                        if (!pBlock || pIter->Core.cbDataSize < pBlock->Core.cbDataSize)
                             pBlock = pIter;
 #ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
-                        else if (pIter->cbDataSize >= cbAlwaysSplit)
+                        else if (pIter->Core.cbDataSize >= cbAlwaysSplit)
                         {
                             pBlock = pIter;
…
     {
         /* Larger than 3/4 page:  Find closest free list match. */
-        for (pIter = g_vbgldata.u.s.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext, cLeft--)
+        for (pIter = g_vbgldata.pFreeHead; pIter != NULL; pIter = pIter->pNextFree, cLeft--)
         {
-            AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
-            if (pIter->cbDataSize >= cbSize)
+            AssertBreak(pIter->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
+            if (pIter->Core.cbDataSize >= cbSize)
             {
-                if (pIter->cbDataSize == cbSize)
+                if (pIter->Core.cbDataSize == cbSize)
                 {
                     /* Exact match - we're done! */
…
 
 #ifdef VBGL_PH_STOP_SEARCH_AT_EXCESS
-                if (pIter->cbDataSize >= cbAlwaysSplit)
+                if (pIter->Core.cbDataSize >= cbAlwaysSplit)
                 {
                     /* Really big block - no point continue searching! */
…
 #endif
                 /* Looking for a free block with nearest size. */
-                if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
+                if (!pBlock || pIter->Core.cbDataSize < pBlock->Core.cbDataSize)
                     pBlock = pIter;
…
     {
         /* We have a free block, either found or allocated. */
-        VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
-                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
-        VBGL_PH_ASSERT_MSG(!pBlock->fAllocated, ("pBlock = %p\n", pBlock));
+        VBGL_PH_ASSERT_MSG(pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
+                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->Core.u32Signature));
+        VBGL_PH_ASSERT_MSG(!pBlock->Core.fAllocated, ("pBlock = %p\n", pBlock));
 
         /*
…
          * We do this before unlinking the block so we can preserve the location
          * in the free list.
+         *
+         * Note! We cannot split off and return the tail end here, because that may
+         *       violate the same page requirements for requests smaller than 3/4 page.
          */
-        if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
+        AssertCompile(VBGL_PH_MIN_SPLIT_FREE_BLOCK >= sizeof(*pBlock) - sizeof(pBlock->Core));
+        if (pBlock->Core.cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
         {
-            pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);
-            vbglPhysHeapInitFreeBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
-
-            pBlock->cbDataSize = cbSize;
-
-            /* Insert the new 'pIter' block after the 'pBlock' in the free list. */
-            vbglPhysHeapInsertBlock(pBlock, pIter);
+            pIter = (VBGLPHYSHEAPFREEBLOCK *)((uintptr_t)(&pBlock->Core + 1) + cbSize);
+            vbglPhysHeapInitFreeBlock(pIter, pBlock->Core.pChunk, pBlock->Core.cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
+
+            pBlock->Core.cbDataSize = cbSize;
+
+            /* Insert the new 'pIter' block after the 'pBlock' in the block list
+               and in the free list. */
+            vbglPhysHeapInsertBlockAfter(&pIter->Core, &pBlock->Core);
+            vbglPhysHeapInsertFreeBlockAfter(pIter, pBlock);
         }
 
         /*
-         * Unlink the block from the free list, mark it as allocated and insert
-         * it in the allocated list.
+         * Unlink the block from the free list and mark it as allocated.
          */
-        vbglPhysHeapUnlinkBlock(pBlock);
-        pBlock->fAllocated = true;
-        vbglPhysHeapInsertBlock(NULL, pBlock);
+        vbglPhysHeapUnlinkFreeBlock(pBlock);
+        pBlock->Core.fAllocated = true;
 
         dumpheap("post alloc");
…
     rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
 
-    VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->cbDataSize));
-    return pBlock + 1;
+    VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->Core.cbDataSize));
+    return &pBlock->Core + 1;
 }
…
      * Calculate and return its physical address.
      */
-    return pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
+    VBGLPHYSHEAPCHUNK *pChunk = pBlock->pChunk;
+    return pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pChunk);
 }
…
     if (pv != NULL)
     {
-        VBGLPHYSHEAPBLOCK *pBlock;
+        VBGLPHYSHEAPFREEBLOCK *pBlock;
 
         int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
…
          * Validate the block header.
          */
-        pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
-        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
-            && pBlock->fAllocated)
+        pBlock = (VBGLPHYSHEAPFREEBLOCK *)((VBGLPHYSHEAPBLOCK *)pv - 1);
+        if (   pBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE
+            && pBlock->Core.fAllocated
+            && pBlock->Core.cbDataSize >= VBGL_PH_SMALLEST_ALLOC_SIZE)
         {
             VBGLPHYSHEAPCHUNK *pChunk;
…
 
             /*
-             * Move the block from the allocated list to the free list.
+             * Change the block status to freed.
              */
-            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->cbDataSize));
-            vbglPhysHeapUnlinkBlock(pBlock);
-
-            dumpheap("post unlink");
-
-            pBlock->fAllocated = false;
-            vbglPhysHeapInsertBlock(NULL, pBlock);
+            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->Core.cbDataSize));
+
+            pBlock->Core.fAllocated = false;
+            pBlock->pNextFree = pBlock->pPrevFree = NULL;
+            vbglPhysHeapInsertFreeBlock(pBlock);
 
             dumpheap("post insert");
 
             /*
-             * Check if the block after this one is also free and we can merge it into
-             * this one.
+             * Check if the block after this one is also free and we can merge it into this one.
              */
-            pChunk = pBlock->pChunk;
-            pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
-            if (   (uintptr_t)pNeighbour <= (uintptr_t)pChunk + pChunk->cbSize - sizeof(*pNeighbour)
-                && !pNeighbour->fAllocated)
+            pChunk = pBlock->Core.pChunk;
+
+            pNeighbour = pBlock->Core.pNext;
+            if (   pNeighbour
+                && !pNeighbour->fAllocated
+                && pNeighbour->pChunk == pChunk)
             {
+                Assert((uintptr_t)pBlock + sizeof(pBlock->Core) + pBlock->Core.cbDataSize == (uintptr_t)pNeighbour);
+
                 /* Adjust size of current memory block */
-                pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
+                pBlock->Core.cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
 
                 /* Unlink the following node and invalid it. */
+                vbglPhysHeapUnlinkFreeBlock((VBGLPHYSHEAPFREEBLOCK *)pNeighbour);
                 vbglPhysHeapUnlinkBlock(pNeighbour);
…
                 pNeighbour->cbDataSize = UINT32_MAX / 4;
 
-                dumpheap("post merge");
+                dumpheap("post merge after");
+            }
+
+            /*
+             * Same check for the block before us.  This invalidates pBlock.
+             */
+            pNeighbour = pBlock->Core.pPrev;
+            if (   pNeighbour
+                && !pNeighbour->fAllocated
+                && pNeighbour->pChunk == pChunk)
+            {
+                Assert((uintptr_t)pNeighbour + sizeof(*pNeighbour) + pNeighbour->cbDataSize == (uintptr_t)pBlock);
+
+                /* Adjust size of the block before us */
+                pNeighbour->cbDataSize += pBlock->Core.cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
+
+                /* Unlink this node and invalid it. */
+                vbglPhysHeapUnlinkFreeBlock(pBlock);
+                vbglPhysHeapUnlinkBlock(&pBlock->Core);
+
+                pBlock->Core.u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
+                pBlock->Core.cbDataSize   = UINT32_MAX / 8;
+
+                pBlock = NULL; /* invalid */
+
+                dumpheap("post merge before");
             }
…
              * more completely free ones.
              */
-            if (   pChunk->acBlocks[1 /*allocated*/] == 0
+            if (   pChunk->cFreeBlocks == pChunk->cBlocks
                 && (pChunk->pPrev || pChunk->pNext))
             {
…
                 {
                     AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
-                    if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
+                    if (pCurChunk->cFreeBlocks == pCurChunk->cBlocks)
                     {
                         cUnusedChunks++;
…
                          */
                         vbglPhysHeapChunkDelete(pChunk);
-                        pNeighbour = pBlock = NULL; /* invalid */
+                        pBlock = NULL;     /* invalid */
                         pChunk = NULL;
+                        pNeighbour = NULL;
                         break;
                     }
…
             }
 
-            /*
-             * Hack to counter the lack of merging with the node before us, scan
-             * the chunk and merge adjacent free nodes.
-             */
-            if (pChunk)
-            {
-                int32_t const cAllocBlocks  = pChunk->acBlocks[1 /*alloc*/];
-                int32_t const cApproxExcess = pChunk->acBlocks[0 /*free*/] - cAllocBlocks - 1;
-                if (   cApproxExcess > 0 /* there are too many free nodes */
-                    && cAllocBlocks >= 0 /* sanity check */)
-                {
-                    /* Just do the merging if there aren't that many blocks: */
-                    if (cAllocBlocks <= 4)
-                        vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(pChunk);
-                    /* Do the merging if we're above a 1:2 ratio: */
-                    else if (cApproxExcess >= cAllocBlocks)
-                        vbglR0PhysHeapMergeAdjacentFreeBlocksInChunk(pChunk);
-                }
-            }
-
             dumpheap("post free");
         }
         else
             AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
-                             pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
+                             pBlock, pBlock->Core.u32Signature, pBlock->Core.cbDataSize, pBlock->Core.fAllocated));
 
         rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
…
 
     size_t cbTotal = 0;
-    for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.u.s.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
-    {
-        Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
-        cbTotal += pCurBlock->cbDataSize;
+    for (VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead; pCurBlock; pCurBlock = pCurBlock->pNextFree)
+    {
+        Assert(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE);
+        Assert(!pCurBlock->Core.fAllocated);
+        cbTotal += pCurBlock->Core.cbDataSize;
     }
…
     {
         /*
-         * Scan the blocks in each chunk.
+         * Scan the blocks in each chunk, walking the block list in parallel.
          */
-        unsigned acTotalBlocks[2] = { 0, 0 };
+        const VBGLPHYSHEAPBLOCK *pPrevBlockListEntry = NULL;
+        const VBGLPHYSHEAPBLOCK *pCurBlockListEntry  = g_vbgldata.pBlockHead;
+        unsigned                 acTotalBlocks[2]    = { 0, 0 };
         for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
         {
…
                                        "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));
 
+            const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
             uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbSize;
-            const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
             unsigned                 acBlocks[2] = { 0, 0 };
             while ((uintptr_t)pCurBlock < uEnd)
…
                                            "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
-                AssertReturn(   pCurBlock->cbDataSize >= VBGL_PH_ALLOC_ALIGN
-                             && pCurBlock->cbDataSize < _128M
+                AssertReturn(   pCurBlock->cbDataSize >= VBGL_PH_SMALLEST_ALLOC_SIZE
+                             && pCurBlock->cbDataSize <= VBGL_PH_LARGEST_ALLOC_SIZE
                              && RT_ALIGN_32(pCurBlock->cbDataSize, VBGL_PH_ALLOC_ALIGN) == pCurBlock->cbDataSize,
                              RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                            "pCurBlock=%p: cbDataSize=%#x", pCurBlock, pCurBlock->cbDataSize));
+                AssertReturn(pCurBlock == pCurBlockListEntry,
+                             RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
+                                           "pCurChunk=%p: pCurBlock=%p, pCurBlockListEntry=%p\n",
+                                           pCurChunk, pCurBlock, pCurBlockListEntry));
+                AssertReturn(pCurBlock->pPrev == pPrevBlockListEntry,
+                             RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
+                                           "pCurChunk=%p: pCurBlock->pPrev=%p, pPrevBlockListEntry=%p\n",
+                                           pCurChunk, pCurBlock->pPrev, pPrevBlockListEntry));
+
                 acBlocks[pCurBlock->fAllocated] += 1;
 
                 /* advance */
+                pPrevBlockListEntry = pCurBlock;
+                pCurBlockListEntry  = pCurBlock->pNext;
                 pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
             }
…
 
             acTotalBlocks[1] += acBlocks[1];
-            AssertReturn(acBlocks[1] == (uint32_t)pCurChunk->acBlocks[1],
+            AssertReturn(acBlocks[0] + acBlocks[1] == (uint32_t)pCurChunk->cBlocks,
                          RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
-                                       "pCurChunk=%p: cAllocatedBlocks=%u, expected %u",
-                                       pCurChunk, pCurChunk->acBlocks[1], acBlocks[1]));
+                                       "pCurChunk=%p: cBlocks=%u, expected %u",
+                                       pCurChunk, pCurChunk->cBlocks, acBlocks[0] + acBlocks[1]));
 
             acTotalBlocks[0] += acBlocks[0];
-            AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->acBlocks[0],
+            AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->cFreeBlocks,
                          RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                        "pCurChunk=%p: cFreeBlocks=%u, expected %u",
-                                       pCurChunk, pCurChunk->acBlocks[0], acBlocks[0]));
+                                       pCurChunk, pCurChunk->cFreeBlocks, acBlocks[0]));
 
             pPrevChunk = pCurChunk;
         }
 
-        AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.acBlocks[0],
+        AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.cFreeBlocks,
                      RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
-                                   "g_vbgldata.acBlocks[0]=%u, expected %u", g_vbgldata.acBlocks[0], acTotalBlocks[0]));
-        AssertReturn(acTotalBlocks[1] == (uint32_t)g_vbgldata.acBlocks[1],
+                                   "g_vbgldata.cFreeBlocks=%u, expected %u", g_vbgldata.cFreeBlocks, acTotalBlocks[0]));
+        AssertReturn(acTotalBlocks[0] + acTotalBlocks[1] == (uint32_t)g_vbgldata.cBlocks,
                      RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
-                                   "g_vbgldata.acBlocks[1]=%u, expected %u", g_vbgldata.acBlocks[1], acTotalBlocks[1]));
+                                   "g_vbgldata.cBlocks=%u, expected %u", g_vbgldata.cBlocks, acTotalBlocks[0] + acTotalBlocks[1]));
 
         /*
-         * Count each list and check that they contain the same number of nodes.
+         * Check that the free list contains the same number of blocks as we
+         * encountered during the above scan.
          */
-        for (unsigned iType = 0; iType < RT_ELEMENTS(g_vbgldata.u.apHeads); iType++)
-        {
-            unsigned           cBlocks    = 0;
-            VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
-            for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.u.apHeads[iType]; pCurBlock; pCurBlock = pCurBlock->pNext)
+        {
+            unsigned cFreeListBlocks = 0;
+            for (const VBGLPHYSHEAPFREEBLOCK *pCurBlock = g_vbgldata.pFreeHead, *pPrevBlock = NULL;
+                 pCurBlock;
+                 pCurBlock = pCurBlock->pNextFree)
             {
-                AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
+                AssertReturn(pCurBlock->Core.u32Signature == VBGL_PH_BLOCKSIGNATURE,
                              RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
-                                           "pCurBlock=%p/%u: magic=%#x", pCurBlock, iType, pCurBlock->u32Signature));
-                AssertReturn(pCurBlock->pPrev == pPrevBlock,
+                                           "pCurBlock=%p/free: magic=%#x", pCurBlock, pCurBlock->Core.u32Signature));
+                AssertReturn(pCurBlock->pPrevFree == pPrevBlock,
                              RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
-                                           "pCurBlock=%p/%u: pPrev=%p, expected %p", pCurBlock, iType, pCurBlock->pPrev, pPrevBlock));
-                AssertReturn(pCurBlock->pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
-                             RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/%u: chunk (%p) magic=%#x",
-                                           pCurBlock, iType, pCurBlock->pChunk, pCurBlock->pChunk->u32Signature));
-                cBlocks++;
+                                           "pCurBlock=%p/free: pPrev=%p, expected %p", pCurBlock, pCurBlock->pPrevFree, pPrevBlock));
+                AssertReturn(pCurBlock->Core.pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
+                             RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/free: chunk (%p) magic=%#x",
+                                           pCurBlock, pCurBlock->Core.pChunk, pCurBlock->Core.pChunk->u32Signature));
+                cFreeListBlocks++;
                 pPrevBlock = pCurBlock;
             }
 
-            AssertReturn(cBlocks == acTotalBlocks[iType],
+            AssertReturn(cFreeListBlocks == acTotalBlocks[0],
                          RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
-                                       "iType=%u: Found %u in list, expected %u", iType, cBlocks, acTotalBlocks[iType]));
-        }
-
+                                       "Found %u in free list, expected %u", cFreeListBlocks, acTotalBlocks[0]));
+        }
     return VINF_SUCCESS;
 }
…
 
     /* Allocate the first chunk of the heap. */
-    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
+    VBGLPHYSHEAPFREEBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
     if (pBlock)
         return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
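For orientation before the test changes below: the allocator's public contract is unchanged by this commit. The following usage sketch shows the typical round trip (kernel ring-0 context; the include name is an assumption, the three Vbgl calls all appear in the diff above, and error handling is trimmed):

    #include <VBox/VBoxGuestLib.h>   /* assumed header declaring the VbglR0PhysHeap* API */
    #include <iprt/errcore.h>

    static int vbglExampleRoundTrip(void)
    {
        /* Allocations are carved from physically contiguous chunks below 4 GB. */
        void *pvReq = VbglR0PhysHeapAlloc(128);
        if (!pvReq)
            return VERR_NO_MEMORY;

        /* The 32-bit physical address is what the guest passes to the host
           (via a 32-bit OUT instruction, per the comment in the code above). */
        uint32_t const PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq);
        RT_NOREF(PhysAddr); /* ...hand PhysAddr to VMMDev here... */

        VbglR0PhysHeapFree(pvReq);
        return VINF_SUCCESS;
    }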
trunk/src/VBox/Additions/common/VBoxGuest/lib/testcase/tstVbglR0PhysHeap-1.cpp
--- r97926
+++ r97937

     if (RT_FAILURE(rc))
         return RTTestSummaryAndDestroy(hTest);
+    RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
 
 #define CHECK_PHYS_ADDR(a_pv) do { \
…
 
         CHECK_PHYS_ADDR(s_aOps[i].pvAlloc);
+
+        /* Check heap integrity: */
+        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
     }
 
…
         VbglR0PhysHeapFree(s_aOps[i].pvAlloc);
         size_t cbAfterSubFree = VbglR0PhysHeapGetFreeSize();
+        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
 
         void *pv;
…
             return RTTestSummaryAndDestroy(hTest);
         CHECK_PHYS_ADDR(pv);
+        RTTESTI_CHECK_RC_OK(VbglR0PhysHeapCheck(NULL));
 
         //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapOffsetSize(Heap, pv),
…
     /* free all */
     RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-pre: cFreeBlocks=%u cAllocedBlocks=%u in %u chunk(s)\n",
-                  g_vbgldata.acBlocks[0], g_vbgldata.acBlocks[1], g_cChunks);
+                  g_vbgldata.cFreeBlocks, g_vbgldata.cBlocks - g_vbgldata.cFreeBlocks, g_cChunks);
     for (i = 0; i < RT_ELEMENTS(s_aHistory); i++)
     {
…
         s_aHistory[i].pv = NULL;
     }
-    RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-post: cFreeBlocks=%u in %u chunk(s)\n", g_vbgldata.acBlocks[0], g_cChunks);
+    RTTestIPrintf(RTTESTLVL_ALWAYS, "Free-all-post: cFreeBlocks=%u in %u chunk(s)\n", g_vbgldata.cFreeBlocks, g_cChunks);
     RTTESTI_CHECK_MSG(g_cChunks == 1, ("g_cChunks=%d\n", g_cChunks));
-    RTTESTI_CHECK_MSG(g_vbgldata.acBlocks[1] == 0, ("g_vbgldata.acBlocks[1]=%d\n", g_vbgldata.acBlocks[0]));
+    RTTESTI_CHECK_MSG(g_vbgldata.cFreeBlocks == g_vbgldata.cBlocks,
+                      ("g_vbgldata.cFreeBlocks=%d cBlocks=%d\n", g_vbgldata.cFreeBlocks, g_vbgldata.cBlocks));
 
     //size_t cbAfterRand = VbglR0PhysHeapGetFreeSize();
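A detail worth noting in the test changes: VbglR0PhysHeapCheck takes a PRTERRINFO (the diff above shows it filling the buffer via RTErrInfoSetF), so a failed invariant can be reported as text, though the testcase passes NULL and relies on the status code alone. A possible variant with a message buffer (a sketch, not part of the changeset):

    #include <iprt/err.h>
    #include <iprt/test.h>

    static void tstCheckHeapWithMessage(void)
    {
        char      szMsg[256];
        RTERRINFO ErrInfo;
        RTErrInfoInit(&ErrInfo, szMsg, sizeof(szMsg));

        /* On failure the checker leaves a description of the broken
           invariant in the buffer we supplied. */
        int rc = VbglR0PhysHeapCheck(&ErrInfo);
        if (RT_FAILURE(rc))
            RTTestIFailed("VbglR0PhysHeapCheck: %Rrc - %s", rc, szMsg);
    }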