Changeset 97913 in vbox for trunk/src/VBox/Additions/common
- Timestamp: Dec 30, 2022 2:22:15 AM
- Location: trunk/src/VBox/Additions/common/VBoxGuest/lib
- Files: 1 added, 2 edited, 1 copied
trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibInternal.h
Changes from r96407:

The _VBGLPHYSHEAPBLOCK and _VBGLPHYSHEAPCHUNK structure tags are renamed to
drop the underscore prefix, so the forward declarations and typedefs now read:

    struct VBGLPHYSHEAPBLOCK;
    typedef struct VBGLPHYSHEAPBLOCK VBGLPHYSHEAPBLOCK;
    typedef struct VBGLPHYSHEAPCHUNK VBGLPHYSHEAPCHUNK;

(The typedef itself forward-declares the renamed chunk tag.) In VBGLDATA, the
physical memory heap members (pFreeBlocksHead, pAllocBlocksHead, pChunkHead
and mutexHeap) are unchanged, but the surrounding doxygen member group comment
is compacted onto a single line ("Physical memory heap data.") and the
mutexHeap declaration is realigned with the other members.
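For background (an aside, not part of the changeset): identifiers beginning
with an underscore followed by an uppercase letter are reserved for the
implementation in both C and C++, so the old tags trespassed on reserved
namespace. A minimal sketch of the underscore-free idiom used above, with
made-up names:

    /* The typedef alone already forward-declares the tag, so headers can
     * traffic in pointers without seeing the full definition. */
    struct EXAMPLEBLOCK;                        /* explicit forward decl */
    typedef struct EXAMPLEBLOCK EXAMPLEBLOCK;   /* same tag, no underscore */

    typedef struct EXAMPLEDATA
    {
        EXAMPLEBLOCK *pFreeBlocksHead;   /* pointers to incomplete types are fine */
        EXAMPLEBLOCK *pAllocBlocksHead;
    } EXAMPLEDATA;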
trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp
Changes from r96407:

The old top-of-file comment describing the allocator is rewritten as a doxygen
page (@page pg_vbglr0_phys_heap, "VBoxGuestLibR0 - Physical memory heap") with
tightened wording: the physical memory heap consists of a doubly linked list
of large chunks (VBGLDATA::pChunkHead); memory blocks are allocated within
these chunks and are members of the allocated (VBGLDATA::pAllocBlocksHead) and
free (VBGLDATA::pFreeBlocksHead) doubly linked lists. When allocating a block,
the free list is searched for a suitable free block; if there is none, a new
chunk is allocated and the new block is taken from it as the only chunk-sized
free block. An allocated block is moved from the free list to the allocated
list, and freeing a block moves it back again. Each chunk maintains an
allocated-blocks counter; if two (or more) entire chunks are free, they are
deallocated immediately, so at most one free chunk is kept around. Two
adjacent free blocks are merged when freeing, though the current
implementation only merges with the block that follows the just-freed one.

A "Defined Constants And Macros" section header is added, the broken macro
definition "#define VBGL_PH_ASSERT MsgAssertMsg" is fixed and renamed to
"#define VBGL_PH_ASSERT_MSG AssertMsg" (all VBGL_PH_ASSERTMsg call sites are
updated accordingly), and a new splitting threshold is introduced:

    /** Threshold at which to split out a tail free block when allocating.
     * The value is the amount of user space, i.e. excluding the header. */
    #define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
Under a new "Structures and Typedefs" section header, the struct tags lose
their reserved underscore prefixes and the members gain doxygen comments.
VBGLPHYSHEAPBLOCK documents u32Signature as the magic value
(VBGL_PH_BLOCKSIGNATURE) and cbDataSize as the size of the user data in the
block, excluding the block header; the pNext/pPrev/pChunk members now use the
VBGLPHYSHEAPBLOCK and VBGLPHYSHEAPCHUNK typedefs instead of spelled-out
"struct _..." references. VBGLPHYSHEAPCHUNK likewise documents u32Signature
(VBGL_PH_CHUNKSIGNATURE), cbSize (size of the chunk, including the chunk
header), physAddr (physical address of the contiguous chunk) and
cAllocatedBlocks (number of allocated blocks in the chunk). A sketch of the
resulting layout follows below.

The DUMPHEAP debug helper is tidied as well: the no-op variant becomes
"# define dumpheap(pszWhere) do { } while (0)", and the real dumpheap()
takes a "const char *pszWhere" instead of "char *point".
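For illustration (not part of the changeset), a minimal sketch of the layout
the page comment and structures describe; the names are simplified stand-ins:

    #include <stdint.h>

    /* A chunk header is followed in memory by a sequence of blocks; each
     * block header is immediately followed by its user data. */
    typedef struct EXBLOCK
    {
        uint32_t        u32Signature;     /* magic for sanity checking */
        uint32_t        cbDataSize;       /* user bytes after this header */
        uint32_t        fFlags;           /* allocated vs. free */
        struct EXBLOCK *pNext, *pPrev;    /* free list or allocated list links */
        struct EXCHUNK *pChunk;           /* owning chunk */
    } EXBLOCK;

    typedef struct EXCHUNK
    {
        uint32_t        u32Signature;     /* magic for sanity checking */
        uint32_t        cbSize;           /* chunk size, header included */
        uint32_t        physAddr;         /* physical address of the chunk */
        int32_t         cAllocatedBlocks; /* blocks in use within this chunk */
        struct EXCHUNK *pNext, *pPrev;    /* chunk list links */
    } EXCHUNK;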
The pointer conversion helpers are rewritten with typed pointer arithmetic:
vbglPhysHeapBlock2Data() now returns "pBlock + 1" (the user data immediately
follows the block header), and vbglPhysHeapData2Block() computes
"(VBGLPHYSHEAPBLOCK *)pv - 1" and validates the signature with
AssertMsgReturn(), returning NULL on a bad magic instead of merely asserting.
A sketch of this header/payload idiom follows below.

vbglPhysHeapEnter(), vbglPhysHeapLeave(), vbglPhysHeapInitBlock() and
vbglPhysHeapInsertBlock() are reformatted: the K&R-style space before
parameter lists is dropped, multi-line assertion messages are folded onto
single lines, and redundant braces around single-statement if bodies are
removed.
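A self-contained sketch of the hidden-header idiom, using invented names (the
real code keeps its blocks inside chunks rather than malloc'ing each one):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define EX_BLOCK_MAGIC UINT32_C(0xABCD1234)

    typedef struct EXHDR
    {
        uint32_t u32Signature;
        uint32_t cbDataSize;
    } EXHDR;

    /* Allocate a payload with a hidden header in front of it. */
    static void *exAlloc(uint32_t cbData)
    {
        EXHDR *pHdr = (EXHDR *)malloc(sizeof(*pHdr) + cbData);
        if (!pHdr)
            return NULL;
        pHdr->u32Signature = EX_BLOCK_MAGIC;
        pHdr->cbDataSize   = cbData;
        return pHdr + 1;               /* user data starts after the header */
    }

    /* Map a user pointer back to its header, the way
     * vbglPhysHeapData2Block() does. */
    static EXHDR *exData2Hdr(void *pv)
    {
        if (!pv)
            return NULL;
        EXHDR *pHdr = (EXHDR *)pv - 1; /* step back over the header */
        assert(pHdr->u32Signature == EX_BLOCK_MAGIC);
        return pHdr;
    }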
273 */ 274 ; 275 } 276 277 if (pBlock->pPrev) 278 { 279 pBlock->pPrev->pNext = pBlock->pNext; 280 } 281 else 282 { 283 /* this is head of list but we do not maintain tails of block lists. */ 284 if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) 285 { 286 g_vbgldata.pAllocBlocksHead = pBlock->pNext; 287 } 288 else 289 { 290 g_vbgldata.pFreeBlocksHead = pBlock->pNext; 291 } 295 Assert(g_vbgldata.pFreeBlocksHead == pBlock); 296 g_vbgldata.pFreeBlocksHead = pBlock->pNext; 292 297 } 293 298 … … 314 319 physical address to the host. */ 315 320 pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk); 321 /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as 322 * a guideline rather than absolute minimum size. */ 316 323 if (pChunk) 317 324 { … … 348 355 349 356 350 static void vbglPhysHeapChunkDelete 351 { 352 char *p;357 static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk) 358 { 359 uintptr_t uEnd, uCur; 353 360 VBGL_PH_ASSERT(pChunk != NULL); 354 VBGL_PH_ASSERTMsg(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, 355 ("pChunk->u32Signature = %08X\n", pChunk->u32Signature)); 361 VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature)); 356 362 357 363 VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize)); 358 364 359 /* first scan the chunk and exclude all blocks from lists */ 360 361 p = (char *)pChunk + sizeof (VBGLPHYSHEAPCHUNK); 362 363 while (p < (char *)pChunk + pChunk->cbSize) 364 { 365 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)p; 366 367 p += pBlock->cbDataSize + sizeof (VBGLPHYSHEAPBLOCK); 368 369 vbglPhysHeapExcludeBlock (pBlock); 370 } 371 372 VBGL_PH_ASSERTMsg(p == (char *)pChunk + pChunk->cbSize, 373 ("p = %p, (char *)pChunk + pChunk->cbSize = %p, pChunk->cbSize = %08X\n", 374 p, (char *)pChunk + pChunk->cbSize, pChunk->cbSize)); 365 /* first scan the chunk and exclude (unlink) all blocks from the lists */ 366 367 uEnd = (uintptr_t)pChunk + pChunk->cbSize; 368 uCur = (uintptr_t)(pChunk + 1); 369 370 while (uCur < uEnd) 371 { 372 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur; 373 374 uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK); 375 376 vbglPhysHeapExcludeBlock(pBlock); 377 } 378 379 VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize)); 375 380 376 381 /* Exclude chunk from the chunk list */ 377 382 if (pChunk->pNext) 378 {379 383 pChunk->pNext->pPrev = pChunk->pPrev; 380 } 384 /* else: we do not maintain tail pointer. 
*/ 385 386 if (pChunk->pPrev) 387 pChunk->pPrev->pNext = pChunk->pNext; 381 388 else 382 389 { 383 /* we do not maintain tail */ 384 ; 385 } 386 387 if (pChunk->pPrev) 388 { 389 pChunk->pPrev->pNext = pChunk->pNext; 390 } 391 else 392 { 393 /* the chunk was head */ 390 Assert(g_vbgldata.pChunkHead == pChunk); 394 391 g_vbgldata.pChunkHead = pChunk->pNext; 395 392 } 396 393 397 RTMemContFree 398 } 399 400 401 DECLR0VBGL(void *) VbglR0PhysHeapAlloc 394 RTMemContFree(pChunk, pChunk->cbSize); 395 } 396 397 398 DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize) 402 399 { 403 400 VBGLPHYSHEAPBLOCK *pBlock, *pIter; … … 409 406 cbSize = RT_ALIGN_32(cbSize, sizeof(void *)); 410 407 411 rc = vbglPhysHeapEnter 408 rc = vbglPhysHeapEnter(); 412 409 if (RT_FAILURE(rc)) 413 410 return NULL; 414 411 415 dumpheap 412 dumpheap("pre alloc"); 416 413 417 414 /* … … 474 471 if (!pBlock) 475 472 { 476 /* No free blocks, allocate a new chunk, 477 * the only free block of the chunk will 478 * be returned. 479 */ 480 pBlock = vbglPhysHeapChunkAlloc (cbSize); 473 /* No free blocks, allocate a new chunk, the only free block of the 474 chunk will be returned. */ 475 pBlock = vbglPhysHeapChunkAlloc(cbSize); 481 476 } 482 477 483 478 if (pBlock) 484 479 { 485 VBGL_PH_ASSERT Msg(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,486 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));487 VBGL_PH_ASSERT Msg((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0,488 ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));480 VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE, 481 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature)); 482 VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0, 483 ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags)); 489 484 490 485 /* We have a free block, either found or allocated. */ 491 486 492 if (pBlock->cbDataSize > 2*(cbSize + sizeof (VBGLPHYSHEAPBLOCK))) 487 #if 1 /** @todo r=bird: This might work okay for medium sizes, but it can't be good for small 488 * allocations (unnecessary extra work) nor for large ones (internal fragmentation). */ 489 if (pBlock->cbDataSize > 2 * (cbSize + sizeof(VBGLPHYSHEAPBLOCK))) 490 #else 491 if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize) 492 #endif 493 493 { 494 494 /* Data will occupy less than a half of the block, 495 495 * split off the tail end into a new free list entry. 496 496 */ 497 pIter = (VBGLPHYSHEAPBLOCK *)(( char *)pBlock + sizeof (VBGLPHYSHEAPBLOCK) + cbSize);497 pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize); 498 498 499 499 /* Init the new 'pIter' block, initialized blocks are always marked as free. 
The rest of the allocation path is unchanged in behaviour but reformatted
(single-line calls, "post alloc" dump, tidied logging), and
VbglR0PhysHeapGetPhysAddr() merely renames its parameter from "p" to "pv".

VbglR0PhysHeapFree() also takes "void *pv" now, logs the freed pointer with
%p instead of %x, and caches the owning chunk in a local "pChunk" before
decrementing its cAllocatedBlocks counter. The merge-on-free logic keeps its
limitation of only merging with the block that physically follows the freed
one, but gains an r=bird @todo pointing out that this simplistic approach is
not really working: because the heap lists are not sorted in any way, the
block preceding the freed one cannot be found cheaply. The neighbour is now
located as "(VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize)",
and the scan for entirely free (unused) chunks is rewritten as a for loop over
a pCurChunk iterator that asserts each chunk signature and counts
cUnusedChunks (renamed from u32FreeChunks); when more than one chunk is
unused, the current chunk is deleted as before. A sketch of the forward-merge
step follows below.

A new testcase-only section (guarded by IN_TESTCASE, pulling in iprt/err.h)
adds two helpers for the new unit test.
656 */ 657 DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void) 658 { 659 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap); 660 AssertRCReturn(rc, 0); 661 662 size_t cbTotal = 0; 663 for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext) 664 { 665 Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE); 666 cbTotal += pCurBlock->cbDataSize; 667 } 668 669 RTSemFastMutexRelease(g_vbgldata.mutexHeap); 670 return cbTotal; 671 } 672 673 static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo) 674 { 675 /* 676 * Scan the blocks in each chunk. 677 */ 678 unsigned cTotalFreeBlocks = 0; 679 unsigned cTotalUsedBlocks = 0; 680 for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext) 681 { 682 AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, 683 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x\n", pCurChunk, pCurChunk->u32Signature)); 684 685 uintptr_t const uEnd = (uintptr_t)pCurChunk + pCurChunk->cbSize; 686 const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1); 687 unsigned cUsedBlocks = 0; 688 while ((uintptr_t)pCurBlock < uEnd) 689 { 690 AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE, 691 RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, 692 "pCurBlock=%p: magic=%#x\n", pCurBlock, pCurBlock->u32Signature)); 693 AssertReturn(pCurBlock->pChunk == pCurChunk, 694 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2, 695 "pCurBlock=%p: pChunk=%p, expected %p\n", pCurBlock, pCurBlock->pChunk, pCurChunk)); 696 AssertReturn( pCurBlock->cbDataSize >= 8 697 && pCurBlock->cbDataSize < _128M 698 && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize, 699 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3, 700 "pCurBlock=%p: cbDataSize=%#x\n", pCurBlock, pCurBlock->cbDataSize)); 701 AssertReturn( pCurBlock->fu32Flags <= VBGL_PH_BF_ALLOCATED, 702 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3, 703 "pCurBlock=%p: fu32Flags=%#x\n", pCurBlock, pCurBlock->fu32Flags)); 704 if (pCurBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) 705 cUsedBlocks += 1; 706 else 707 cTotalFreeBlocks += 1; 708 709 /* advance */ 710 pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize); 711 } 712 AssertReturn((uintptr_t)pCurBlock == uEnd, 713 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4, 714 "pCurBlock=%p uEnd=%p\n", pCurBlock, uEnd)); 715 AssertReturn(cUsedBlocks == (uint32_t)pCurChunk->cAllocatedBlocks, 716 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4, 717 "pCurChunk=%p: cAllocatedBlocks=%u, expected %u\n", 718 pCurChunk, pCurChunk->cAllocatedBlocks, cUsedBlocks)); 719 cTotalUsedBlocks += cUsedBlocks; 720 } 721 return VINF_SUCCESS; 722 } 723 724 /** 725 * Performs a heap check. 726 * 727 * @returns Problem description on failure, NULL on success. 728 */ 729 DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo) 730 { 731 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap); 732 AssertRCReturn(rc, 0); 733 734 rc = vbglR0PhysHeapCheckLocked(pErrInfo); 735 736 RTSemFastMutexRelease(g_vbgldata.mutexHeap); 737 return rc; 738 } 739 740 741 #endif /* IN_TESTCASE */ 742 743 744 DECLR0VBGL(int) VbglR0PhysHeapInit(void) 745 { 746 g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX; 646 747 647 748 /* Allocate the first chunk of the heap. 
VbglR0PhysHeapInit() is fixed to initialize g_vbgldata.mutexHeap to
NIL_RTSEMFASTMUTEX up front and to create the fast mutex only when the first
chunk allocation succeeds; previously it created the mutex even when
vbglPhysHeapChunkAlloc() had already failed and the function was about to
return VERR_NO_MEMORY. VbglR0PhysHeapTerminate() still deletes all remaining
chunks and destroys the mutex, minus some redundant braces. A minimal
lifecycle sketch follows below.
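A hedged usage fragment for the public API in a ring-0 caller (not from the
changeset; error handling shortened for brevity):

    int rc = VbglR0PhysHeapInit();
    if (RT_SUCCESS(rc))
    {
        void *pvReq = VbglR0PhysHeapAlloc(64);       /* 64 bytes of user data */
        if (pvReq)
        {
            uint32_t PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq); /* for the host */
            NOREF(PhysAddr);                         /* ... submit request ... */
            VbglR0PhysHeapFree(pvReq);
        }
        VbglR0PhysHeapTerminate();
    }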
trunk/src/VBox/Additions/common/VBoxGuest/lib/testcase/tstVbglR0PhysHeap-1.cpp
Changes from r97904 (the testcase is a copy of tstRTHeapOffset.cpp rewired to
exercise the VBoxGuestR0Lib physical heap instead of RTHeapOffset):

- The iprt/heap.h include is replaced by iprt/mem.h, and the testcase defines
  IN_TESTCASE and IN_RING0 before including ../VBoxGuestR0LibInternal.h so it
  gets access to the ring-0 functions; it supplies the g_vbgldata instance
  itself.

- RTMemContAlloc/RTMemContFree are replaced with drop-in test doubles
  (tstMemContAlloc/tstMemContFree) via #define before directly #including
  ../VBoxGuestR0LibPhysHeap.cpp. The doubles are backed by
  RTMemAlloc/RTMemFree, cap the number of chunks at TST_MAX_CHUNKS (24),
  track g_cChunks/g_cbChunks, and fake a physical address by XORing the
  virtual address with 0xf0f0f000. This pattern is sketched after this list.

- A PrintStats() helper walks the allocation history (TSTHISTORYENTRY: cb/pv
  pairs) and prints bytes and chunks in use, block counts (flagging blocks
  over 1K as "large"), and internal fragmentation and header overhead
  percentages.

- The "Basics" subtest drops RTHeapOffsetInit over a static 128K buffer in
  favour of VbglR0PhysHeapInit(), and the s_aOps table loses its alignment
  column (the heap only guarantees sizeof(void *) alignment); allocations go
  through VbglR0PhysHeapAlloc()/VbglR0PhysHeapFree() and free-size accounting
  through VbglR0PhysHeapGetFreeSize(). The old "Relocated Heap" subtest,
  which memcpy'd the heap buffer and freed the relocated blocks, is dropped
  (a physically backed heap cannot be relocated that way); instead the
  subtest ends with VbglR0PhysHeapTerminate() and a check that g_cChunks
  drops back to 0.

- The "Random Test" history grows from 1536 to 3072 entries, random sizes
  become 8..1024 bytes, and each iteration now runs VbglR0PhysHeapCheck()
  plus a cross-check of the chunk-list length against g_cChunks. The periodic
  heap-exhaustion phase prints stats, first allocates chunk-sized blocks
  (between VBGL_PH_CHUNKSIZE/8 and 3*VBGL_PH_CHUNKSIZE/4) until the chunk cap
  is reached, then whittles the remaining free space down with progressively
  halved allocation sizes until VbglR0PhysHeapGetFreeSize() reports zero,
  memset-filling every successful allocation. The "free all" phases assert
  that exactly one (empty) chunk remains, and the test finally tears the heap
  down with VbglR0PhysHeapTerminate(), again checking g_cChunks == 0, before
  destroying the random number generator.
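For readers unfamiliar with the include-and-override trick the testcase uses,
here is a self-contained sketch with invented names; the "code under test"
would normally live in its own file and be pulled in with #include, and is
inlined between the markers only to keep the example in one piece:

    #include <stdio.h>
    #include <stdlib.h>

    static int g_cLiveChunks = 0;       /* bookkeeping the test checks */

    static void *tstChunkAlloc(size_t cb) { g_cLiveChunks++; return malloc(cb); }
    static void  tstChunkFree(void *pv)   { g_cLiveChunks--; free(pv); }

    /* Redirect the backend before the implementation is seen, exactly like
     * the testcase's "#define RTMemContAlloc tstMemContAlloc" lines. */
    #define chunkAlloc tstChunkAlloc
    #define chunkFree  tstChunkFree

    /* ---- begin code under test (imagine: #include "allocator.c") ---- */
    static void *allocatorAlloc(size_t cb) { return chunkAlloc(cb); }
    static void  allocatorFree(void *pv)   { chunkFree(pv); }
    /* ---- end code under test ---- */

    int main(void)
    {
        void *pv = allocatorAlloc(64);
        allocatorFree(pv);
        printf("live chunks after free: %d\n", g_cLiveChunks); /* expect 0 */
        return g_cLiveChunks != 0;
    }

Because the override is a preprocessor define rather than a link-time stub,
the production source compiles unmodified while every backend call inside it
resolves to the instrumented doubles.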