Changeset 97923 in vbox for trunk/src/VBox/Additions
- Timestamp: Dec 30, 2022 10:24:20 PM
- Location: trunk/src/VBox/Additions/common/VBoxGuest/lib
- Files: 2 edited
trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp
--- VBoxGuestR0LibPhysHeap.cpp (r97922)
+++ VBoxGuestR0LibPhysHeap.cpp (r97923)
@@ -219,18 +219,4 @@
     if (pBlock)
         return pBlock + 1;
-    return NULL;
-}
-
-
-DECLINLINE(VBGLPHYSHEAPBLOCK *) vbglPhysHeapData2Block(void *pv)
-{
-    if (pv)
-    {
-        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
-        AssertMsgReturn(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
-                        ("pBlock->u32Signature = %08X\n", pBlock->u32Signature),
-                        NULL);
-        return pBlock;
-    }
     return NULL;
 }
@@ -487,4 +473,6 @@
      */
 
+    /** @todo r=bird: Don't walk these lists for ever, use the block count
+     *        statistics to limit the walking to the first X or something. */
     pBlock = NULL;
     if (cbSize <= PAGE_SIZE / 4 * 3)
@@ -588,17 +576,27 @@
 }
 
+
 DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
 {
-    uint32_t physAddr = 0;
-    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapData2Block(pv);
-
-    if (pBlock)
-    {
-        VBGL_PH_ASSERT_MSG(pBlock->fAllocated, ("pBlock = %p\n", pBlock));
-
-        if (pBlock->fAllocated)
-            physAddr = pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
-    }
-
-    return physAddr;
+    /*
+     * Validate the incoming pointer.
+     */
+    if (pv != NULL)
+    {
+        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
+        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
+            && pBlock->fAllocated)
+        {
+            /*
+             * Calculate and return its physical address.
+             */
+            return pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
+        }
+
+        AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
+                         pv, pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
+    }
+    else
+        AssertMsgFailed(("Unexpected NULL pointer\n"));
+    return 0;
 }
@@ -607,94 +605,99 @@
 DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
 {
-    VBGLPHYSHEAPBLOCK *pBlock;
-    VBGLPHYSHEAPBLOCK *pNeighbour;
-    VBGLPHYSHEAPCHUNK *pChunk;
-
-    int rc = vbglPhysHeapEnter();
-    if (RT_FAILURE(rc))
-        return;
-
-    dumpheap("pre free");
-
-    pBlock = vbglPhysHeapData2Block(pv);
-
-    if (!pBlock)
-    {
-        vbglPhysHeapLeave();
-        return;
-    }
-
-    VBGL_PH_ASSERT_MSG(pBlock->fAllocated, ("pBlock = %p\n", pBlock));
-
-    /* Exclude from allocated list */
-    vbglPhysHeapExcludeBlock(pBlock);
-
-    dumpheap("post exclude");
-
-    VBGL_PH_dprintf(("VbglR0PhysHeapFree %p size %x\n", pv, pBlock->cbDataSize));
-
-    /* Mark as free */
-    pBlock->fAllocated = false;
-
-    /* Insert to free list */
-    vbglPhysHeapInsertBlock(NULL, pBlock);
-
-    dumpheap("post insert");
-
-    pChunk = pBlock->pChunk;
-
-    /* Check if we can merge 2 free blocks. To simplify heap maintenance,
-     * we will look at block after the just freed one.
-     * This will not prevent us from detecting free memory chunks.
-     * Also in most cases blocks are deallocated in reverse allocation order
-     * and in that case the merging will work.
-     */
-    /** @todo r=bird: This simplistic approach is of course not working.
-     *        However, since the heap lists aren't sorted in any way, we cannot
-     *        cheaply determine where the block before us starts. */
-
-    pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
-
-    if (   (uintptr_t)pNeighbour < (uintptr_t)pChunk + pChunk->cbSize
-        && !pNeighbour->fAllocated)
-    {
-        /* The next block is free as well. */
-
-        /* Adjust size of current memory block */
-        pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
-
-        /* Exclude the next neighbour */
-        vbglPhysHeapExcludeBlock(pNeighbour);
-    }
-
-    dumpheap("post merge");
-
-    /* now check if there are 2 or more free (unused) chunks */
-    if (pChunk->acBlocks[1 /*allocated*/] == 0)
-    {
-        VBGLPHYSHEAPCHUNK *pCurChunk;
-
-        uint32_t cUnusedChunks = 0;
-
-        for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
-        {
-            Assert(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
-            if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
-                cUnusedChunks++;
-        }
-
-        if (cUnusedChunks > 1)
-        {
-            /* Delete current chunk, it will also exclude all free blocks
-             * remaining in the chunk from the free list, so the pBlock
-             * will also be invalid after this.
-             */
-            vbglPhysHeapChunkDelete(pChunk);
-        }
-    }
-
-    dumpheap("post free");
-
-    vbglPhysHeapLeave();
+    if (pv != NULL)
+    {
+        VBGLPHYSHEAPBLOCK *pBlock;
+
+        int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
+        AssertRCReturnVoid(rc);
+
+        dumpheap("pre free");
+
+        /*
+         * Validate the block header.
+         */
+        pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
+        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
+            && pBlock->fAllocated)
+        {
+            VBGLPHYSHEAPCHUNK *pChunk;
+            VBGLPHYSHEAPBLOCK *pNeighbour;
+
+            /*
+             * Move the block from the allocated list to the free list.
+             */
+            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->cbDataSize));
+            vbglPhysHeapExcludeBlock(pBlock);
+
+            dumpheap("post exclude");
+
+            pBlock->fAllocated = false;
+            vbglPhysHeapInsertBlock(NULL, pBlock);
+
+            dumpheap("post insert");
+
+            /*
+             * Check if the block after this one is also free and we can merge it into
+             * this one.
+             *
+             * Because the free list isn't sorted by address we cannot cheaply do the
+             * same for the block before us, so we have to hope for the best for now.
+             */
+            /** @todo When the free:used ration in chunk is too skewed, scan the whole
+             *        chunk and merge adjacent blocks that way every so often. Always do so
+             *        when this is the last used one and we end up with more than 1 free
+             *        node afterwards. */
+            pChunk     = pBlock->pChunk;
+            pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
+            if (   (uintptr_t)pNeighbour <= (uintptr_t)pChunk + pChunk->cbSize - sizeof(*pNeighbour)
+                && !pNeighbour->fAllocated)
+            {
+                /* Adjust size of current memory block */
+                pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
+
+                /* Exclude the next neighbour */
+                vbglPhysHeapExcludeBlock(pNeighbour);
+
+                dumpheap("post merge");
+            }
+
+            /*
+             * If this chunk is now completely unused, delete it if there are
+             * more completely free ones.
+             */
+            if (   pChunk->acBlocks[1 /*allocated*/] == 0
+                && (pChunk->pPrev || pChunk->pNext))
+            {
+                VBGLPHYSHEAPCHUNK *pCurChunk;
+                uint32_t           cUnusedChunks = 0;
+                for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
+                {
+                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
+                    if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
+                    {
+                        cUnusedChunks++;
+                        if (cUnusedChunks > 1)
+                        {
+                            /* Delete current chunk, it will also exclude all free blocks
+                             * remaining in the chunk from the free list, so the pBlock
+                             * will also be invalid after this.
+                             */
+                            vbglPhysHeapChunkDelete(pChunk);
+                            pNeighbour = pBlock = NULL; /* invalid */
+                            break;
+                        }
+                    }
+                }
+            }
+
+            dumpheap("post free");
+        }
+        else
+            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
+                             pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
+
+        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
+        AssertRC(rc);
+    }
 }
 
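For readers unfamiliar with this API, the following minimal sketch shows how the reworked functions are typically used together. It is illustrative only and not part of the changeset; it assumes the physical heap has already been set up by the guest library's init path (e.g. VbglR0PhysHeapInit) and that the usual IPRT status codes are available.

    #include <VBox/VBoxGuestLib.h>
    #include <iprt/errcore.h>

    /* Illustrative sketch only (not part of r97923): allocate a buffer on the
     * physical heap, look up its physical address and free it again. */
    static int exampleAllocAndLookup(void)
    {
        void *pv = VbglR0PhysHeapAlloc(64);
        if (!pv)
            return VERR_NO_MEMORY;

        /* With r97923 the lookup validates the block header inline and
         * returns 0 for NULL, corrupted or already freed pointers. */
        uint32_t const uPhys = VbglR0PhysHeapGetPhysAddr(pv);
        if (uPhys == 0)
        {
            VbglR0PhysHeapFree(pv);
            return VERR_INVALID_POINTER;
        }

        /* ... hand uPhys to the host, e.g. as part of a VMMDev request ... */

        VbglR0PhysHeapFree(pv); /* also validates the header; asserts on double free */
        return VINF_SUCCESS;
    }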
trunk/src/VBox/Additions/common/VBoxGuest/lib/testcase/tstVbglR0PhysHeap-1.cpp
--- testcase/tstVbglR0PhysHeap-1.cpp (r97921)
+++ testcase/tstVbglR0PhysHeap-1.cpp (r97923)
@@ -87,5 +87,12 @@
     g_cChunks++;
     g_cbChunks += cb;
-    *pPhys = (uint32_t)(uintptr_t)pvRet ^ UINT32_C(0xf0f0f000);
+    *pPhys = (uint32_t)(uintptr_t)pvRet ^ (UINT32_C(0xf0f0f0f0) & ~(uint32_t)PAGE_OFFSET_MASK);
+
+    /* Avoid problematic values that won't happen in real life: */
+    if (!*pPhys)
+        *pPhys = 4U << PAGE_SHIFT;
+    if (UINT32_MAX - *pPhys < cb)
+        *pPhys -= RT_ALIGN_32(cb, PAGE_SIZE);
+
     return pvRet;
 }
@@ -160,4 +167,10 @@
     if (RT_FAILURE(rc))
         return RTTestSummaryAndDestroy(hTest);
+
+#define CHECK_PHYS_ADDR(a_pv) do { \
+        uint32_t const uPhys = VbglR0PhysHeapGetPhysAddr(a_pv); \
+        if (uPhys == 0 || uPhys == UINT32_MAX || (uPhys & PAGE_OFFSET_MASK) != ((uintptr_t)(a_pv) & PAGE_OFFSET_MASK)) \
+            RTTestIFailed("line %u: %s=%p: uPhys=%#x\n", __LINE__, #a_pv, (a_pv), uPhys); \
+    } while (0)
 
     /*
@@ -210,5 +223,5 @@
         RTTESTI_CHECK_MSG(RT_ALIGN_P(s_aOps[i].pvAlloc, sizeof(void *)) == s_aOps[i].pvAlloc,
                           ("VbglR0PhysHeapAlloc(%#x) -> %p\n", s_aOps[i].cb, i));
-        if (!s_aOps[i].pvAlloc)
-            return RTTestSummaryAndDestroy(hTest);
+
+        CHECK_PHYS_ADDR(s_aOps[i].pvAlloc);
     }
@@ -230,4 +243,6 @@
         if (!pv)
             return RTTestSummaryAndDestroy(hTest);
+        CHECK_PHYS_ADDR(pv);
+
         //RTPrintf("debug: i=%d pv=%p cbReal=%#zx cbBeforeSub=%#zx cbAfterSubFree=%#zx cbAfterSubAlloc=%#zx \n", i, pv, RTHeapOffsetSize(Heap, pv),
         //         cbBeforeSub, cbAfterSubFree, VbglR0PhysHeapGetFreeSize());
@@ -283,5 +298,8 @@
             }
             if (s_aHistory[i].pv)
+            {
                 memset(s_aHistory[i].pv, 0xbb, s_aHistory[i].cb);
+                CHECK_PHYS_ADDR(s_aHistory[i].pv);
+            }
         }
         else
@@ -311,5 +329,8 @@
             s_aHistory[i].pv = VbglR0PhysHeapAlloc(s_aHistory[i].cb);
             if (s_aHistory[i].pv)
+            {
                 memset(s_aHistory[i].pv, 0x55, s_aHistory[i].cb);
+                CHECK_PHYS_ADDR(s_aHistory[i].pv);
+            }
         }
 
@@ -327,5 +348,8 @@
         }
         if (s_aHistory[i].pv)
+        {
             memset(s_aHistory[i].pv, 0x55, s_aHistory[i].cb);
+            CHECK_PHYS_ADDR(s_aHistory[i].pv);
+        }
 
         cbFree = VbglR0PhysHeapGetFreeSize();
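The CHECK_PHYS_ADDR macro added to the testcase checks a simple invariant: a valid physical address is non-zero, not UINT32_MAX, and shares its page offset with the virtual address, since heap chunks are mapped page-aligned. Below is a standalone sketch of the same predicate, illustrative only and not part of the changeset; PAGE_OFFSET_MASK and the fixed-width types come from the IPRT headers.

    #include <iprt/param.h>   /* PAGE_OFFSET_MASK */
    #include <iprt/types.h>

    /* Returns true when uPhys is a plausible physical address for pv:
     * non-zero, not all ones, and with the same offset into the page. */
    static bool isPlausiblePhysAddr(void const *pv, uint32_t uPhys)
    {
        if (uPhys == 0 || uPhys == UINT32_MAX)
            return false;
        return (uPhys & PAGE_OFFSET_MASK) == ((uintptr_t)pv & PAGE_OFFSET_MASK);
    }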