VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp @ r97925

Last change on this file since r97925 was r97925, checked in by vboxsync, 2 years ago:

Add/VBoxGuestR0LibPhysHeap.cpp: Use the term 'unlink' rather than 'exclude' for removing a block/chunk from a linked list. Docs updates.
/* $Id: VBoxGuestR0LibPhysHeap.cpp 97925 2022-12-30 23:00:05Z vboxsync $ */
/** @file
 * VBoxGuestLibR0 - Physical memory heap.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "VBoxGuestR0LibInternal.h"

#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/semaphore.h>

/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
 *
 * The physical memory heap consists of a doubly linked list of large chunks
 * (VBGLDATA::pChunkHead); memory blocks are allocated within these chunks and
 * are members of the allocated (VBGLDATA::pAllocBlocksHead) and free
 * (VBGLDATA::pFreeBlocksHead) doubly linked lists.
 *
 * When allocating a block, we search the free list for a suitable free block.
 * If there is no such block, a new chunk is allocated and the new block is
 * taken from the new chunk as the only chunk-sized free block. The allocated
 * block is unlinked from the free list and goes onto the alloc list.
 *
 * When freeing a block, we check the pointer and then unlink the block from
 * the alloc list and move it to the free list.
 *
 * For each chunk we maintain a count of allocated blocks (as well as a count
 * of free blocks). When a chunk becomes entirely free and another entirely
 * free chunk already exists, the newly freed chunk is deallocated
 * immediately, so we always keep at most one free chunk around.
 *
 * When freeing blocks, adjacent free blocks are merged. The current
 * implementation merges blocks only when there is a free block after the
 * just freed one, never when there is one before it, as that would be too
 * expensive.
 */
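
/*
 * Typical caller sequence (editor's sketch, not taken from in-tree code; the
 * cbReq variable and the host notification step are illustrative only):
 *
 *      void *pvReq = VbglR0PhysHeapAlloc(cbReq);
 *      if (pvReq)
 *      {
 *          uint32_t PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq);
 *          ... fill in the request and pass PhysAddr to the host,
 *              e.g. via a 32-bit OUT to the VMMDev request port ...
 *          VbglR0PhysHeapFree(pvReq);
 *      }
 */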


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define VBGL_PH_ASSERT      Assert
#define VBGL_PH_ASSERT_MSG  AssertMsg

// #define DUMPHEAP

#ifdef DUMPHEAP
# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
#else
# define VBGL_PH_dprintf(a)
#endif

/* Heap block signature */
#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)


/* Heap chunk signature */
#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
/* Heap chunk allocation unit */
#define VBGL_PH_CHUNKSIZE (0x10000)

/* Heap block bit flags */
#define VBGL_PH_BF_ALLOCATED (0x1)
/** Threshold at which to split out a tail free block when allocating.
 *
 * The value gives the amount of user space, i.e. excluding the header.
 *
 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are
 * 24 bytes (i.e. header only) and there are at least 4 of these. There are at
 * least 10 requests of size 28 bytes and at least 11 of size 32 bytes. So, 32
 * bytes would fit some 25 requests out of about 60, which is reasonable.
 */
#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
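
/*
 * Worked example (editor's note): VbglR0PhysHeapAlloc only splits a tail free
 * block off a larger free block when
 *      cbDataSize >= cbSize + 2 * sizeof(VBGLPHYSHEAPBLOCK) + VBGL_PH_MIN_SPLIT_FREE_BLOCK
 * holds. On 64-bit, where the block header is 32 bytes, a 64 byte request
 * thus splits a 256 byte free block (256 >= 64 + 64 + 32), while a 128 byte
 * free block is handed out whole (128 < 160).
 */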


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A heap block (within a chunk).
 *
 * This is used to track a part of a heap chunk that's either free or
 * allocated. The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
 */
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbDataSize : 31;
    /** The top bit indicates whether it's allocated or free. */
    uint32_t fAllocated : 1;

    /** Pointer to the next block on the list. */
    VBGLPHYSHEAPBLOCK  *pNext;
    /** Pointer to the previous block on the list. */
    VBGLPHYSHEAPBLOCK  *pPrev;
    /** Pointer back to the chunk. */
    VBGLPHYSHEAPCHUNK  *pChunk;
};
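
/*
 * Layout note (editor's illustration): the user data follows the header
 * directly, so the mapping between a block header and the user pointer handed
 * out is simply:
 *
 *      void              *pv     = pBlock + 1;
 *      VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
 *
 * This is the arithmetic VbglR0PhysHeapAlloc, VbglR0PhysHeapGetPhysAddr and
 * VbglR0PhysHeapFree below all rely on.
 */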

/**
 * A chunk of memory used by the heap for sub-allocations.
 *
 * There is a list of these.
 */
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbSize;

    /** Physical address of the chunk (contiguous). */
    uint32_t physAddr;

    uint32_t uPadding1;

    /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
     * one the allocated ones. */
    int32_t acBlocks[2];

    /** Pointer to the next chunk. */
    VBGLPHYSHEAPCHUNK  *pNext;
    /** Pointer to the previous chunk. */
    VBGLPHYSHEAPCHUNK  *pPrev;
#if ARCH_BITS == 64
    /** Pad the size up to 64 bytes. */
    uintptr_t auPadding2[3];
#endif
};
#if ARCH_BITS == 64
AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
#endif
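
/*
 * Chunk layout (editor's illustration): the chunk header is immediately
 * followed by the first block header, and blocks are walked by address:
 *
 *      +-------------------+-------------------+-----------+----
 *      | VBGLPHYSHEAPCHUNK | VBGLPHYSHEAPBLOCK | user data | ...
 *      +-------------------+-------------------+-----------+----
 *
 *      first block: (VBGLPHYSHEAPBLOCK *)(pChunk + 1)
 *      next block:  (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize)
 *
 * vbglPhysHeapChunkDelete and vbglR0PhysHeapCheckLocked below both walk
 * chunks this way.
 */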


#ifndef DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
void dumpheap(const char *pszWhere)
{
    VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_dprintf(("Chunks:\n"));

    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;

    while (pChunk)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize,
                         pChunk->acBlocks[1], pChunk->physAddr));

        pChunk = pChunk->pNext;
    }

    VBGL_PH_dprintf(("Allocated blocks:\n"));

    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("Free blocks:\n"));

    pBlock = g_vbgldata.pFreeBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif


DECLINLINE(void) vbglPhysHeapLeave(void)
{
    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
}


static void vbglPhysHeapInitFreeBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
{
    VBGL_PH_ASSERT(pBlock != NULL);
    VBGL_PH_ASSERT(pChunk != NULL);

    pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
    pBlock->cbDataSize   = cbDataSize;
    pBlock->fAllocated   = false;
    pBlock->pNext        = NULL;
    pBlock->pPrev        = NULL;
    pBlock->pChunk       = pChunk;
}


/**
 * Links @a pBlock onto the appropriate chain according to @a pInsertAfter or
 * @a pBlock->fAllocated.
 *
 * This also updates the global and per-chunk block counts.
 */
static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
{
    bool const fAllocated = pBlock->fAllocated;

    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    if (pInsertAfter)
    {
        pBlock->pNext = pInsertAfter->pNext;
        pBlock->pPrev = pInsertAfter;

        if (pInsertAfter->pNext)
            pInsertAfter->pNext->pPrev = pBlock;

        pInsertAfter->pNext = pBlock;
    }
    else
    {
        /* inserting at the head of a list */
        pBlock->pPrev = NULL;

        if (fAllocated)
        {
            pBlock->pNext = g_vbgldata.pAllocBlocksHead;

            if (g_vbgldata.pAllocBlocksHead)
                g_vbgldata.pAllocBlocksHead->pPrev = pBlock;

            g_vbgldata.pAllocBlocksHead = pBlock;
        }
        else
        {
            pBlock->pNext = g_vbgldata.pFreeBlocksHead;

            if (g_vbgldata.pFreeBlocksHead)
                g_vbgldata.pFreeBlocksHead->pPrev = pBlock;

            g_vbgldata.pFreeBlocksHead = pBlock;
        }
    }

    /* Update the block counts: */
    g_vbgldata.acBlocks[fAllocated] += 1;
    pBlock->pChunk->acBlocks[fAllocated] += 1;
    AssertMsg(   (uint32_t)pBlock->pChunk->acBlocks[fAllocated]
              <= pBlock->pChunk->cbSize / (sizeof(*pBlock) + sizeof(void *)),
              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
}
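
/*
 * Usage note (editor's sketch): the two insertion modes are used as follows
 * in this file:
 *
 *      vbglPhysHeapInsertBlock(NULL, pBlock);   - push onto the head of the
 *                                                 free or alloc list, as per
 *                                                 pBlock->fAllocated
 *      vbglPhysHeapInsertBlock(pBlock, pIter);  - keep a split-off tail block
 *                                                 right after its parent in
 *                                                 the free list
 */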


/**
 * Unlinks @a pBlock from the chain it's on.
 *
 * This also updates the global and per-chunk block counts.
 */
static void vbglPhysHeapUnlinkBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    bool const fAllocated = pBlock->fAllocated;

    if (pBlock->pNext)
        pBlock->pNext->pPrev = pBlock->pPrev;
    /* else: this is the tail of the list, but we do not maintain tail
       pointers for the block lists, so there is nothing to do. */

    if (pBlock->pPrev)
        pBlock->pPrev->pNext = pBlock->pNext;
    else if (fAllocated)
    {
        Assert(g_vbgldata.pAllocBlocksHead == pBlock);
        g_vbgldata.pAllocBlocksHead = pBlock->pNext;
    }
    else
    {
        Assert(g_vbgldata.pFreeBlocksHead == pBlock);
        g_vbgldata.pFreeBlocksHead = pBlock->pNext;
    }

    pBlock->pNext = NULL;
    pBlock->pPrev = NULL;

    /* Update the block counts: */
    g_vbgldata.acBlocks[fAllocated] -= 1;
    pBlock->pChunk->acBlocks[fAllocated] -= 1;
    AssertMsg(pBlock->pChunk->acBlocks[fAllocated] >= 0,
              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
    Assert(g_vbgldata.acBlocks[fAllocated] >= 0);
}


static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t           cbChunk;
    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */

    /* Compute the size of the new chunk, rounding it up to the chunk
       granularity, which must be a power of 2. */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);
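    /* Editor's example: on 64-bit hosts the chunk and block headers are
       64 + 32 bytes, so a cbMinBlock of 0x10000 gives 0x10000 + 0x60 =
       0x10060, which rounds up to a 0x20000 byte (128 KiB) chunk. */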

    /* This function allocates physically contiguous memory below 4 GB. This
       4 GB limitation stems from using a 32-bit OUT instruction to pass a
       block's physical address to the host. */
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
     *        a guideline rather than absolute minimum size. */
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPBLOCK *pBlock;
        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);

        /* Init the new chunk. */
        pChunk->u32Signature  = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbSize        = cbChunk;
        pChunk->physAddr      = (uint32_t)PhysAddr;
        pChunk->acBlocks[0]   = 0;
        pChunk->acBlocks[1]   = 0;
        pChunk->pNext         = NULL;
        pChunk->pPrev         = NULL;
        pChunk->uPadding1     = UINT32_C(0xADDCAAA1);
#if ARCH_BITS == 64
        pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
        pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
        pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
#endif

        /* Initialize the free block, which occupies the entire chunk. */
        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
        vbglPhysHeapInitFreeBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Add the chunk to the list. */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}


static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    VBGL_PH_ASSERT(pChunk != NULL);
    VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));

    /* First scan the chunk and unlink all its blocks from the lists. */

    uEnd = (uintptr_t)pChunk + pChunk->cbSize;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;

        uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        vbglPhysHeapUnlinkBlock(pBlock);
    }

    VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));

    /* Unlink the chunk from the chunk list. */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain a tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    RTMemContFree(pChunk, pChunk->cbSize);
}


DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
{
    VBGLPHYSHEAPBLOCK *pBlock, *pIter;
    int                rc;

    /*
     * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
     */
    cbSize = RT_ALIGN_32(cbSize, sizeof(void *));
    AssertStmt(cbSize > 0, cbSize = sizeof(void *)); /* avoid allocating zero bytes */

    rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, NULL);

    dumpheap("pre alloc");

    /*
     * Search the free list. We do this in linear fashion as we don't expect
     * there to be many blocks in the heap.
     */
    /** @todo r=bird: Don't walk these lists forever, use the block count
     *        statistics to limit the walking to the first X or something. */
    pBlock = NULL;
    if (cbSize <= PAGE_SIZE / 4 * 3)
    {
        /* Smaller than 3/4 of a page: Prefer a free block that can keep the
           request within a single page, so HGCM processing in VMMDev can use
           page locks instead of several reads and writes. */
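        /* Editor's note, a worked example assuming 4 KiB pages: if the user
           data at pIter + 1 starts at page offset 0xF00, then only
           0x1000 - 0xF00 = 0x100 bytes remain in that page, so the block
           qualifies only for requests of up to 256 bytes; anything bigger
           would straddle a page boundary. */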
        VBGLPHYSHEAPBLOCK *pFallback = NULL;
        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
        {
            AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
                    {
                        pBlock = pIter;
                        break;
                    }
                    pFallback = pIter;
                }
                else
                {
                    if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
                        pFallback = pIter;
                    if (PAGE_SIZE - ((uintptr_t)(pIter + 1) & PAGE_OFFSET_MASK) >= cbSize)
                        if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                            pBlock = pIter;
                }
            }
        }

        if (!pBlock)
            pBlock = pFallback;
    }
    else
    {
        /* Larger than 3/4 of a page: Find the closest free-list match. */
        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
        {
            AssertBreak(pIter->u32Signature == VBGL_PH_BLOCKSIGNATURE);
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    /* Exact match - we're done! */
                    pBlock = pIter;
                    break;
                }

                /* Looking for a free block with the nearest size. */
                if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                    pBlock = pIter;
            }
        }
    }

    if (!pBlock)
    {
        /* No free blocks, allocate a new chunk; the only free block of the
           chunk will be returned. */
        pBlock = vbglPhysHeapChunkAlloc(cbSize);
    }

    if (pBlock)
    {
        /* We have a free block, either found or allocated. */
        VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
        VBGL_PH_ASSERT_MSG(!pBlock->fAllocated, ("pBlock = %p\n", pBlock));

        /*
         * If the block is too large, split off a free block with the unused space.
         *
         * We do this before unlinking the block so we can preserve the location
         * in the free list.
         */
        if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
        {
            pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);
            vbglPhysHeapInitFreeBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));

            pBlock->cbDataSize = cbSize;

            /* Insert the new 'pIter' block after the 'pBlock' in the free list. */
            vbglPhysHeapInsertBlock(pBlock, pIter);
        }

        /*
         * Unlink the block from the free list, mark it as allocated and insert
         * it in the allocated list.
         */
        vbglPhysHeapUnlinkBlock(pBlock);
        pBlock->fAllocated = true;
        vbglPhysHeapInsertBlock(NULL, pBlock);

        dumpheap("post alloc");

        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
        AssertRC(rc);

        VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns %p size %x\n", pBlock + 1, pBlock->cbDataSize));
        return pBlock + 1;
    }

    rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    AssertRC(rc);

    VBGL_PH_dprintf(("VbglR0PhysHeapAlloc: returns NULL (requested %#x bytes)\n", cbSize));
    return NULL;
}


DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
{
    /*
     * Validate the incoming pointer.
     */
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            /*
             * Calculate and return its physical address.
             */
            return pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
        }

        AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
                         pv, pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
    }
    else
        AssertMsgFailed(("Unexpected NULL pointer\n"));
    return 0;
}
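
/*
 * Editor's example (illustrative numbers): for a chunk at physical address
 * 0x01200000 and a user pointer 0x80 bytes into the chunk, the function
 * returns 0x01200000 + 0x80 = 0x01200080. Taking the offset relative to the
 * chunk header works because each chunk is a single physically contiguous
 * RTMemContAlloc allocation.
 */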


DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock;

        int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
        AssertRCReturnVoid(rc);

        dumpheap("pre free");

        /*
         * Validate the block header.
         */
        pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            VBGLPHYSHEAPCHUNK *pChunk;
            VBGLPHYSHEAPBLOCK *pNeighbour;

            /*
             * Move the block from the allocated list to the free list.
             */
            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->cbDataSize));
            vbglPhysHeapUnlinkBlock(pBlock);

            dumpheap("post unlink");

            pBlock->fAllocated = false;
            vbglPhysHeapInsertBlock(NULL, pBlock);

            dumpheap("post insert");

            /*
             * Check if the block after this one is also free and we can merge it into
             * this one.
             *
             * Because the free list isn't sorted by address, we cannot cheaply do the
             * same for the block before us, so we have to hope for the best for now.
             */
            /** @todo When the free:used ratio in a chunk gets too skewed, scan the whole
             *        chunk and merge adjacent blocks that way every so often. Always do so
             *        when this is the last used one and we end up with more than 1 free
             *        node afterwards. */
            pChunk = pBlock->pChunk;
            pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
            if (   (uintptr_t)pNeighbour <= (uintptr_t)pChunk + pChunk->cbSize - sizeof(*pNeighbour)
                && !pNeighbour->fAllocated)
            {
                /* Adjust the size of the current memory block. */
                pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

                /* Unlink the following node and invalidate it. */
                vbglPhysHeapUnlinkBlock(pNeighbour);

                pNeighbour->u32Signature = ~VBGL_PH_BLOCKSIGNATURE;
                pNeighbour->cbDataSize   = UINT32_MAX / 4;

                dumpheap("post merge");
            }

            /*
             * If this chunk is now completely unused, delete it if there are
             * other completely free chunks around.
             */
            if (   pChunk->acBlocks[1 /*allocated*/] == 0
                && (pChunk->pPrev || pChunk->pNext))
            {
                VBGLPHYSHEAPCHUNK *pCurChunk;
                uint32_t           cUnusedChunks = 0;
                for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
                {
                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
                    if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
                    {
                        cUnusedChunks++;
                        if (cUnusedChunks > 1)
                        {
                            /* Delete the current chunk. This also unlinks all free
                               blocks remaining in the chunk from the free list, so
                               pBlock is invalid after this point. */
                            vbglPhysHeapChunkDelete(pChunk);
                            pNeighbour = pBlock = NULL; /* invalid */
                            break;
                        }
                    }
                }
            }

            dumpheap("post free");
        }
        else
            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
                             pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));

        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
        AssertRC(rc);
    }
}

#ifdef IN_TESTCASE /* For the testcase only */
# include <iprt/err.h>

/**
 * Returns the sum of all free heap blocks.
 *
 * This is the amount of memory you can theoretically allocate if you do
 * allocations exactly matching the free blocks.
 *
 * @returns The size of the free blocks.
 * @returns 0 if the heap was detected to be bad.
 */
DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, 0);

    size_t cbTotal = 0;
    for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
    {
        Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
        cbTotal += pCurBlock->cbDataSize;
    }

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return cbTotal;
}


/**
 * Checks the heap, caller responsible for locking.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk.
     */
    unsigned acTotalBlocks[2] = { 0, 0 };
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
        AssertReturn(pCurChunk->pPrev == pPrevChunk,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                   "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));

        uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbSize;
        const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        unsigned                 acBlocks[2] = { 0, 0 };
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbDataSize >= sizeof(void *)
                         && pCurBlock->cbDataSize < _128M
                         && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbDataSize=%#x", pCurBlock, pCurBlock->cbDataSize));
            acBlocks[pCurBlock->fAllocated] += 1;

            /* advance */
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));

        acTotalBlocks[1] += acBlocks[1];
        AssertReturn(acBlocks[1] == (uint32_t)pCurChunk->acBlocks[1],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cAllocatedBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->acBlocks[1], acBlocks[1]));

        acTotalBlocks[0] += acBlocks[0];
        AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->acBlocks[0],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                   "pCurChunk=%p: cFreeBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->acBlocks[0], acBlocks[0]));

        pPrevChunk = pCurChunk;
    }

    AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.acBlocks[0],
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.acBlocks[0]=%u, expected %u", g_vbgldata.acBlocks[0], acTotalBlocks[0]));
    AssertReturn(acTotalBlocks[1] == (uint32_t)g_vbgldata.acBlocks[1],
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.acBlocks[1]=%u, expected %u", g_vbgldata.acBlocks[1], acTotalBlocks[1]));

    /*
     * Count each list and check that they contain the same number of nodes.
     */
    VBGLPHYSHEAPBLOCK *apHeads[2] = { g_vbgldata.pFreeBlocksHead, g_vbgldata.pAllocBlocksHead };
    for (unsigned iType = 0; iType < RT_ELEMENTS(apHeads); iType++)
    {
        unsigned           cBlocks    = 0;
        VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
        for (VBGLPHYSHEAPBLOCK *pCurBlock = apHeads[iType]; pCurBlock; pCurBlock = pCurBlock->pNext)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p/%u: magic=%#x", pCurBlock, iType, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pPrev == pPrevBlock,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p/%u: pPrev=%p, expected %p", pCurBlock, iType, pCurBlock->pPrev, pPrevBlock));
            AssertReturn(pCurBlock->pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/%u: chunk (%p) magic=%#x",
                                       pCurBlock, iType, pCurBlock->pChunk, pCurBlock->pChunk->u32Signature));
            cBlocks++;
            pPrevBlock = pCurBlock;
        }

        AssertReturn(cBlocks == acTotalBlocks[iType],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                   "iType=%u: Found %u in list, expected %u", iType, cBlocks, acTotalBlocks[iType]));
    }

    return VINF_SUCCESS;
}


/**
 * Performs a heap check.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, rc);

    rc = vbglR0PhysHeapCheckLocked(pErrInfo);

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return rc;
}
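
/*
 * Example testcase usage (editor's sketch; RTERRINFOSTATIC/RTErrInfoInitStatic
 * come from iprt/err.h and RTTestIFailed from iprt/test.h, but this exact
 * snippet is not from the in-tree testcase):
 *
 *      RTERRINFOSTATIC ErrInfo;
 *      int rc = VbglR0PhysHeapCheck(RTErrInfoInitStatic(&ErrInfo));
 *      if (RT_FAILURE(rc))
 *          RTTestIFailed("heap check failed: %Rrc - %s", rc, ErrInfo.Core.pszMsg);
 */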

#endif /* IN_TESTCASE */

DECLR0VBGL(int) VbglR0PhysHeapInit(void)
{
    g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX;

    /* Allocate the first chunk of the heap. */
    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    if (pBlock)
        return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
    return VERR_NO_CONT_MEMORY;
}

DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
{
    while (g_vbgldata.pChunkHead)
        vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);

    RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
}
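
/*
 * Lifecycle sketch (editor's note): callers are expected to pair these as
 * below; the error handling shown is illustrative only:
 *
 *      int rc = VbglR0PhysHeapInit();
 *      if (RT_SUCCESS(rc))
 *      {
 *          ... VbglR0PhysHeapAlloc / VbglR0PhysHeapFree as needed ...
 *          VbglR0PhysHeapTerminate();
 *      }
 */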