VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@97923

Last change on this file was 97923, checked in by vboxsync, 2 years ago

Add/VBoxGuestR0LibPhysHeap.cpp: Made VbglR0PhysHeapFree and VbglR0PhysHeapGetPhysAddr more strict wrt input. Cleanups.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 30.8 KB
/* $Id: VBoxGuestR0LibPhysHeap.cpp 97923 2022-12-30 22:24:20Z vboxsync $ */
/** @file
 * VBoxGuestLibR0 - Physical memory heap.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "VBoxGuestR0LibInternal.h"

#include <iprt/assert.h>
#include <iprt/semaphore.h>
#include <iprt/alloc.h>

/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
 *
 * The physical memory heap consists of a doubly linked list of large chunks
 * (VBGLDATA::pChunkHead); memory blocks are allocated within these chunks and
 * are members of the allocated (VBGLDATA::pAllocBlocksHead) and free
 * (VBGLDATA::pFreeBlocksHead) doubly linked lists.
 *
 * When allocating a block, we search the free list for a suitable free block.
 * If there is no such block, a new chunk is allocated and the new block is
 * taken from the new chunk as its only, chunk-sized free block.  The
 * allocated block is unlinked from the free list and put on the allocated
 * list.
 *
 * When freeing a block, we check the pointer, unlink the block from the
 * allocated list and move it to the free list.
 *
 * For each chunk we maintain an allocated block counter.  If 2 (or more)
 * entire chunks are free, they are immediately deallocated, so we always
 * have at most 1 free chunk.
 *
 * When a block is freed, it is merged with the block following it if that
 * one is free as well.  Because the free list is not sorted by address, the
 * current implementation does not merge with the block in front of the just
 * freed one.
 */
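
/* Minimal usage sketch (illustrative only; the API implementations follow
 * below in this file):
 * @code
 *  void *pv = VbglR0PhysHeapAlloc(64);     // backed by contiguous memory below 4 GB
 *  if (pv)
 *  {
 *      uint32_t PhysAddr = VbglR0PhysHeapGetPhysAddr(pv); // the address the host sees
 *      // ... fill in a request and hand PhysAddr to the host ...
 *      VbglR0PhysHeapFree(pv);
 *  }
 * @endcode
 */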

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define VBGL_PH_ASSERT      Assert
#define VBGL_PH_ASSERT_MSG  AssertMsg

// #define DUMPHEAP

#ifdef DUMPHEAP
# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
#else
# define VBGL_PH_dprintf(a)
#endif

/* Heap block signature */
#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)


/* Heap chunk signature */
#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
/* Heap chunk allocation unit */
#define VBGL_PH_CHUNKSIZE (0x10000)

/* Heap block bit flags */
#define VBGL_PH_BF_ALLOCATED (0x1)

/** Threshold at which to split out a tail free block when allocating.
 *
 * The value gives the amount of user space, i.e. excluding the header.
 *
 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are 24
 * bytes, i.e. only the header, at least 4 of these. There are at least 10 with
 * size 28 bytes and at least 11 with size 32 bytes. So, 32 bytes would fit
 * some 25 requests out of about 60, which is reasonable.
 */
#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
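
/* Worked example (illustrative, assuming the 32 byte VBGLPHYSHEAPBLOCK header
   of 64-bit builds): a cbSize=32 request served from a free block with
   cbDataSize=160 passes the split test in VbglR0PhysHeapAlloc, since
   160 >= 2*32 + 32 + 32 = 128, so the tail is split off as a new free block
   with 160 - 32 - 32 = 96 bytes of user space. */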


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * A heap block (within a chunk).
 *
 * This is used to track a part of a heap chunk that's either free or
 * allocated.  The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
 */
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbDataSize : 31;
    /** The top bit indicates whether it's allocated or free. */
    uint32_t fAllocated : 1;

    /** Pointer to the next block on the list. */
    VBGLPHYSHEAPBLOCK  *pNext;
    /** Pointer to the previous block on the list. */
    VBGLPHYSHEAPBLOCK  *pPrev;
    /** Pointer back to the chunk. */
    VBGLPHYSHEAPCHUNK  *pChunk;
};

/**
 * A chunk of memory used by the heap for sub-allocations.
 *
 * There is a list of these.
 */
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbSize;

    /** Physical address of the chunk (contiguous). */
    uint32_t physAddr;

    uint32_t uPadding1;

    /** Indexed by VBGLPHYSHEAPBLOCK::fAllocated, so zero is the free ones and
     * one the allocated ones. */
    int32_t acBlocks[2];

    /** Pointer to the next chunk. */
    VBGLPHYSHEAPCHUNK  *pNext;
    /** Pointer to the previous chunk. */
    VBGLPHYSHEAPCHUNK  *pPrev;
#if ARCH_BITS == 64
    /** Pad the size up to 64 bytes. */
    uintptr_t auPadding2[3];
#endif
};
#if ARCH_BITS == 64
AssertCompileSize(VBGLPHYSHEAPCHUNK, 64);
#endif
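
/* Layout check (illustrative): on 64-bit builds the members add up to
   4 (u32Signature) + 4 (cbSize) + 4 (physAddr) + 4 (uPadding1)
   + 2*4 (acBlocks) + 8 (pNext) + 8 (pPrev) + 3*8 (auPadding2) = 64 bytes,
   which is exactly what the AssertCompileSize above verifies. */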


#ifndef DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
void dumpheap(const char *pszWhere)
{
    VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_dprintf(("Chunks:\n"));

    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;

    while (pChunk)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize,
                         pChunk->acBlocks[1 /*allocated*/], pChunk->physAddr));

        pChunk = pChunk->pNext;
    }

    VBGL_PH_dprintf(("Allocated blocks:\n"));

    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("Free blocks:\n"));

    pBlock = g_vbgldata.pFreeBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                         pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif


DECLINLINE(void *) vbglPhysHeapBlock2Data(VBGLPHYSHEAPBLOCK *pBlock)
{
    if (pBlock)
        return pBlock + 1;
    return NULL;
}


DECLINLINE(int) vbglPhysHeapEnter(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);

    VBGL_PH_ASSERT_MSG(RT_SUCCESS(rc), ("Failed to request heap mutex, rc = %Rrc\n", rc));

    return rc;
}


DECLINLINE(void) vbglPhysHeapLeave(void)
{
    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
}


static void vbglPhysHeapInitBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
{
    VBGL_PH_ASSERT(pBlock != NULL);
    VBGL_PH_ASSERT(pChunk != NULL);

    pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
    pBlock->cbDataSize   = cbDataSize;
    pBlock->fAllocated   = false;
    pBlock->pNext        = NULL;
    pBlock->pPrev        = NULL;
    pBlock->pChunk       = pChunk;
}

/**
 * Links @a pBlock onto the appropriate chain according to @a pInsertAfter or
 * @a pBlock->fAllocated.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
{
    bool const fAllocated = pBlock->fAllocated;

    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    if (pInsertAfter)
    {
        pBlock->pNext = pInsertAfter->pNext;
        pBlock->pPrev = pInsertAfter;

        if (pInsertAfter->pNext)
            pInsertAfter->pNext->pPrev = pBlock;

        pInsertAfter->pNext = pBlock;
    }
    else
    {
        /* inserting to head of list */
        pBlock->pPrev = NULL;

        if (fAllocated)
        {
            pBlock->pNext = g_vbgldata.pAllocBlocksHead;

            if (g_vbgldata.pAllocBlocksHead)
                g_vbgldata.pAllocBlocksHead->pPrev = pBlock;

            g_vbgldata.pAllocBlocksHead = pBlock;
        }
        else
        {
            pBlock->pNext = g_vbgldata.pFreeBlocksHead;

            if (g_vbgldata.pFreeBlocksHead)
                g_vbgldata.pFreeBlocksHead->pPrev = pBlock;

            g_vbgldata.pFreeBlocksHead = pBlock;
        }
    }

    /* Update the block counts: */
    g_vbgldata.acBlocks[fAllocated] += 1;
    pBlock->pChunk->acBlocks[fAllocated] += 1;
    AssertMsg(   (uint32_t)pBlock->pChunk->acBlocks[fAllocated]
              <= pBlock->pChunk->cbSize / (sizeof(*pBlock) + sizeof(void *)),
              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
}


/**
 * Unlinks @a pBlock from the chain it's on.
 *
 * This also updates the per-chunk block counts.
 */
static void vbglPhysHeapExcludeBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    bool const fAllocated = pBlock->fAllocated;

    if (pBlock->pNext)
        pBlock->pNext->pPrev = pBlock->pPrev;
    /* else: this is the tail of the list, but we do not maintain tail pointers
             for the block lists, so nothing to do. */

    if (pBlock->pPrev)
        pBlock->pPrev->pNext = pBlock->pNext;
    else if (fAllocated)
    {
        Assert(g_vbgldata.pAllocBlocksHead == pBlock);
        g_vbgldata.pAllocBlocksHead = pBlock->pNext;
    }
    else
    {
        Assert(g_vbgldata.pFreeBlocksHead == pBlock);
        g_vbgldata.pFreeBlocksHead = pBlock->pNext;
    }

    pBlock->pNext = NULL;
    pBlock->pPrev = NULL;

    /* Update the block counts: */
    g_vbgldata.acBlocks[fAllocated] -= 1;
    pBlock->pChunk->acBlocks[fAllocated] -= 1;
    AssertMsg(pBlock->pChunk->acBlocks[fAllocated] >= 0,
              ("pChunk=%p: cbSize=%#x acBlocks[%u]=%d\n",
               pBlock->pChunk, pBlock->pChunk->cbSize, fAllocated, pBlock->pChunk->acBlocks[fAllocated]));
    Assert(g_vbgldata.acBlocks[fAllocated] >= 0);
}


static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t           cbChunk;
    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */

    /* Compute the size of the new chunk, rounding up to the next chunk size,
       which must be a power of 2. */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);

    /* This function allocates physical contiguous memory below 4 GB.  This 4GB
       limitation stems from using a 32-bit OUT instruction to pass a block
       physical address to the host. */
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
     *        a guideline rather than absolute minimum size. */
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPBLOCK *pBlock;
        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);

        /* Init the new chunk. */
        pChunk->u32Signature = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbSize       = cbChunk;
        pChunk->physAddr     = (uint32_t)PhysAddr;
        pChunk->acBlocks[0]  = 0;
        pChunk->acBlocks[1]  = 0;
        pChunk->pNext        = NULL;
        pChunk->pPrev        = NULL;
        pChunk->uPadding1    = UINT32_C(0xADDCAAA1);
#if ARCH_BITS == 64
        pChunk->auPadding2[0] = UINT64_C(0xADDCAAA3ADDCAAA2);
        pChunk->auPadding2[1] = UINT64_C(0xADDCAAA5ADDCAAA4);
        pChunk->auPadding2[2] = UINT64_C(0xADDCAAA7ADDCAAA6);
#endif

        /* Initialize the free block, which now occupies the entire chunk. */
        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
        vbglPhysHeapInitBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Add the chunk to the list. */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}
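
/* Worked example (illustrative, 64-bit builds): cbMinBlock = 0x20000 gives
   cbChunk = 0x20000 + 64 + 32 = 0x20060 before alignment, which RT_ALIGN_32
   rounds up to 0x30000, the next multiple of VBGL_PH_CHUNKSIZE (0x10000).
   Small requests, including the initial vbglPhysHeapChunkAlloc(0) call from
   VbglR0PhysHeapInit, all land in a minimum-sized 0x10000 chunk. */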


static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    VBGL_PH_ASSERT(pChunk != NULL);
    VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));

    /* first scan the chunk and exclude (unlink) all blocks from the lists */

    uEnd = (uintptr_t)pChunk + pChunk->cbSize;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;

        uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        vbglPhysHeapExcludeBlock(pBlock);
    }

    VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));

    /* Exclude chunk from the chunk list */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    RTMemContFree(pChunk, pChunk->cbSize);
}


DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
{
    VBGLPHYSHEAPBLOCK *pBlock, *pIter;
    int                rc;

    /*
     * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
     */
    cbSize = RT_ALIGN_32(cbSize, sizeof(void *));

    rc = vbglPhysHeapEnter();
    if (RT_FAILURE(rc))
        return NULL;

    dumpheap("pre alloc");

    /*
     * Search the free list.  We do this in linear fashion as we don't expect
     * there to be many blocks in the heap.
     */

    /** @todo r=bird: Don't walk these lists for ever, use the block count
     *        statistics to limit the walking to the first X or something. */
    pBlock = NULL;
    if (cbSize <= PAGE_SIZE / 4 * 3)
    {
        /* Smaller than 3/4 page:  Prefer a free block that can keep the request within a single page,
           so HGCM processing in VMMDev can use page locks instead of several reads and writes. */

        VBGLPHYSHEAPBLOCK *pFallback = NULL;
        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
                    {
                        pBlock = pIter;
                        break;
                    }
                    pFallback = pIter;
                }
                else
                {
                    if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
                        pFallback = pIter;
                    if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
                        if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                            pBlock = pIter;
                }
            }

        if (!pBlock)
            pBlock = pFallback;
    }
    else
    {
        /* Larger than 3/4 page: Find the smallest free list match. */

        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    /* Exact match - we're done! */
                    pBlock = pIter;
                    break;
                }

                /* Looking for a free block with nearest size. */
                if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                    pBlock = pIter;
            }
    }

    if (!pBlock)
    {
        /* No free blocks, allocate a new chunk, the only free block of the
           chunk will be returned. */
        pBlock = vbglPhysHeapChunkAlloc(cbSize);
    }

    if (pBlock)
    {
        VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
        VBGL_PH_ASSERT_MSG(!pBlock->fAllocated, ("pBlock = %p\n", pBlock));

        /* We have a free block, either found or allocated. */

        if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
        {
            /* The block has considerably more room than requested, so split
             * off the tail end into a new free list entry.
             */
            pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);

            /* Init the new 'pIter' block, initialized blocks are always marked as free. */
            vbglPhysHeapInitBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));

            pBlock->cbDataSize = cbSize;

            /* Insert the new 'pIter' block after the 'pBlock' in the free list */
            vbglPhysHeapInsertBlock(pBlock, pIter);
        }

        /* Exclude pBlock from free list */
        vbglPhysHeapExcludeBlock(pBlock);

        /* Mark as allocated */
        pBlock->fAllocated = true;

        /* Insert to allocated list */
        vbglPhysHeapInsertBlock(NULL, pBlock);
    }

    dumpheap("post alloc");

    vbglPhysHeapLeave();
    VBGL_PH_dprintf(("VbglR0PhysHeapAlloc %x size %x\n", vbglPhysHeapBlock2Data(pBlock), pBlock ? pBlock->cbDataSize : 0));

    return vbglPhysHeapBlock2Data(pBlock);
}
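
/* Allocation example (illustrative): requests are rounded up to pointer size
 * first, so on a 64-bit guest both of these return 16 byte blocks:
 * @code
 *  void *pv1 = VbglR0PhysHeapAlloc(10);  // cbSize becomes 16 via RT_ALIGN_32
 *  void *pv2 = VbglR0PhysHeapAlloc(16);
 *  VbglR0PhysHeapFree(pv2);
 *  VbglR0PhysHeapFree(pv1);
 * @endcode
 */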


DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
{
    /*
     * Validate the incoming pointer.
     */
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            /*
             * Calculate and return its physical address.
             */
            return pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
        }

        AssertMsgFailed(("Use after free or corrupt pointer variable: pv=%p pBlock=%p: u32Signature=%#x cb=%#x fAllocated=%d\n",
                         pv, pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));
    }
    else
        AssertMsgFailed(("Unexpected NULL pointer\n"));
    return 0;
}
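
/* Worked example (illustrative numbers): for a chunk at physAddr=0x01230000
   and a user pointer pv at byte offset 0x90 into that chunk, this returns
   0x01230000 + 0x90 = 0x01230090.  The offset within the chunk is the same
   for the virtual and physical mappings because each chunk is physically
   contiguous. */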


DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    if (pv != NULL)
    {
        VBGLPHYSHEAPBLOCK *pBlock;

        int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
        AssertRCReturnVoid(rc);

        dumpheap("pre free");

        /*
         * Validate the block header.
         */
        pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        if (   pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE
            && pBlock->fAllocated)
        {
            VBGLPHYSHEAPCHUNK *pChunk;
            VBGLPHYSHEAPBLOCK *pNeighbour;

            /*
             * Move the block from the allocated list to the free list.
             */
            VBGL_PH_dprintf(("VbglR0PhysHeapFree: %p size %#x\n", pv, pBlock->cbDataSize));
            vbglPhysHeapExcludeBlock(pBlock);

            dumpheap("post exclude");

            pBlock->fAllocated = false;
            vbglPhysHeapInsertBlock(NULL, pBlock);

            dumpheap("post insert");

            /*
             * Check if the block after this one is also free and we can merge it into
             * this one.
             *
             * Because the free list isn't sorted by address we cannot cheaply do the
             * same for the block before us, so we have to hope for the best for now.
             */
            /** @todo When the free:used ratio in a chunk gets too skewed, scan the
             *        whole chunk and merge adjacent blocks that way every so often.
             *        Always do so when this is the last used one and we end up with
             *        more than 1 free node afterwards. */
            pChunk = pBlock->pChunk;
            pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);
            if (   (uintptr_t)pNeighbour <= (uintptr_t)pChunk + pChunk->cbSize - sizeof(*pNeighbour)
                && !pNeighbour->fAllocated)
            {
                /* Adjust size of current memory block */
                pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

                /* Exclude the next neighbour */
                vbglPhysHeapExcludeBlock(pNeighbour);

                dumpheap("post merge");
            }

            /*
             * If this chunk is now completely unused, delete it if there are
             * more completely free ones.
             */
            if (   pChunk->acBlocks[1 /*allocated*/] == 0
                && (pChunk->pPrev || pChunk->pNext))
            {
                VBGLPHYSHEAPCHUNK *pCurChunk;
                uint32_t           cUnusedChunks = 0;
                for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
                {
                    AssertBreak(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
                    if (pCurChunk->acBlocks[1 /*allocated*/] == 0)
                    {
                        cUnusedChunks++;
                        if (cUnusedChunks > 1)
                        {
                            /* Delete current chunk, it will also exclude all free blocks
                             * remaining in the chunk from the free list, so the pBlock
                             * will also be invalid after this.
                             */
                            vbglPhysHeapChunkDelete(pChunk);
                            pNeighbour = pBlock = NULL; /* invalid */
                            break;
                        }
                    }
                }
            }

            dumpheap("post free");
        }
        else
            AssertMsgFailed(("pBlock: %p: u32Signature=%#x cb=%#x fAllocated=%d - double free?\n",
                             pBlock, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fAllocated));

        rc = RTSemFastMutexRelease(g_vbgldata.mutexHeap);
        AssertRC(rc);
    }
}
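
/* Merge example (illustrative): take three adjacent blocks A|B|C in a chunk,
   A free, B and C allocated.  Freeing B merges nothing, since its neighbour C
   is still allocated, and A in front of it is never considered.  Freeing C
   afterwards merges C with whatever free block follows it, but again not with
   B, because the free list is not address sorted.  The chunk itself is only
   retired once a second completely free chunk exists. */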

#ifdef IN_TESTCASE /* For the testcase only */
# include <iprt/err.h>

/**
 * Returns the sum of all free heap blocks.
 *
 * This is the amount of memory you can theoretically allocate if you do
 * allocations exactly matching the free blocks.
 *
 * @returns The size of the free blocks.
 * @returns 0 if heap was safely detected as being bad.
 */
DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, 0);

    size_t cbTotal = 0;
    for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
    {
        Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
        cbTotal += pCurBlock->cbDataSize;
    }

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return cbTotal;
}
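
/* Testcase sketch (illustrative, single-threaded, assuming an existing free
 * block satisfies the request so no chunk is allocated or retired):
 * @code
 *  size_t const cbBefore = VbglR0PhysHeapGetFreeSize();
 *  void *pv = VbglR0PhysHeapAlloc(16);
 *  Assert(VbglR0PhysHeapGetFreeSize() <= cbBefore - 16);
 *  VbglR0PhysHeapFree(pv);             // merges back with any split-off tail
 *  Assert(VbglR0PhysHeapGetFreeSize() == cbBefore);
 * @endcode
 */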


/**
 * Checks the heap, caller responsible for locking.
 *
 * @returns VINF_SUCCESS if okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk.
     */
    unsigned acTotalBlocks[2] = { 0, 0 };
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead, *pPrevChunk = NULL; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x", pCurChunk, pCurChunk->u32Signature));
        AssertReturn(pCurChunk->pPrev == pPrevChunk,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                   "pCurChunk=%p: pPrev=%p, expected %p", pCurChunk, pCurChunk->pPrev, pPrevChunk));

        uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbSize;
        const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        unsigned                 acBlocks[2] = { 0, 0 };
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbDataSize >= sizeof(void *)
                         && pCurBlock->cbDataSize < _128M
                         && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbDataSize=%#x", pCurBlock, pCurBlock->cbDataSize));
            acBlocks[pCurBlock->fAllocated] += 1;

            /* advance */
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p", pCurBlock, uEnd));

        acTotalBlocks[1] += acBlocks[1];
        AssertReturn(acBlocks[1] == (uint32_t)pCurChunk->acBlocks[1],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cAllocatedBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->acBlocks[1], acBlocks[1]));

        acTotalBlocks[0] += acBlocks[0];
        AssertReturn(acBlocks[0] == (uint32_t)pCurChunk->acBlocks[0],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_5,
                                   "pCurChunk=%p: cFreeBlocks=%u, expected %u",
                                   pCurChunk, pCurChunk->acBlocks[0], acBlocks[0]));

        pPrevChunk = pCurChunk;
    }

    AssertReturn(acTotalBlocks[0] == (uint32_t)g_vbgldata.acBlocks[0],
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.acBlocks[0]=%u, expected %u", g_vbgldata.acBlocks[0], acTotalBlocks[0]));
    AssertReturn(acTotalBlocks[1] == (uint32_t)g_vbgldata.acBlocks[1],
                 RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR,
                               "g_vbgldata.acBlocks[1]=%u, expected %u", g_vbgldata.acBlocks[1], acTotalBlocks[1]));

    /*
     * Count each list and check that they contain the same number of nodes.
     */
    VBGLPHYSHEAPBLOCK *apHeads[2] = { g_vbgldata.pFreeBlocksHead, g_vbgldata.pAllocBlocksHead };
    for (unsigned iType = 0; iType < RT_ELEMENTS(apHeads); iType++)
    {
        unsigned           cBlocks    = 0;
        VBGLPHYSHEAPBLOCK *pPrevBlock = NULL;
        for (VBGLPHYSHEAPBLOCK *pCurBlock = apHeads[iType]; pCurBlock; pCurBlock = pCurBlock->pNext)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p/%u: magic=%#x", pCurBlock, iType, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pPrev == pPrevBlock,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p/%u: pPrev=%p, expected %p", pCurBlock, iType, pCurBlock->pPrev, pPrevBlock));
            AssertReturn(pCurBlock->pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurBlock=%p/%u: chunk (%p) magic=%#x",
                                       pCurBlock, iType, pCurBlock->pChunk, pCurBlock->pChunk->u32Signature));
            cBlocks++;
            pPrevBlock = pCurBlock;
        }

        AssertReturn(cBlocks == acTotalBlocks[iType],
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                   "iType=%u: Found %u in list, expected %u", iType, cBlocks, acTotalBlocks[iType]));
    }

    return VINF_SUCCESS;
}


/**
 * Performs a heap check.
 *
 * @returns VINF_SUCCESS if the heap checks out okay, error status if not.
 * @param   pErrInfo    Where to return more error details, optional.
 */
DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, rc);

    rc = vbglR0PhysHeapCheckLocked(pErrInfo);

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return rc;
}

#endif /* IN_TESTCASE */

DECLR0VBGL(int) VbglR0PhysHeapInit(void)
{
    g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX;

    /* Allocate the first chunk of the heap. */
    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    if (pBlock)
        return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
    return VERR_NO_MEMORY;
}
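
/* Lifecycle sketch (illustrative; the actual call sites are outside this
 * file, in the guest library/driver init and teardown paths):
 * @code
 *  int rc = VbglR0PhysHeapInit();
 *  if (RT_SUCCESS(rc))
 *  {
 *      // ... driver runs, allocating and freeing heap blocks ...
 *      VbglR0PhysHeapTerminate();
 *  }
 * @endcode
 */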

DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
{
    while (g_vbgldata.pChunkHead)
        vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);

    RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
}