VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@ 97920

Last change on this file since 97920 was 97919, checked in by vboxsync, 2 years ago

Add/VBoxGuestR0LibPhysHeap.cpp: Reduced the size of VBGLPHYSHEAPBLOCK: 64-bit: 40 -> 32 bytes; 32-bit: 24 -> 20 bytes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.2 KB
Line 
1/* $Id: VBoxGuestR0LibPhysHeap.cpp 97919 2022-12-30 16:57:42Z vboxsync $ */
2/** @file
3 * VBoxGuestLibR0 - Physical memory heap.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#include "VBoxGuestR0LibInternal.h"
36
37#include <iprt/assert.h>
38#include <iprt/semaphore.h>
39#include <iprt/alloc.h>
40
41/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
42 *
43 * The physical memory heap consists of a doubly linked list of large chunks
44 * (VBGLDATA::pChunkHead), memory blocks are allocated within these chunks and
45 * are members of allocated (VBGLDATA::pAllocBlocksHead) and free
46 * (VBGLDATA::pFreeBlocksHead) doubly linked lists.
47 *
48 * When allocating a block, we search in Free linked list for a suitable free
49 * block. If there is no such block, a new chunk is allocated and the new block
50 * is taken from the new chunk as the only chunk-sized free block. Allocated
51 * block is excluded from the Free list and goes to Alloc list.
52 *
53 * When freeing block, we check the pointer and then exclude block from Alloc
54 * list and move it to free list.
55 *
56 * For each chunk we maintain the allocated blocks counter. If 2 (or more)
57 * entire chunks are free they are immediately deallocated, so we always have at
58 * most 1 free chunk.
59 *
60 * When freeing blocks, two subsequent free blocks are always merged together.
61 * Current implementation merges blocks only when there is a block after the
62 * just freed one.
63 */
64
65
66/*********************************************************************************************************************************
67* Defined Constants And Macros *
68*********************************************************************************************************************************/
69#define VBGL_PH_ASSERT Assert
70#define VBGL_PH_ASSERT_MSG AssertMsg
71
72// #define DUMPHEAP
73
74#ifdef DUMPHEAP
75# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
76#else
77# define VBGL_PH_dprintf(a)
78#endif
79
80/* Heap block signature */
81#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)
82
83
84/* Heap chunk signature */
85#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
86/* Heap chunk allocation unit */
87#define VBGL_PH_CHUNKSIZE (0x10000)
88
89/* Heap block bit flags */
90#define VBGL_PH_BF_ALLOCATED (0x1)
91
92/** Threshold at which to split out a tail free block when allocating.
93 *
94 * The value gives the amount of user space, i.e. excluding the header.
95 *
96 * Using 32 bytes based on VMMDev.h request sizes. The smallest requests are 24
97 * bytes, i.e. only the header, at least 4 of these. There are at least 10 with
98 * size 28 bytes and at least 11 with size 32 bytes. So, 32 bytes would fit
99 * some 25 requests out of about 60, which is reasonable.
100 */
101#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
102
103
104/*********************************************************************************************************************************
105* Structures and Typedefs *
106*********************************************************************************************************************************/
/**
 * A heap block (within a chunk).
 *
 * This is used to track a part of a heap chunk that's either free or
 * allocated.  The VBGLPHYSHEAPBLOCK::fAllocated member indicates which it is.
 *
 * The user data area follows immediately after this header; blocks are laid
 * out back to back inside their chunk (see vbglPhysHeapChunkDelete's scan).
 */
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbDataSize : 31;
    /** The top bit indicates whether it's allocated or free. */
    uint32_t fAllocated : 1;

    /** Pointer to the next block on the list (the allocated or the free list,
     * depending on fAllocated). */
    VBGLPHYSHEAPBLOCK *pNext;
    /** Pointer to the previous block on the list. */
    VBGLPHYSHEAPBLOCK *pPrev;
    /** Pointer back to the chunk this block lives in. */
    VBGLPHYSHEAPCHUNK *pChunk;
};
130
/**
 * A chunk of memory used by the heap for sub-allocations.
 *
 * There is a doubly linked list of these (VBGLDATA::pChunkHead).  Chunks are
 * allocated as physically contiguous memory below 4GB, see
 * vbglPhysHeapChunkAlloc.
 */
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbSize;

    /** Physical address of the chunk (contiguous, below 4GB). */
    uint32_t physAddr;

    /** Number of allocated blocks in the chunk.  When this reaches zero the
     * chunk is a candidate for deletion (see VbglR0PhysHeapFree). */
    int32_t cAllocatedBlocks;

    /** Pointer to the next chunk. */
    VBGLPHYSHEAPCHUNK *pNext;
    /** Pointer to the previous chunk. */
    VBGLPHYSHEAPCHUNK *pPrev;
};
155
156
#ifndef DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
/**
 * Debug helper: dumps all chunks plus the allocated and free block lists.
 *
 * Only compiled in when DUMPHEAP is defined (see VBGL_PH_dprintf).
 *
 * @param   pszWhere    Caller context string included in the output.
 */
void dumpheap(const char *pszWhere)
{
   VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", pszWhere));

   VBGL_PH_dprintf(("Chunks:\n"));

   VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;

   while (pChunk)
   {
       VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
                        pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));

       pChunk = pChunk->pNext;
   }

   VBGL_PH_dprintf(("Allocated blocks:\n"));

   VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;

   while (pBlock)
   {
       VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                        pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                        pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

       pBlock = pBlock->pNext;
   }

   VBGL_PH_dprintf(("Free blocks:\n"));

   pBlock = g_vbgldata.pFreeBlocksHead;

   while (pBlock)
   {
       VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, %s, pChunk = %p\n",
                        pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize,
                        pBlock->fAllocated ? "allocated" : "free", pBlock->pChunk));

       pBlock = pBlock->pNext;
   }

   VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif
205
206
207DECLINLINE(void *) vbglPhysHeapBlock2Data(VBGLPHYSHEAPBLOCK *pBlock)
208{
209 if (pBlock)
210 return pBlock + 1;
211 return NULL;
212}
213
214
215DECLINLINE(VBGLPHYSHEAPBLOCK *) vbglPhysHeapData2Block(void *pv)
216{
217 if (pv)
218 {
219 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
220 AssertMsgReturn(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
221 ("pBlock->u32Signature = %08X\n", pBlock->u32Signature),
222 NULL);
223 return pBlock;
224 }
225 return NULL;
226}
227
228
229DECLINLINE(int) vbglPhysHeapEnter(void)
230{
231 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
232
233 VBGL_PH_ASSERT_MSG(RT_SUCCESS(rc), ("Failed to request heap mutex, rc = %Rrc\n", rc));
234
235 return rc;
236}
237
238
/** Releases the heap lock taken by vbglPhysHeapEnter(). */
DECLINLINE(void) vbglPhysHeapLeave(void)
{
    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
}
243
244
245static void vbglPhysHeapInitBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
246{
247 VBGL_PH_ASSERT(pBlock != NULL);
248 VBGL_PH_ASSERT(pChunk != NULL);
249
250 pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
251 pBlock->cbDataSize = cbDataSize;
252 pBlock->fAllocated = false;
253 pBlock->pNext = NULL;
254 pBlock->pPrev = NULL;
255 pBlock->pChunk = pChunk;
256}
257
258
259static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
260{
261 VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
262 VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
263
264 if (pInsertAfter)
265 {
266 pBlock->pNext = pInsertAfter->pNext;
267 pBlock->pPrev = pInsertAfter;
268
269 if (pInsertAfter->pNext)
270 pInsertAfter->pNext->pPrev = pBlock;
271
272 pInsertAfter->pNext = pBlock;
273 }
274 else
275 {
276 /* inserting to head of list */
277 pBlock->pPrev = NULL;
278
279 if (pBlock->fAllocated)
280 {
281 pBlock->pNext = g_vbgldata.pAllocBlocksHead;
282
283 if (g_vbgldata.pAllocBlocksHead)
284 g_vbgldata.pAllocBlocksHead->pPrev = pBlock;
285
286 g_vbgldata.pAllocBlocksHead = pBlock;
287 }
288 else
289 {
290 pBlock->pNext = g_vbgldata.pFreeBlocksHead;
291
292 if (g_vbgldata.pFreeBlocksHead)
293 g_vbgldata.pFreeBlocksHead->pPrev = pBlock;
294
295 g_vbgldata.pFreeBlocksHead = pBlock;
296 }
297 }
298}
299
300
301/**
302 * Unlinks @a pBlock from the chain its on.
303 */
304static void vbglPhysHeapExcludeBlock(VBGLPHYSHEAPBLOCK *pBlock)
305{
306 if (pBlock->pNext)
307 pBlock->pNext->pPrev = pBlock->pPrev;
308 /* else: this is tail of list but we do not maintain tails of block lists. so nothing to do. */
309
310 if (pBlock->pPrev)
311 pBlock->pPrev->pNext = pBlock->pNext;
312 else if (pBlock->fAllocated)
313 {
314 Assert(g_vbgldata.pAllocBlocksHead == pBlock);
315 g_vbgldata.pAllocBlocksHead = pBlock->pNext;
316 }
317 else
318 {
319 Assert(g_vbgldata.pFreeBlocksHead == pBlock);
320 g_vbgldata.pFreeBlocksHead = pBlock->pNext;
321 }
322
323 pBlock->pNext = NULL;
324 pBlock->pPrev = NULL;
325}
326
/**
 * Allocates a new physically contiguous chunk (below 4GB), formats it as one
 * big free block and links the chunk at the head of the chunk list.
 *
 * @returns The chunk's single free block (already on the free list), or NULL
 *          if the contiguous allocation failed.
 * @param   cbMinBlock  Minimum user data size the block must hold; the chunk
 *                      size is rounded up to a multiple of VBGL_PH_CHUNKSIZE.
 */
static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t cbChunk;
    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */

    /* Compute the size of the new chunk, rounding up to next chunk size,
       which must be power of 2. */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);

    /* This function allocates physical contiguous memory below 4 GB.  This 4GB
       limitation stems from using a 32-bit OUT instruction to pass a block
       physical address to the host. */
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
     * a guideline rather than absolute minimum size. */
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPBLOCK *pBlock;
        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);

        /* Init the new chunk. */
        pChunk->u32Signature     = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbSize           = cbChunk;
        pChunk->physAddr         = (uint32_t)PhysAddr;
        pChunk->cAllocatedBlocks = 0;
        pChunk->pNext            = NULL;
        pChunk->pPrev            = NULL;

        /* Initialize the free block, which now occupies entire chunk. */
        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
        vbglPhysHeapInitBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Add the chunk to the list. */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead    = pChunk;

        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}
379
380
381static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
382{
383 uintptr_t uEnd, uCur;
384 VBGL_PH_ASSERT(pChunk != NULL);
385 VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));
386
387 VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));
388
389 /* first scan the chunk and exclude (unlink) all blocks from the lists */
390
391 uEnd = (uintptr_t)pChunk + pChunk->cbSize;
392 uCur = (uintptr_t)(pChunk + 1);
393
394 while (uCur < uEnd)
395 {
396 VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;
397
398 uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);
399
400 vbglPhysHeapExcludeBlock(pBlock);
401 }
402
403 VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));
404
405 /* Exclude chunk from the chunk list */
406 if (pChunk->pNext)
407 pChunk->pNext->pPrev = pChunk->pPrev;
408 /* else: we do not maintain tail pointer. */
409
410 if (pChunk->pPrev)
411 pChunk->pPrev->pNext = pChunk->pNext;
412 else
413 {
414 Assert(g_vbgldata.pChunkHead == pChunk);
415 g_vbgldata.pChunkHead = pChunk->pNext;
416 }
417
418 RTMemContFree(pChunk, pChunk->cbSize);
419}
420
421
422DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
423{
424 VBGLPHYSHEAPBLOCK *pBlock, *pIter;
425 int rc;
426
427 /*
428 * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
429 */
430 cbSize = RT_ALIGN_32(cbSize, sizeof(void *));
431
432 rc = vbglPhysHeapEnter();
433 if (RT_FAILURE(rc))
434 return NULL;
435
436 dumpheap("pre alloc");
437
438 /*
439 * Search the free list. We do this in linear fashion as we don't expect
440 * there to be many blocks in the heap.
441 */
442
443 pBlock = NULL;
444 if (cbSize <= PAGE_SIZE / 4 * 3)
445 {
446 /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
447 so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
448
449 VBGLPHYSHEAPBLOCK *pFallback = NULL;
450 for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
451 if (pIter->cbDataSize >= cbSize)
452 {
453 if (pIter->cbDataSize == cbSize)
454 {
455 if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
456 {
457 pBlock = pIter;
458 break;
459 }
460 pFallback = pIter;
461 }
462 else
463 {
464 if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
465 pFallback = pIter;
466 if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
467 if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
468 pBlock = pIter;
469 }
470 }
471
472 if (!pBlock)
473 pBlock = pFallback;
474 }
475 else
476 {
477 /* Large than 3/4 page: Find smallest free list match. */
478
479 for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
480 if (pIter->cbDataSize >= cbSize)
481 {
482 if (pIter->cbDataSize == cbSize)
483 {
484 /* Exact match - we're done! */
485 pBlock = pIter;
486 break;
487 }
488
489 /* Looking for a free block with nearest size. */
490 if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
491 pBlock = pIter;
492 }
493 }
494
495 if (!pBlock)
496 {
497 /* No free blocks, allocate a new chunk, the only free block of the
498 chunk will be returned. */
499 pBlock = vbglPhysHeapChunkAlloc(cbSize);
500 }
501
502 if (pBlock)
503 {
504 VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
505 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
506 VBGL_PH_ASSERT_MSG(!pBlock->fAllocated, ("pBlock = %p\n", pBlock));
507
508 /* We have a free block, either found or allocated. */
509
510 if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
511 {
512 /* Data will occupy less than a half of the block,
513 * split off the tail end into a new free list entry.
514 */
515 pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);
516
517 /* Init the new 'pIter' block, initialized blocks are always marked as free. */
518 vbglPhysHeapInitBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
519
520 pBlock->cbDataSize = cbSize;
521
522 /* Insert the new 'pIter' block after the 'pBlock' in the free list */
523 vbglPhysHeapInsertBlock(pBlock, pIter);
524 }
525
526 /* Exclude pBlock from free list */
527 vbglPhysHeapExcludeBlock(pBlock);
528
529 /* Mark as allocated */
530 pBlock->fAllocated = true;
531
532 /* Insert to allocated list */
533 vbglPhysHeapInsertBlock(NULL, pBlock);
534
535 /* Adjust the chunk allocated blocks counter */
536 pBlock->pChunk->cAllocatedBlocks++;
537 }
538
539 dumpheap("post alloc");
540
541 vbglPhysHeapLeave();
542 VBGL_PH_dprintf(("VbglR0PhysHeapAlloc %x size %x\n", vbglPhysHeapBlock2Data(pBlock), pBlock->cbDataSize));
543
544 return vbglPhysHeapBlock2Data(pBlock);
545}
546
547DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
548{
549 uint32_t physAddr = 0;
550 VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapData2Block(pv);
551
552 if (pBlock)
553 {
554 VBGL_PH_ASSERT_MSG(pBlock->fAllocated, ("pBlock = %p\n", pBlock));
555
556 if (pBlock->fAllocated)
557 physAddr = pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
558 }
559
560 return physAddr;
561}
562
/**
 * Frees a block allocated by VbglR0PhysHeapAlloc.
 *
 * The block is moved from the allocated to the free list, merged with the
 * following free block when adjacent, and the owning chunk is deleted when
 * two or more chunks become entirely free.
 *
 * @param   pv  Pointer returned by VbglR0PhysHeapAlloc.  NULL and pointers
 *              with a bad block signature are ignored (asserted).
 */
DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    VBGLPHYSHEAPBLOCK *pBlock;
    VBGLPHYSHEAPBLOCK *pNeighbour;
    VBGLPHYSHEAPCHUNK *pChunk;

    int rc = vbglPhysHeapEnter();
    if (RT_FAILURE(rc))
        return;

    dumpheap ("pre free");

    /* Validate the pointer and recover the block header. */
    pBlock = vbglPhysHeapData2Block(pv);

    if (!pBlock)
    {
        vbglPhysHeapLeave();
        return;
    }

    VBGL_PH_ASSERT_MSG(pBlock->fAllocated, ("pBlock = %p\n", pBlock));

    /* Exclude from allocated list */
    vbglPhysHeapExcludeBlock(pBlock);

    dumpheap("post exclude");

    VBGL_PH_dprintf(("VbglR0PhysHeapFree %p size %x\n", pv, pBlock->cbDataSize));

    /* Mark as free */
    pBlock->fAllocated = false;

    /* Insert to free list */
    vbglPhysHeapInsertBlock(NULL, pBlock);

    dumpheap("post insert");

    /* Adjust the chunk allocated blocks counter */
    pChunk = pBlock->pChunk;
    pChunk->cAllocatedBlocks--;

    VBGL_PH_ASSERT(pChunk->cAllocatedBlocks >= 0);

    /* Check if we can merge 2 free blocks. To simplify heap maintenance,
     * we will look at block after the just freed one.
     * This will not prevent us from detecting free memory chunks.
     * Also in most cases blocks are deallocated in reverse allocation order
     * and in that case the merging will work.
     */
    /** @todo r=bird: This simplistic approach is of course not working.
     *        However, since the heap lists aren't sorted in any way, we cannot
     *        cheaply determine where the block before us starts. */

    /* Blocks are laid out back to back, so the header right after our data is
       the next block in the chunk (if still inside the chunk). */
    pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);

    if (   (uintptr_t)pNeighbour < (uintptr_t)pChunk + pChunk->cbSize
        && !pNeighbour->fAllocated)
    {
        /* The next block is free as well. */

        /* Adjust size of current memory block */
        pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        /* Exclude the next neighbour */
        vbglPhysHeapExcludeBlock(pNeighbour);
    }

    dumpheap("post merge");

    /* now check if there are 2 or more free (unused) chunks */
    if (pChunk->cAllocatedBlocks == 0)
    {
        VBGLPHYSHEAPCHUNK *pCurChunk;

        uint32_t cUnusedChunks = 0;

        for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
        {
            Assert(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
            if (pCurChunk->cAllocatedBlocks == 0)
                cUnusedChunks++;
        }

        if (cUnusedChunks > 1)
        {
            /* Delete current chunk, it will also exclude all free blocks
             * remaining in the chunk from the free list, so the pBlock
             * will also be invalid after this.
             */
            vbglPhysHeapChunkDelete(pChunk);
        }
    }

    dumpheap("post free");

    vbglPhysHeapLeave();
}
660
661#ifdef IN_TESTCASE /* For the testcase only */
662# include <iprt/err.h>
663
664/**
665 * Returns the sum of all free heap blocks.
666 *
667 * This is the amount of memory you can theoretically allocate if you do
668 * allocations exactly matching the free blocks.
669 *
670 * @returns The size of the free blocks.
671 * @returns 0 if heap was safely detected as being bad.
672 */
673DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
674{
675 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
676 AssertRCReturn(rc, 0);
677
678 size_t cbTotal = 0;
679 for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
680 {
681 Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
682 cbTotal += pCurBlock->cbDataSize;
683 }
684
685 RTSemFastMutexRelease(g_vbgldata.mutexHeap);
686 return cbTotal;
687}
688
/**
 * Worker for VbglR0PhysHeapCheck; caller must hold the heap mutex.
 *
 * Walks every block in every chunk, validating signatures, chunk back
 * pointers, size sanity/alignment and the per-chunk allocated-block count.
 *
 * @returns VINF_SUCCESS if consistent, otherwise a VERR_* status (details in
 *          @a pErrInfo when non-NULL).
 * @param   pErrInfo    Where to return failure details.  Optional.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk.
     */
    /* Note: the cTotalFreeBlocks/cTotalUsedBlocks tallies are currently not
       cross-checked against the list contents. */
    unsigned cTotalFreeBlocks = 0;
    unsigned cTotalUsedBlocks = 0;
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x\n", pCurChunk, pCurChunk->u32Signature));

        uintptr_t const          uEnd      = (uintptr_t)pCurChunk + pCurChunk->cbSize;
        const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        unsigned                 cUsedBlocks = 0;
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x\n", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p\n", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbDataSize >= 8
                         && pCurBlock->cbDataSize < _128M
                         && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbDataSize=%#x\n", pCurBlock, pCurBlock->cbDataSize));
            if (pCurBlock->fAllocated)
                cUsedBlocks += 1;
            else
                cTotalFreeBlocks += 1;

            /* advance */
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p\n", pCurBlock, uEnd));
        AssertReturn(cUsedBlocks == (uint32_t)pCurChunk->cAllocatedBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cAllocatedBlocks=%u, expected %u\n",
                                   pCurChunk, pCurChunk->cAllocatedBlocks, cUsedBlocks));
        cTotalUsedBlocks += cUsedBlocks;
    }
    return VINF_SUCCESS;
}
736
737/**
738 * Performs a heap check.
739 *
740 * @returns Problem description on failure, NULL on success.
741 */
742DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
743{
744 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
745 AssertRCReturn(rc, 0);
746
747 rc = vbglR0PhysHeapCheckLocked(pErrInfo);
748
749 RTSemFastMutexRelease(g_vbgldata.mutexHeap);
750 return rc;
751}
752
753
754#endif /* IN_TESTCASE */
755
756
757DECLR0VBGL(int) VbglR0PhysHeapInit(void)
758{
759 g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX;
760
761 /* Allocate the first chunk of the heap. */
762 VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
763 if (pBlock)
764 return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
765 return VERR_NO_MEMORY;
766}
767
768DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
769{
770 while (g_vbgldata.pChunkHead)
771 vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);
772
773 RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
774}
775
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette