VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp @ 1507

Last change on this file since 1507 was 320, checked in by vboxsync, 18 years ago

@todo on a potential bug.

/* $Id: MMAllHyper.cpp 320 2007-01-25 16:57:39Z vboxsync $ */
/** @file
 * MM - Memory Monitor(/Manager) - Hypervisor Memory Area, All Contexts.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
#include <VBox/mm.h>
#include <VBox/stam.h>
#include "MMInternal.h"
#include <VBox/vm.h>

#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#ifdef DEBUG
# define MMHYPER_HEAP_STRICT 1
#endif

#define ASSERT_L(u1, u2)    AssertMsg((u1) <  (u2), ("u1=%#x u2=%#x\n", u1, u2))
#define ASSERT_LE(u1, u2)   AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
#define ASSERT_GE(u1, u2)   AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
#define ASSERT_ALIGN(u1)    AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
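
/* Illustrative example: MMHYPER_HEAP_ALIGN_MIN is the default 8 byte
   alignment (see MMHyperAlloc below), so the mask is 7 and the check is a
   single AND: ASSERT_ALIGN(0x18) passes while ASSERT_ALIGN(0x1c) asserts. */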

#define ASSERT_OFFPREV(pHeap, pChunk) \
    do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
         Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)CTXSUFF((pHeap)->pbHeap) - (intptr_t)(pChunk)); \
         AssertMsg(    MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
                   ||  (uint8_t *)(pChunk) == CTXSUFF((pHeap)->pbHeap), \
                   ("pChunk=%p pvHyperHeap=%p\n", (pChunk), CTXSUFF((pHeap)->pbHeap))); \
    } while (0)

#define ASSERT_OFFNEXT(pHeap, pChunk) \
    do { ASSERT_ALIGN((pChunk)->offNext); \
         ASSERT_L((pChunk)->offNext, (uintptr_t)CTXSUFF((pHeap)->pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
    } while (0)

#define ASSERT_OFFHEAP(pHeap, pChunk) \
    do { Assert((pChunk)->offHeap); \
         AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
                   ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
         Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
    } while (0)

#ifdef VBOX_WITH_STATISTICS
#define ASSERT_OFFSTAT(pHeap, pChunk) \
    do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
             Assert(!(pChunk)->offStat); \
         else if ((pChunk)->offStat) \
         { \
             Assert((pChunk)->offStat); \
             AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
             uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
             AssertMsg(uPtr - (uintptr_t)CTXSUFF((pHeap)->pbHeap) < (pHeap)->offPageAligned, \
                       ("%p - %p < %RX32\n", uPtr, CTXSUFF((pHeap)->pbHeap), (pHeap)->offPageAligned)); \
         } \
    } while (0)
#else
#define ASSERT_OFFSTAT(pHeap, pChunk) \
    do { Assert(!(pChunk)->offStat); \
    } while (0)
#endif

#define ASSERT_CHUNK(pHeap, pChunk) \
    do { ASSERT_OFFNEXT(pHeap, pChunk); \
         ASSERT_OFFPREV(pHeap, pChunk); \
         ASSERT_OFFHEAP(pHeap, pChunk); \
         ASSERT_OFFSTAT(pHeap, pChunk); \
    } while (0)
#define ASSERT_CHUNK_USED(pHeap, pChunk) \
    do { ASSERT_OFFNEXT(pHeap, pChunk); \
         ASSERT_OFFPREV(pHeap, pChunk); \
         Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
    } while (0)

#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
    do { ASSERT_ALIGN((pChunk)->offPrev); \
         ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)CTXSUFF((pHeap)->pbHeap) - (intptr_t)(pChunk)); \
         Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
         AssertMsg(    (pChunk)->offPrev \
                   ||  (uintptr_t)(pChunk) - (uintptr_t)CTXSUFF((pHeap)->pbHeap) == (pHeap)->offFreeHead, \
                   ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)CTXSUFF((pHeap)->pbHeap),\
                    (pHeap)->offFreeHead)); \
    } while (0)

#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
    do { ASSERT_ALIGN((pChunk)->offNext); \
         ASSERT_L((pChunk)->offNext, (uintptr_t)CTXSUFF((pHeap)->pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
         Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
         AssertMsg(    (pChunk)->offNext \
                   ||  (uintptr_t)(pChunk) - (uintptr_t)CTXSUFF((pHeap)->pbHeap) == (pHeap)->offFreeTail, \
                   ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)CTXSUFF((pHeap)->pbHeap), \
                    (pHeap)->offFreeTail)); \
    } while (0)

#define ASSERT_FREE_CB(pHeap, pChunk) \
    do { ASSERT_ALIGN((pChunk)->cb); \
         Assert((pChunk)->cb > 0); \
         if ((pChunk)->core.offNext) \
             AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
                       ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
         else \
             ASSERT_LE((pChunk)->cb, (uintptr_t)CTXSUFF((pHeap)->pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
    } while (0)

#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
    do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
         Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
         ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
         ASSERT_FREE_OFFPREV(pHeap, pChunk); \
         ASSERT_FREE_CB(pHeap, pChunk); \
    } while (0)
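
/**
 * Illustrative only (not part of the build): all chunk links in this heap are
 * relative byte offsets rather than pointers, so the same heap image is valid
 * in ring-3, ring-0 and GC without pointer fixups. Walking the chunk list
 * therefore looks like this (mirroring the loops in mmr3HyperHeapCheck and
 * MMHyperHeapDump below):
 * @code
 *      PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)CTXSUFF(pHeap->pbHeap);
 *      for (;;)
 *      {
 *          // ... inspect pCur ...
 *          if (!pCur->core.offNext)    // offNext == 0 marks the last chunk
 *              break;
 *          pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
 *      }
 * @endcode
 */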


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
#ifdef VBOX_WITH_STATISTICS
static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
#ifdef IN_RING3
static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
#endif
#endif
static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
#ifdef MMHYPER_HEAP_STRICT
static void mmr3HyperHeapCheck(PMMHYPERHEAP pHeap);
#endif


/**
 * Allocates memory in the Hypervisor (GC VMM) area.
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   cb          Number of bytes to allocate.
 * @param   uAlignment  Required memory alignment in bytes.
 *                      Values are 0,8,16,32 and PAGE_SIZE.
 *                      0 -> default alignment, i.e. 8 bytes.
 * @param   enmTag      The statistics tag.
 * @param   ppv         Where to store the address to the allocated
 *                      memory.
 * @remark  This is assumed not to be used at times when serialization is required.
 */
MMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
{
    AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));

    /*
     * Validate input and adjust it to reasonable values.
     */
    if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
        uAlignment = MMHYPER_HEAP_ALIGN_MIN;
    uint32_t cbAligned;
    switch (uAlignment)
    {
        case 8:
        case 16:
        case 32:
            cbAligned = RT_ALIGN(cb, MMHYPER_HEAP_ALIGN_MIN);
            if (!cbAligned || cbAligned < cb)
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        case PAGE_SIZE:
            AssertMsg(RT_ALIGN(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
            cbAligned = RT_ALIGN(cb, PAGE_SIZE);
            if (!cbAligned)
            {
                Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
                AssertMsgFailed(("Nice try.\n"));
                return VERR_INVALID_PARAMETER;
            }
            break;

        default:
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
            AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * Get the heap and statistics.
     */
    PMMHYPERHEAP pHeap = CTXSUFF(pVM->mm.s.pHyperHeap);
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
    if (!pStat)
    {
        Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
        AssertMsgFailed(("Failed to allocate statistics!\n"));
        return VERR_MM_HYPER_NO_MEMORY;
    }
#endif
    if (uAlignment < PAGE_SIZE)
    {
        /*
         * Allocate a chunk.
         */
        PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
        if (pChunk)
        {
#ifdef VBOX_WITH_STATISTICS
            const uint32_t cbChunk = pChunk->offNext
                ? pChunk->offNext
                : CTXSUFF(pHeap->pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
            pStat->cbAllocated += (uint32_t)cbChunk;
            pStat->cbCurAllocated += (uint32_t)cbChunk;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
            pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
#else
            pChunk->offStat = 0;
#endif
            void *pv = pChunk + 1;
            *ppv = pv;
            ASMMemZero32(pv, cbAligned);
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
            return VINF_SUCCESS;
        }
    }
    else
    {
        /*
         * Allocate page aligned memory.
         */
        void *pv = mmHyperAllocPages(pHeap, cbAligned);
        if (pv)
        {
#ifdef VBOX_WITH_STATISTICS
            pStat->cbAllocated += cbAligned;
            pStat->cbCurAllocated += cbAligned;
            if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
                pStat->cbMaxAllocated = pStat->cbCurAllocated;
            pStat->cAllocations++;
#endif
            *ppv = pv;
            /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPPageAlloc zeros it. */
            Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
            return VINF_SUCCESS;
        }
    }

#ifdef VBOX_WITH_STATISTICS
    pStat->cAllocations++;
    pStat->cFailures++;
#endif
    Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
    AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
    return VERR_MM_HYPER_NO_MEMORY;
}
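
/**
 * Minimal usage sketch, illustrative only (the tag and size are made up):
 * @code
 *      void *pv;
 *      int rc = MMHyperAlloc(pVM, 128, 0, MM_TAG_VM, &pv);   // 0 = default 8 byte alignment
 *      if (VBOX_SUCCESS(rc))
 *      {
 *          // ... use the zeroed block ...
 *          rc = MMHyperFree(pVM, pv);                        // see MMHyperFree below
 *      }
 * @endcode
 * Note that page aligned allocations (uAlignment = PAGE_SIZE) cannot be
 * freed at all, see mmHyperAllocPages below.
 */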


/**
 * Allocates a chunk of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param   pHeap       The heap.
 * @param   cb          Size of the memory block to allocate.
 * @param   uAlignment  The alignment specifications for the allocated block.
 * @internal
 */
static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
{
    Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif

    /*
     * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
     */
    if (pHeap->offFreeHead == NIL_OFFSET)
        return NULL;

    /*
     * Small alignments - from the front of the heap.
     *
     * Must split off free chunks at the end to prevent messing up the
     * last free node, from which the page aligned memory is taken at the top.
     */
    PMMHYPERCHUNK pRet = NULL;
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)CTXSUFF(pHeap->pbHeap) + pHeap->offFreeHead);
    while (pFree)
    {
        ASSERT_CHUNK_FREE(pHeap, pFree);
        if (pFree->cb >= cb)
        {
            unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
            if (offAlign)
                offAlign = uAlignment - offAlign;
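            /* Illustrative example: the payload (&pFree->core + 1) is always
               8 byte aligned here, so for uAlignment = 16 offAlign is now
               either 0 or 8 - the number of padding bytes needed in front of
               the payload to satisfy the requested alignment. */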
            if (!offAlign || pFree->cb - offAlign >= cb)
            {
                Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));

                /*
                 * Adjust the node in front.
                 * Because of multiple alignments we need to special case allocation of the first block.
                 */
                if (offAlign)
                {
                    MMHYPERCHUNKFREE Free = *pFree;
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                    {
                        /* just add a bit of memory to it. */
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
                        pPrev->core.offNext += offAlign;
                        AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
                        Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
                    }
                    else
                    {
                        /* make a new head node, mark it USED for simplicity. */
                        PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)CTXSUFF(pHeap->pbHeap);
                        Assert(pPrev == &pFree->core);
                        pPrev->offPrev = 0;
                        MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
                        pPrev->offNext = offAlign;
                        Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
                    }
                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
                    pHeap->cbFree -= offAlign;

                    /* Recreate the pFree node and adjust everything... */
                    pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
                    *pFree = Free;

                    pFree->cb -= offAlign;
                    if (pFree->core.offNext)
                    {
                        pFree->core.offNext -= offAlign;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
                        MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);

                    if (pFree->offNext)
                    {
                        pFree->offNext -= offAlign;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        pNext->offPrev = -(int32_t)pFree->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += offAlign;
                    if (pFree->offPrev)
                    {
                        pFree->offPrev -= offAlign;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                        pPrev->offNext = -pFree->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += offAlign;
                    pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
                    pFree->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pFree);
                    Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
                }

                /*
                 * Split off a new FREE chunk?
                 */
                if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
                {
                    /*
                     * Move the FREE chunk up to make room for the new USED chunk.
                     */
                    const int off = cb + sizeof(MMHYPERCHUNK);
                    PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
                    *pNew = *pFree;
                    pNew->cb -= off;
                    if (pNew->core.offNext)
                    {
                        pNew->core.offNext -= off;
                        PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
                        MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
                        ASSERT_CHUNK(pHeap, pNext);
                    }
                    pNew->core.offPrev = -off;
                    MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);

                    if (pNew->offNext)
                    {
                        pNew->offNext -= off;
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
                        pNext->offPrev = -(int32_t)pNew->offNext;
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                        pHeap->offFreeTail += off;
                    if (pNew->offPrev)
                    {
                        pNew->offPrev -= off;
                        PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
                        pPrev->offNext = -pNew->offPrev;
                        ASSERT_CHUNK_FREE(pHeap, pPrev);
                    }
                    else
                        pHeap->offFreeHead += off;
                    pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
                    pNew->core.offStat = 0;
                    ASSERT_CHUNK_FREE(pHeap, pNew);

                    /*
                     * Update the old FREE node making it a USED node.
                     */
                    pFree->core.offNext = off;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);

                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
                    pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
                }
                else
                {
                    /*
                     * Link out of free list.
                     */
                    if (pFree->offNext)
                    {
                        PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
                        if (pFree->offPrev)
                        {
                            pNext->offPrev += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext += pFree->offNext;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead += pFree->offNext;
                            pNext->offPrev = 0;
                        }
                        ASSERT_CHUNK_FREE(pHeap, pNext);
                    }
                    else
                    {
                        if (pFree->offPrev)
                        {
                            pHeap->offFreeTail += pFree->offPrev;
                            PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
                            pPrev->offNext = 0;
                            ASSERT_CHUNK_FREE(pHeap, pPrev);
                        }
                        else
                        {
                            pHeap->offFreeHead = NIL_OFFSET;
                            pHeap->offFreeTail = NIL_OFFSET;
                        }
                    }

                    Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
                          pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
                    pHeap->cbFree -= pFree->cb;
                    MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
                    pRet = &pFree->core;
                    ASSERT_CHUNK(pHeap, &pFree->core);
                    Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
                }
                Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
                break;
            }
        }

        /* next */
        pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
    }

#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif
    return pRet;
}
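
/**
 * Illustrative picture of the split performed above when the free chunk is
 * big enough (sizes symbolic; hdr = MMHYPERCHUNK header):
 * @verbatim
 *    before:  [hdr|............ free: cb ............]
 *    after:   [hdr| used: requested ][hdr| free: cb - requested - sizeof(MMHYPERCHUNK) ]
 * @endverbatim
 * A remainder too small to hold an aligned MMHYPERCHUNKFREE is not split
 * off; the whole chunk is converted to USED instead.
 */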


/**
 * Allocates one or more pages of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param   pHeap   The heap.
 * @param   cb      Size of the memory block to allocate.
 * @internal
 */
static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
{
    Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));

#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif

    /*
     * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
     */
    if (pHeap->offFreeHead == NIL_OFFSET)
        return NULL;

    /*
     * Page aligned chunks.
     *
     * Page aligned chunks can only be allocated from the last FREE chunk.
     * This is for reasons of simplicity and fragmentation. Page aligned
     * memory must also be allocated in page aligned sizes. Page aligned
     * memory cannot be freed either.
     *
     * So, for this to work, the last FREE chunk needs to end on a page
     * aligned boundary.
     */
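    /* Illustrative example: if the page aligned area ends at
       pbHeap + offPageAligned and the last FREE chunk covers the top
       5 pages, a cb = 2*PAGE_SIZE request returns the topmost 2 pages
       and shrinks that free chunk (and offPageAligned) accordingly. */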
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)CTXSUFF(pHeap->pbHeap) + pHeap->offFreeTail);
    ASSERT_CHUNK_FREE(pHeap, pFree);
    if (    (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
        ||  pFree->cb + sizeof(MMHYPERCHUNK) < cb)
    {
        Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
        return NULL;
    }

    void *pvRet;
    if (pFree->cb > cb)
    {
        /*
         * Simple, just cut the top of the free node and return it.
         */
        pFree->cb -= cb;
        pvRet = (char *)(&pFree->core + 1) + pFree->cb;
        AssertMsg(ALIGNP(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
        Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
        pHeap->cbFree -= cb;
        ASSERT_CHUNK_FREE(pHeap, pFree);
        Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
    }
    else
    {
        /*
         * Unlink the FREE node.
         */
        pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
        Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
        pHeap->cbFree -= pFree->cb;

        /* a scrap of spare memory (unlikely)? Add it to the previous chunk. */
        if (pvRet != (void *)pFree)
        {
            AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate all the heap with page aligned memory?!?\n"));
            PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
            pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
            AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
#ifdef VBOX_WITH_STATISTICS
            PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
            pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
            pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
#endif
            Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
        }

        /* unlink from FREE chain. */
        if (pFree->offPrev)
        {
            pHeap->offFreeTail += pFree->offPrev;
            ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
        }
        else
        {
            pHeap->offFreeTail = NIL_OFFSET;
            pHeap->offFreeHead = NIL_OFFSET;
        }
        Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
    }
    pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)CTXSUFF(pHeap->pbHeap);
    Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));

#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif
    return pvRet;
}


#ifdef VBOX_WITH_STATISTICS
/**
 * Get the statistic record for a tag.
 *
 * @returns Pointer to a stat record.
 * @returns NULL on failure.
 * @param   pHeap   The heap.
 * @param   enmTag  The tag.
 */
static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
{
    /* try to look it up first. */
    PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
    if (!pStat)
    {
        /* try to allocate a new one */
        PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
        if (!pChunk)
            return NULL;
        pStat = (PMMHYPERSTAT)(pChunk + 1);
        pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;

        ASMMemZero32(pStat, sizeof(*pStat));
        pStat->Core.Key = enmTag;
        RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
    }
    if (!pStat->fRegistered)
    {
#ifdef IN_RING3
        mmR3HyperStatRegisterOne(pHeap->pVMHC, pStat);
#else
        /** @todo schedule a HC action. */
#endif
    }
    return pStat;
}

#ifdef IN_RING3
/**
 * Registers statistics with STAM.
 */
static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
{
    if (pStat->fRegistered)
        return;
    const char *pszTag = mmR3GetTagName((MMTAG)pStat->Core.Key);

    char szName[128];
    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cAllocations", pszTag);
    STAMR3Register(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_COUNT, "Number of alloc calls.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cFrees", pszTag);
    STAMR3Register(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_COUNT, "Number of free calls.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cFailures", pszTag);
    STAMR3Register(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_COUNT, "Number of failures.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cbAllocated", pszTag);
    STAMR3Register(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_BYTES, "Total number of allocated bytes.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cbFreed", pszTag);
    STAMR3Register(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_BYTES, "Total number of freed bytes.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cbCurAllocated", pszTag);
    STAMR3Register(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_BYTES, "Number of bytes currently allocated.");

    RTStrPrintf(szName, sizeof(szName), "/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
    STAMR3Register(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, szName, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.");

    pStat->fRegistered = true;
}
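
/*
 * For example (illustrative), assuming mmR3GetTagName() returns "CFGM" for
 * MM_TAG_CFGM, the registration above produces STAM counters such as
 * "/MM/HyperHeap/CFGM/cAllocations" and "/MM/HyperHeap/CFGM/cbCurAllocated".
 */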
#endif

#endif


/**
 * Free memory allocated using MMHyperAlloc().
 * The caller validates the parameters of this request.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 * @param   pv      The memory to free.
 * @remark  Try to avoid freeing hyper memory.
 */
MMDECL(int) MMHyperFree(PVM pVM, void *pv)
{
    Log2(("MMHyperFree: pv=%p\n", pv));
    if (!pv)
        return VINF_SUCCESS;
    AssertMsgReturn(ALIGNP(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
                    ("Invalid pointer %p!\n", pv),
                    VERR_INVALID_POINTER);

    /*
     * Get the heap and stats.
     * Validate the chunk at the same time.
     */
    PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);

    AssertMsgReturn(    (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
                    ||  RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
                    ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
                    ("%p: Not used!\n", pv),
                    VERR_INVALID_POINTER);

    int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
    AssertMsgReturn(    (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
                    && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    /* statistics */
#ifdef VBOX_WITH_STATISTICS
    PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
    AssertMsgReturn(    ALIGNP(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
                    &&  pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#else
    AssertMsgReturn(!pChunk->offStat,
                    ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
                    VERR_INVALID_POINTER);
#endif

    /* The heap structure. */
    PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
    AssertMsgReturn(    !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
                    &&  pChunk->offHeap,
                    ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
                    ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
                    VERR_INVALID_POINTER);
    Assert(pHeap == CTXSUFF(pVM->mm.s.pHyperHeap));

    /* Some more verifications using additional info from pHeap. */
    AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)CTXSUFF(pHeap->pbHeap),
                    ("%p: offPrev=%#RX32!\n", pv, offPrev),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
                    ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
                    VERR_INVALID_POINTER);

    AssertMsgReturn(    (uintptr_t)pv - (uintptr_t)CTXSUFF(pHeap->pbHeap) <= pHeap->offPageAligned,
                    ("Invalid pointer %p! (heap: %p-%p)\n", pv, CTXSUFF(pHeap->pbHeap),
                     (char *)CTXSUFF(pHeap->pbHeap) + pHeap->offPageAligned),
                    VERR_INVALID_POINTER);

#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
    /* calc block size. */
    const uint32_t cbChunk = pChunk->offNext
        ? pChunk->offNext
        : CTXSUFF(pHeap->pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
#endif
#ifdef MMHYPER_HEAP_FREE_POISON
    /* poison the block */
    memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
#endif

#ifdef MMHYPER_HEAP_FREE_DELAY
# ifdef MMHYPER_HEAP_FREE_POISON
    /*
     * Check poison.
     */
    unsigned i = ELEMENTS(pHeap->aDelayedFrees);
    while (i-- > 0)
        if (pHeap->aDelayedFrees[i].offChunk)
        {
            PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
            const size_t cb = pCur->offNext
                ? pCur->offNext - sizeof(*pCur)
                : CTXSUFF(pHeap->pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
            uint8_t *pab = (uint8_t *)(pCur + 1);
            for (unsigned off = 0; off < cb; off++)
                AssertReleaseMsg(pab[off] == 0xCB,
                                 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
                                  pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
        }
# endif /* MMHYPER_HEAP_FREE_POISON */

    /*
     * Delayed freeing.
     */
    int rc = VINF_SUCCESS;
    if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
    {
        PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
        rc = mmHyperFree(pHeap, pChunkFree);
    }
    pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
    pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
    pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % ELEMENTS(pHeap->aDelayedFrees);

#else  /* !MMHYPER_HEAP_FREE_DELAY */
    /*
     * Call the worker.
     */
    int rc = mmHyperFree(pHeap, pChunk);
#endif /* !MMHYPER_HEAP_FREE_DELAY */

    /*
     * Update statistics.
     */
#ifdef VBOX_WITH_STATISTICS
    pStat->cFrees++;
    if (VBOX_SUCCESS(rc))
    {
        pStat->cbFreed += cbChunk;
        pStat->cbCurAllocated -= cbChunk;
    }
    else
        pStat->cFailures++;
#endif

    return rc;
}
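
/*
 * Illustrative only: the delayed-free/poisoning paths above are enabled by
 * compile-time defines (presumably alongside the heap structures in
 * MMInternal.h). The poison byte must be 0xCB to match the release
 * assertion above, e.g.:
 *
 *      #define MMHYPER_HEAP_FREE_DELAY           // queue frees in aDelayedFrees
 *      #define MMHYPER_HEAP_FREE_POISON 0xCB     // fill freed blocks with 0xCB
 */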


/**
 * Frees a memory chunk.
 *
 * @returns VBox status code.
 * @param   pHeap   The heap.
 * @param   pChunk  The memory chunk to free.
 */
static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
{
    Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
    PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;

#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif

    /*
     * Insert into the free list (which is sorted on address).
     *
     * We'll search towards the end of the heap to locate the
     * closest FREE chunk.
     */
    PMMHYPERCHUNKFREE pLeft = NULL;
    PMMHYPERCHUNKFREE pRight = NULL;
    if (pHeap->offFreeTail != NIL_OFFSET)
    {
        if (pFree->core.offNext)
        {
            pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
            ASSERT_CHUNK(pHeap, &pRight->core);
            while (!MMHYPERCHUNK_ISFREE(&pRight->core))
            {
                if (!pRight->core.offNext)
                {
                    pRight = NULL;
                    break;
                }
                pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
                ASSERT_CHUNK(pHeap, &pRight->core);
            }
        }
        if (!pRight)
            pRight = (PMMHYPERCHUNKFREE)((char *)CTXSUFF(pHeap->pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
        if (pRight)
        {
            ASSERT_CHUNK_FREE(pHeap, pRight);
            if (pRight->offPrev)
            {
                pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
                ASSERT_CHUNK_FREE(pHeap, pLeft);
            }
        }
    }
    if (pLeft == pFree)
    {
        AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
        return VERR_INVALID_POINTER;
    }
    pChunk->offStat = 0;

    /*
     * Head free chunk list?
     */
    if (!pLeft)
    {
        MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
        pFree->offPrev = 0;
        pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)CTXSUFF(pHeap->pbHeap);
        if (pRight)
        {
            pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
            pRight->offPrev = -(int32_t)pFree->offNext;
        }
        else
        {
            pFree->offNext = 0;
            pHeap->offFreeTail = pHeap->offFreeHead;
        }
        Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
    }
    else
    {
        /*
         * Can we merge with left hand free chunk?
         */
        if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
        {
            if (pFree->core.offNext)
            {
                pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
                MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
            }
            else
                pLeft->core.offNext = 0;
            pFree = pLeft;
            Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
            pHeap->cbFree -= pLeft->cb;
            Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
        }
        /*
         * No, just link it into the free list then.
         */
        else
        {
            MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
            pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
            pLeft->offNext = -pFree->offPrev;
            if (pRight)
            {
                pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
                pRight->offPrev = -(int32_t)pFree->offNext;
            }
            else
            {
                pFree->offNext = 0;
                pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)CTXSUFF(pHeap->pbHeap);
            }
            Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
        }
    }

    /*
     * Can we merge with right hand free chunk?
     */
    if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
    {
        /* core */
        if (pRight->core.offNext)
        {
            pFree->core.offNext += pRight->core.offNext;
            PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
            MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
            ASSERT_CHUNK(pHeap, pNext);
        }
        else
            pFree->core.offNext = 0;

        /* free */
        if (pRight->offNext)
        {
            pFree->offNext += pRight->offNext;
            ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
        }
        else
        {
            pFree->offNext = 0;
            pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)CTXSUFF(pHeap->pbHeap);
        }
        Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
        pHeap->cbFree -= pRight->cb;
        Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
    }

    /* calculate the size. */
    if (pFree->core.offNext)
        pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
    else
        pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)CTXSUFF(pHeap->pbHeap)) - sizeof(MMHYPERCHUNK);
    Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
    pHeap->cbFree += pFree->cb;
    ASSERT_CHUNK_FREE(pHeap, pFree);

#ifdef MMHYPER_HEAP_STRICT
    mmr3HyperHeapCheck(pHeap);
#endif
    return VINF_SUCCESS;
}
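
/*
 * Illustrative summary of the coalescing cases handled above (letters are
 * chunks, B is the one being freed):
 *
 *      [A free][B][C free]  ->  [A+B+C free]        merge left, then right
 *      [A used][B][C free]  ->  [A used][B+C free]
 *      [A free][B][C used]  ->  [A+B free][C used]
 *      [A used][B][C used]  ->  [A used][B free][C used]
 */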


#ifdef MMHYPER_HEAP_STRICT
/**
 * Internal consistency check.
 */
static void mmr3HyperHeapCheck(PMMHYPERHEAP pHeap)
{
    PMMHYPERCHUNKFREE pPrev = NULL;
    PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)CTXSUFF(pHeap->pbHeap);
    for (;;)
    {
        if (MMHYPERCHUNK_ISUSED(&pCur->core))
            ASSERT_CHUNK_USED(pHeap, &pCur->core);
        else
            ASSERT_CHUNK_FREE(pHeap, pCur);
        if (pPrev)
            AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
                      ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));

        /* next */
        if (!pCur->core.offNext)
            break;
        pPrev = pCur;
        pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
    }
}
#endif


#ifdef DEBUG
/**
 * Dumps the hypervisor heap to Log.
 * @param   pVM     VM Handle.
 */
MMDECL(void) MMHyperHeapDump(PVM pVM)
{
    Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
    PMMHYPERHEAP pHeap = CTXSUFF(pVM->mm.s.pHyperHeap);
    PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)CTXSUFF(pHeap->pbHeap);
    for (;;)
    {
        if (MMHYPERCHUNK_ISUSED(&pCur->core))
        {
            if (pCur->core.offStat)
            {
                PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
                const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
#ifdef IN_RING3
                Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
                     pCur, (uintptr_t)pCur - (uintptr_t)CTXSUFF(pHeap->pbHeap),
                     pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
                     mmR3GetTagName((MMTAG)pStat->Core.Key), pszSelf));
#else
                Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
                     pCur, (uintptr_t)pCur - (uintptr_t)CTXSUFF(pHeap->pbHeap),
                     pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
                     (MMTAG)pStat->Core.Key, pszSelf));
#endif
            }
            else
                Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
                     pCur, (uintptr_t)pCur - (uintptr_t)CTXSUFF(pHeap->pbHeap),
                     pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
        }
        else
            Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
                 pCur, (uintptr_t)pCur - (uintptr_t)CTXSUFF(pHeap->pbHeap),
                 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));

        /* next */
        if (!pCur->core.offNext)
            break;
        pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
    }
    Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
}
#endif


/**
 * Query the amount of free memory in the hypervisor heap.
 *
 * @returns Number of free bytes in the hypervisor heap.
 */
MMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
{
    return CTXSUFF(pVM->mm.s.pHyperHeap)->cbFree;
}

/**
 * Query the size of the hypervisor heap.
 *
 * @returns The size of the hypervisor heap in bytes.
 */
MMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
{
    return CTXSUFF(pVM->mm.s.pHyperHeap)->cbHeap;
}


/**
 * Query the address and size of the hypervisor memory area.
 *
 * @returns Base address of the hypervisor area.
 * @param   pVM     VM Handle.
 * @param   pcb     Where to store the size of the hypervisor area. (out)
 */
MMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
{
    if (pcb)
        *pcb = pVM->mm.s.cbHyperArea;
    return pVM->mm.s.pvHyperAreaGC;
}


/**
 * Checks if an address is within the hypervisor memory area.
 *
 * @returns true if inside.
 * @returns false if outside.
 * @param   pVM     VM handle.
 * @param   GCPtr   The pointer to check.
 */
MMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
{
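    /* Single unsigned compare range check: if GCPtr is below the base the
       subtraction wraps to a huge value, so both below-base and above-end
       addresses fail the '< cbHyperArea' test. */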
    return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
}

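
/**
 * Minimal usage sketch (illustrative only) of the query APIs above:
 * @code
 *      size_t cbArea;
 *      RTGCPTR GCPtrBase = MMHyperGetArea(pVM, &cbArea);
 *      bool fInside = MMHyperIsInsideArea(pVM, GCPtrBase);     // true
 *      size_t cbFree = MMHyperHeapGetFreeSize(pVM);
 *      size_t cbHeap = MMHyperHeapGetSize(pVM);
 * @endcode
 */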