VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp@20088

Last change on this file since 20088 was 19667, checked in by vboxsync, 16 years ago

Cleaned up

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 47.1 KB
1/* $Id: MMAllHyper.cpp 19667 2009-05-13 15:49:38Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include "MMInternal.h"
30#include <VBox/vm.h>
31
32#include <VBox/err.h>
33#include <VBox/param.h>
34#include <iprt/assert.h>
35#include <VBox/log.h>
36#include <iprt/asm.h>
37#include <iprt/string.h>
38
39
40/*******************************************************************************
41* Defined Constants And Macros *
42*******************************************************************************/
43#define ASSERT_L(u1, u2) AssertMsg((u1) < (u2), ("u1=%#x u2=%#x\n", u1, u2))
44#define ASSERT_LE(u1, u2) AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
45#define ASSERT_GE(u1, u2) AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
46#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
47
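/* A quick orientation for the assertion macros below: all chunk fields are
 * byte offsets relative to the chunk itself. MMHYPERCHUNK_GET_OFFPREV() is
 * <= 0 (0 only for the very first chunk on the heap), offNext is > 0 except
 * for the last chunk, offHeap points back at the MMHYPERHEAP structure, and
 * offStat points at the per-tag statistics record (0 when unused and always
 * 0 for free chunks). Free chunks additionally carry their own offPrev and
 * offNext free-list links (also self-relative byte offsets) plus cb, the
 * usable size of the chunk body. */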
48#define ASSERT_OFFPREV(pHeap, pChunk) \
49 do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
50 Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
51 AssertMsg( MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
52 || (uint8_t *)(pChunk) == (pHeap)->CTX_SUFF(pbHeap), \
53 ("pChunk=%p pvHyperHeap=%p\n", (pChunk), (pHeap)->CTX_SUFF(pbHeap))); \
54 } while (0)
55
56#define ASSERT_OFFNEXT(pHeap, pChunk) \
57 do { ASSERT_ALIGN((pChunk)->offNext); \
58 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
59 } while (0)
60
61#define ASSERT_OFFHEAP(pHeap, pChunk) \
62 do { Assert((pChunk)->offHeap); \
63 AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
64 ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
65 Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
66 } while (0)
67
68#ifdef VBOX_WITH_STATISTICS
69#define ASSERT_OFFSTAT(pHeap, pChunk) \
70 do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
71 Assert(!(pChunk)->offStat); \
72 else if ((pChunk)->offStat) \
73 { \
74 Assert((pChunk)->offStat); \
75 AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
76 uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
77 AssertMsg(uPtr - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) < (pHeap)->offPageAligned, \
78 ("%p - %p < %RX32\n", uPtr, (pHeap)->CTX_SUFF(pbHeap), (pHeap)->offPageAligned)); \
79 } \
80 } while (0)
81#else
82#define ASSERT_OFFSTAT(pHeap, pChunk) \
83 do { Assert(!(pChunk)->offStat); \
84 } while (0)
85#endif
86
87#define ASSERT_CHUNK(pHeap, pChunk) \
88 do { ASSERT_OFFNEXT(pHeap, pChunk); \
89 ASSERT_OFFPREV(pHeap, pChunk); \
90 ASSERT_OFFHEAP(pHeap, pChunk); \
91 ASSERT_OFFSTAT(pHeap, pChunk); \
92 } while (0)
93#define ASSERT_CHUNK_USED(pHeap, pChunk) \
94 do { ASSERT_OFFNEXT(pHeap, pChunk); \
95 ASSERT_OFFPREV(pHeap, pChunk); \
96 Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
97 } while (0)
98
99#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
100 do { ASSERT_ALIGN((pChunk)->offPrev); \
101 ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
102 Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
103 AssertMsg( (pChunk)->offPrev \
104 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeHead, \
105 ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
106 (pHeap)->offFreeHead)); \
107 } while (0)
108
109#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
110 do { ASSERT_ALIGN((pChunk)->offNext); \
111 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
112 Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
113 AssertMsg( (pChunk)->offNext \
114 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeTail, \
115 ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
116 (pHeap)->offFreeTail)); \
117 } while (0)
118
119#define ASSERT_FREE_CB(pHeap, pChunk) \
120 do { ASSERT_ALIGN((pChunk)->cb); \
121 Assert((pChunk)->cb > 0); \
122 if ((pChunk)->core.offNext) \
123 AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
124 ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
125 else \
126 ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
127 } while (0)
128
129#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
130 do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
131 Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
132 ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
133 ASSERT_FREE_OFFPREV(pHeap, pChunk); \
134 ASSERT_FREE_CB(pHeap, pChunk); \
135 } while (0)
136
137
138/*******************************************************************************
139* Internal Functions *
140*******************************************************************************/
141static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
142static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
143#ifdef VBOX_WITH_STATISTICS
144static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
145#ifdef IN_RING3
146static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
147#endif
148#endif
149static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
150#ifdef MMHYPER_HEAP_STRICT
151static void mmHyperHeapCheck(PMMHYPERHEAP pHeap);
152#endif
153
154/**
155 * Locks the hypervisor heap.
156 * This might call back to Ring-3 in order to deal with lock contention in GC and R0.
157 *
158 * @param pVM The VM handle.
159 */
160static int mmHyperLock(PVM pVM)
161{
162 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
163
164#ifdef IN_RING3
165 if (!PDMCritSectIsInitialized(&pHeap->Lock))
166 return VINF_SUCCESS; /* early init */
167#else
168 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
169#endif
170 int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
171#ifdef IN_RC
172 if (rc == VERR_SEM_BUSY)
173 rc = VMMGCCallHost(pVM, VMMCALLHOST_MMHYPER_LOCK, 0);
174#elif defined(IN_RING0)
175 if (rc == VERR_SEM_BUSY)
176 rc = VMMR0CallHost(pVM, VMMCALLHOST_MMHYPER_LOCK, 0);
177#endif
178 AssertRC(rc);
179 return rc;
180}
181
182
183/**
184 * Unlocks the hypervisor heap.
185 *
186 * @param pVM The VM handle.
187 */
188static void mmHyperUnlock(PVM pVM)
189{
190 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
191
192#ifdef IN_RING3
193 if (!PDMCritSectIsInitialized(&pHeap->Lock))
194 return; /* early init */
195#endif
196 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
197 PDMCritSectLeave(&pHeap->Lock);
198}
199
200/**
201 * Allocates memory in the Hypervisor (RC VMM) area.
202 * The returned memory is of course zeroed.
203 *
204 * @returns VBox status code.
205 * @param pVM The VM to operate on.
206 * @param cb Number of bytes to allocate.
207 * @param uAlignment Required memory alignment in bytes.
208 * Values are 0,8,16,32 and PAGE_SIZE.
209 * 0 -> default alignment, i.e. 8 bytes.
210 * @param enmTag The statistics tag.
211 * @param ppv Where to store the address to the allocated
212 * memory.
213 */
214static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
215{
216 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
217
218 /*
219 * Validate input and adjust it to reasonable values.
220 */
221 if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
222 uAlignment = MMHYPER_HEAP_ALIGN_MIN;
223 uint32_t cbAligned;
224 switch (uAlignment)
225 {
226 case 8:
227 case 16:
228 case 32:
229 cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
230 if (!cbAligned || cbAligned < cb)
231 {
232 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
233 AssertMsgFailed(("Nice try.\n"));
234 return VERR_INVALID_PARAMETER;
235 }
236 break;
237
238 case PAGE_SIZE:
239 AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
240 cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
241 if (!cbAligned)
242 {
243 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
244 AssertMsgFailed(("Nice try.\n"));
245 return VERR_INVALID_PARAMETER;
246 }
247 break;
248
249 default:
250 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
251 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
252 return VERR_INVALID_PARAMETER;
253 }
254
255
256 /*
257 * Get heap and statistics.
258 */
259 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
260#ifdef VBOX_WITH_STATISTICS
261 PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
262 if (!pStat)
263 {
264 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
265 AssertMsgFailed(("Failed to allocate statistics!\n"));
266 return VERR_MM_HYPER_NO_MEMORY;
267 }
268#endif
269 if (uAlignment < PAGE_SIZE)
270 {
271 /*
272 * Allocate a chunk.
273 */
274 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
275 if (pChunk)
276 {
277#ifdef VBOX_WITH_STATISTICS
278 const uint32_t cbChunk = pChunk->offNext
279 ? pChunk->offNext
280 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
281 pStat->cbAllocated += (uint32_t)cbChunk;
282 pStat->cbCurAllocated += (uint32_t)cbChunk;
283 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
284 pStat->cbMaxAllocated = pStat->cbCurAllocated;
285 pStat->cAllocations++;
286 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
287#else
288 pChunk->offStat = 0;
289#endif
290 void *pv = pChunk + 1;
291 *ppv = pv;
292 ASMMemZero32(pv, cbAligned);
293 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
294 return VINF_SUCCESS;
295 }
296 }
297 else
298 {
299 /*
300 * Allocate page aligned memory.
301 */
302 void *pv = mmHyperAllocPages(pHeap, cbAligned);
303 if (pv)
304 {
305#ifdef VBOX_WITH_STATISTICS
306 pStat->cbAllocated += cbAligned;
307 pStat->cbCurAllocated += cbAligned;
308 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
309 pStat->cbMaxAllocated = pStat->cbCurAllocated;
310 pStat->cAllocations++;
311#endif
312 *ppv = pv;
313 /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPPageAlloc zeros it. */
314 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
315 return VINF_SUCCESS;
316 }
317 }
318
319#ifdef VBOX_WITH_STATISTICS
320 pStat->cAllocations++;
321 pStat->cFailures++;
322#endif
323 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
324 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
325 return VERR_MM_HYPER_NO_MEMORY;
326}
327
328/**
329 * Wrapper for mmHyperAllocInternal
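 *
 * Illustrative usage only; MyStruct and the chosen tag stand in for the
 * caller's own type and MM_TAG_* value:
 * @code
 *     void *pv;
 *     int rc = MMHyperAlloc(pVM, sizeof(MyStruct), 0, MM_TAG_MM, &pv);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // use the zeroed block ...
 *         MMHyperFree(pVM, pv);
 *     }
 * @endcode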
330 */
331VMMDECL(int) MMHyperAlloc(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
332{
333 int rc;
334
335 rc = mmHyperLock(pVM);
336 AssertRCReturn(rc, rc);
337
338 rc = mmHyperAllocInternal(pVM, cb, uAlignment, enmTag, ppv);
339
340 mmHyperUnlock(pVM);
341 return rc;
342}
343
344/**
345 * Allocates a chunk of memory from the specified heap.
346 * The caller validates the parameters of this request.
347 *
348 * @returns Pointer to the allocated chunk.
349 * @returns NULL on failure.
350 * @param pHeap The heap.
351 * @param cb Size of the memory block to allocate.
352 * @param uAlignment The alignment specifications for the allocated block.
353 * @internal
354 */
355static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
356{
357 Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
358#ifdef MMHYPER_HEAP_STRICT
359 mmHyperHeapCheck(pHeap);
360#endif
361#ifdef MMHYPER_HEAP_STRICT_FENCE
362 uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
363 cb += cbFence;
364#endif
365
366 /*
367 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
368 */
369 if (pHeap->offFreeHead == NIL_OFFSET)
370 return NULL;
371
372 /*
373 * Small alignments - from the front of the heap.
374 *
375 * Must split off free chunks at the end so we don't mess up the
376 * last free node, from the top of which the page aligned memory is taken.
377 */
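/* Example of the alignment fix-up performed below: if the payload address
 * (&pFree->core + 1) ends in 0x...28 and uAlignment is 32, then
 * offAlign = 32 - (0x28 & 31) = 24, so the free chunk is shifted up by 24
 * bytes (the slack is handed to the chunk in front, or turned into a small
 * USED head chunk) before the allocation is carved out of it. */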
378 PMMHYPERCHUNK pRet = NULL;
379 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
380 while (pFree)
381 {
382 ASSERT_CHUNK_FREE(pHeap, pFree);
383 if (pFree->cb >= cb)
384 {
385 unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
386 if (offAlign)
387 offAlign = uAlignment - offAlign;
388 if (!offAlign || pFree->cb - offAlign >= cb)
389 {
390 Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));
391
392 /*
393 * Adjust the node in front.
394 * Because of multiple alignments we need to special case allocation of the first block.
395 */
396 if (offAlign)
397 {
398 MMHYPERCHUNKFREE Free = *pFree;
399 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
400 {
401 /* just add a bit of memory to it. */
402 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
403 pPrev->core.offNext += offAlign;
404 AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
405 Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
406 }
407 else
408 {
409 /* make new head node, mark it USED for simplicity. */
410 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
411 Assert(pPrev == &pFree->core);
412 pPrev->offPrev = 0;
413 MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
414 pPrev->offNext = offAlign;
415 Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
416
417 }
418 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
419 pHeap->cbFree -= offAlign;
420
421 /* Recreate the pFree node, adjusting everything... */
422 pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
423 *pFree = Free;
424
425 pFree->cb -= offAlign;
426 if (pFree->core.offNext)
427 {
428 pFree->core.offNext -= offAlign;
429 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
430 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
431 ASSERT_CHUNK(pHeap, pNext);
432 }
433 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
434 MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);
435
436 if (pFree->offNext)
437 {
438 pFree->offNext -= offAlign;
439 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
440 pNext->offPrev = -(int32_t)pFree->offNext;
441 ASSERT_CHUNK_FREE(pHeap, pNext);
442 }
443 else
444 pHeap->offFreeTail += offAlign;
445 if (pFree->offPrev)
446 {
447 pFree->offPrev -= offAlign;
448 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
449 pPrev->offNext = -pFree->offPrev;
450 ASSERT_CHUNK_FREE(pHeap, pPrev);
451 }
452 else
453 pHeap->offFreeHead += offAlign;
454 pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
455 pFree->core.offStat = 0;
456 ASSERT_CHUNK_FREE(pHeap, pFree);
457 Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
458 }
459
460 /*
461 * Split off a new FREE chunk?
462 */
463 if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
464 {
465 /*
466 * Move the FREE chunk up to make room for the new USED chunk.
467 */
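/* Resulting layout: the current header plus cb payload bytes become the new
 * USED chunk, and a fresh FREE header is placed off = cb + sizeof(MMHYPERCHUNK)
 * bytes further up, inheriting whatever remains of the old free block. */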
468 const int off = cb + sizeof(MMHYPERCHUNK);
469 PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
470 *pNew = *pFree;
471 pNew->cb -= off;
472 if (pNew->core.offNext)
473 {
474 pNew->core.offNext -= off;
475 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
476 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
477 ASSERT_CHUNK(pHeap, pNext);
478 }
479 pNew->core.offPrev = -off;
480 MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);
481
482 if (pNew->offNext)
483 {
484 pNew->offNext -= off;
485 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
486 pNext->offPrev = -(int32_t)pNew->offNext;
487 ASSERT_CHUNK_FREE(pHeap, pNext);
488 }
489 else
490 pHeap->offFreeTail += off;
491 if (pNew->offPrev)
492 {
493 pNew->offPrev -= off;
494 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
495 pPrev->offNext = -pNew->offPrev;
496 ASSERT_CHUNK_FREE(pHeap, pPrev);
497 }
498 else
499 pHeap->offFreeHead += off;
500 pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
501 pNew->core.offStat = 0;
502 ASSERT_CHUNK_FREE(pHeap, pNew);
503
504 /*
505 * Update the old FREE node making it a USED node.
506 */
507 pFree->core.offNext = off;
508 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
509
510
511 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
512 pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
513 pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
514 pRet = &pFree->core;
515 ASSERT_CHUNK(pHeap, &pFree->core);
516 Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
517 }
518 else
519 {
520 /*
521 * Link out of free list.
522 */
523 if (pFree->offNext)
524 {
525 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
526 if (pFree->offPrev)
527 {
528 pNext->offPrev += pFree->offPrev;
529 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
530 pPrev->offNext += pFree->offNext;
531 ASSERT_CHUNK_FREE(pHeap, pPrev);
532 }
533 else
534 {
535 pHeap->offFreeHead += pFree->offNext;
536 pNext->offPrev = 0;
537 }
538 ASSERT_CHUNK_FREE(pHeap, pNext);
539 }
540 else
541 {
542 if (pFree->offPrev)
543 {
544 pHeap->offFreeTail += pFree->offPrev;
545 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
546 pPrev->offNext = 0;
547 ASSERT_CHUNK_FREE(pHeap, pPrev);
548 }
549 else
550 {
551 pHeap->offFreeHead = NIL_OFFSET;
552 pHeap->offFreeTail = NIL_OFFSET;
553 }
554 }
555
556 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
557 pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
558 pHeap->cbFree -= pFree->cb;
559 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
560 pRet = &pFree->core;
561 ASSERT_CHUNK(pHeap, &pFree->core);
562 Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
563 }
564 Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
565 break;
566 }
567 }
568
569 /* next */
570 pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
571 }
572
573#ifdef MMHYPER_HEAP_STRICT_FENCE
574 uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
575 uint32_t *pu32EndReal = pRet->offNext
576 ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
577 : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
578 cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
579 ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
580 pu32EndReal[-1] = cbFence;
581#endif
582#ifdef MMHYPER_HEAP_STRICT
583 mmHyperHeapCheck(pHeap);
584#endif
585 return pRet;
586}
587
588
589/**
590 * Allocates one or more pages of memory from the specified heap.
591 * The caller validates the parameters of this request.
592 *
593 * @returns Pointer to the allocated chunk.
594 * @returns NULL on failure.
595 * @param pHeap The heap.
596 * @param cb Size of the memory block to allocate.
597 * @internal
598 */
599static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
600{
601 Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));
602
603#ifdef MMHYPER_HEAP_STRICT
604 mmHyperHeapCheck(pHeap);
605#endif
606
607 /*
608 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
609 */
610 if (pHeap->offFreeHead == NIL_OFFSET)
611 return NULL;
612
613 /*
614 * Page aligned chunks.
615 *
616 * Page aligned chunks can only be allocated from the last FREE chunk.
617 * This is for reasons of simplicity and to limit fragmentation. Page aligned memory
618 * must also be allocated in page aligned sizes. Page aligned memory cannot
619 * be freed either.
620 *
621 * So, for this to work, the last FREE chunk needs to end on a page aligned
622 * boundary.
623 */
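/* In other words: ordinary chunks grow from the bottom of the heap, page
 * aligned allocations are carved from the top of the last FREE chunk, and
 * pHeap->offPageAligned (updated at the end of this function) marks the
 * boundary between the chunk area and the page aligned area. */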
624 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
625 ASSERT_CHUNK_FREE(pHeap, pFree);
626 if ( (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
627 || pFree->cb + sizeof(MMHYPERCHUNK) < cb)
628 {
629 Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
630 return NULL;
631 }
632
633 void *pvRet;
634 if (pFree->cb > cb)
635 {
636 /*
637 * Simple, just cut the top of the free node and return it.
638 */
639 pFree->cb -= cb;
640 pvRet = (char *)(&pFree->core + 1) + pFree->cb;
641 AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
642 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
643 pHeap->cbFree -= cb;
644 ASSERT_CHUNK_FREE(pHeap, pFree);
645 Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
646 }
647 else
648 {
649 /*
650 * Unlink the FREE node.
651 */
652 pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
653 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
654 pHeap->cbFree -= pFree->cb;
655
656 /* A scrap of spare memory (unlikely)? Add it to the previous chunk. */
657 if (pvRet != (void *)pFree)
658 {
659 AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate all of the heap with page aligned memory?!?\n"));
660 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
661 pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
662 AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
663#ifdef VBOX_WITH_STATISTICS
664 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
665 pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
666 pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
667#endif
668 Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
669 }
670
671 /* unlink from FREE chain. */
672 if (pFree->offPrev)
673 {
674 pHeap->offFreeTail += pFree->offPrev;
675 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
676 }
677 else
678 {
679 pHeap->offFreeTail = NIL_OFFSET;
680 pHeap->offFreeHead = NIL_OFFSET;
681 }
682 Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
683 }
684 pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
685 Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));
686
687#ifdef MMHYPER_HEAP_STRICT
688 mmHyperHeapCheck(pHeap);
689#endif
690 return pvRet;
691}
692
693#ifdef VBOX_WITH_STATISTICS
694
695/**
696 * Get the statistic record for a tag.
697 *
698 * @returns Pointer to a stat record.
699 * @returns NULL on failure.
700 * @param pHeap The heap.
701 * @param enmTag The tag.
702 */
703static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
704{
705 /* try to look it up first. */
706 PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
707 if (!pStat)
708 {
709 /* try to allocate a new one */
710 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
711 if (!pChunk)
712 return NULL;
713 pStat = (PMMHYPERSTAT)(pChunk + 1);
714 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
715
716 ASMMemZero32(pStat, sizeof(*pStat));
717 pStat->Core.Key = enmTag;
718 RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
719 }
720 if (!pStat->fRegistered)
721 {
722# ifdef IN_RING3
723 mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
724# else
725 /** @todo schedule a R3 action. */
726# endif
727 }
728 return pStat;
729}
730
731
732# ifdef IN_RING3
733/**
734 * Registers statistics with STAM.
735 *
736 */
737static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
738{
739 if (pStat->fRegistered)
740 return;
741 const char *pszTag = mmR3GetTagName((MMTAG)pStat->Core.Key);
742 STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
743 STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
744 STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
745 STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
746 STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
747 STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
748 STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
749 pStat->fRegistered = true;
750}
751# endif /* IN_RING3 */
752
753#endif /* VBOX_WITH_STATISTICS */
754
755
756/**
757 * Free memory allocated using MMHyperAlloc().
758 * The caller validates the parameters of this request.
759 *
760 * @returns VBox status code.
761 * @param pVM The VM to operate on.
762 * @param pv The memory to free.
763 * @remark Try to avoid freeing hyper memory.
764 */
765static int mmHyperFreeInternal(PVM pVM, void *pv)
766{
767 Log2(("MMHyperFree: pv=%p\n", pv));
768 if (!pv)
769 return VINF_SUCCESS;
770 AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
771 ("Invalid pointer %p!\n", pv),
772 VERR_INVALID_POINTER);
773
774 /*
775 * Get the heap and stats.
776 * Validate the chunk at the same time.
777 */
778 PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);
779
780 AssertMsgReturn( (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
781 || RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
782 ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
783 VERR_INVALID_POINTER);
784
785 AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
786 ("%p: Not used!\n", pv),
787 VERR_INVALID_POINTER);
788
789 int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
790 AssertMsgReturn( (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
791 && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
792 ("%p: offPrev=%#RX32!\n", pv, offPrev),
793 VERR_INVALID_POINTER);
794
795 /* statistics */
796#ifdef VBOX_WITH_STATISTICS
797 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
798 AssertMsgReturn( RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
799 && pChunk->offStat,
800 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
801 VERR_INVALID_POINTER);
802#else
803 AssertMsgReturn(!pChunk->offStat,
804 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
805 VERR_INVALID_POINTER);
806#endif
807
808 /* The heap structure. */
809 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
810 AssertMsgReturn( !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
811 && pChunk->offHeap,
812 ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
813 VERR_INVALID_POINTER);
814
815 AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
816 ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
817 VERR_INVALID_POINTER);
818 Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap));
819
820 /* Some more verifications using additional info from pHeap. */
821 AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
822 ("%p: offPrev=%#RX32!\n", pv, offPrev),
823 VERR_INVALID_POINTER);
824
825 AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
826 ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
827 VERR_INVALID_POINTER);
828
829 AssertMsgReturn( (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
830 ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
831 (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
832 VERR_INVALID_POINTER);
833
834#ifdef MMHYPER_HEAP_STRICT
835 mmHyperHeapCheck(pHeap);
836#endif
837
838#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
839 /* calc block size. */
840 const uint32_t cbChunk = pChunk->offNext
841 ? pChunk->offNext
842 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
843#endif
844#ifdef MMHYPER_HEAP_FREE_POISON
845 /* poison the block */
846 memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
847#endif
848
849#ifdef MMHYPER_HEAP_FREE_DELAY
850# ifdef MMHYPER_HEAP_FREE_POISON
851 /*
852 * Check poison.
853 */
854 unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
855 while (i-- > 0)
856 if (pHeap->aDelayedFrees[i].offChunk)
857 {
858 PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
859 const size_t cb = pCur->offNext
860 ? pCur->offNext - sizeof(*pCur)
861 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
862 uint8_t *pab = (uint8_t *)(pCur + 1);
863 for (unsigned off = 0; off < cb; off++)
864 AssertReleaseMsg(pab[off] == 0xCB,
865 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
866 pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
867 }
868# endif /* MMHYPER_HEAP_FREE_POISON */
869
870 /*
871 * Delayed freeing.
872 */
873 int rc = VINF_SUCCESS;
874 if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
875 {
876 PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
877 rc = mmHyperFree(pHeap, pChunkFree);
878 }
879 pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
880 pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
881 pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);
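 /* Net effect of the ring buffer above: each free is deferred until
 * RT_ELEMENTS(aDelayedFrees) further frees have happened, giving the poison
 * check at the top of this block a window in which to catch writes to
 * memory that has already been freed. */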
882
883#else /* !MMHYPER_HEAP_FREE_POISON */
884 /*
885 * Call the worker.
886 */
887 int rc = mmHyperFree(pHeap, pChunk);
888#endif /* !MMHYPER_HEAP_FREE_POISON */
889
890 /*
891 * Update statistics.
892 */
893#ifdef VBOX_WITH_STATISTICS
894 pStat->cFrees++;
895 if (RT_SUCCESS(rc))
896 {
897 pStat->cbFreed += cbChunk;
898 pStat->cbCurAllocated -= cbChunk;
899 }
900 else
901 pStat->cFailures++;
902#endif
903
904 return rc;
905}
906
907
908/**
909 * Wrapper for mmHyperFreeInternal
910 */
911VMMDECL(int) MMHyperFree(PVM pVM, void *pv)
912{
913 int rc;
914
915 rc = mmHyperLock(pVM);
916 AssertRCReturn(rc, rc);
917
918 rc = mmHyperFreeInternal(pVM, pv);
919
920 mmHyperUnlock(pVM);
921 return rc;
922}
923
924
925/**
926 * Frees a memory chunk.
927 *
928 * @returns VBox status code.
929 * @param pHeap The heap.
930 * @param pChunk The memory chunk to free.
931 */
932static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
933{
934 Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
935 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;
936
937 /*
938 * Insert into the free list (which is sorted on address).
939 *
940 * We'll search towards the end of the heap to locate the
941 * closest FREE chunk.
942 */
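 /* The scan below walks forward over USED chunks via core.offNext until it
 * finds a FREE one (pRight); pLeft is then simply pRight's free-list
 * predecessor. If no FREE chunk follows, the free list tail is used instead
 * (see the @todo further down questioning that fallback). */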
943 PMMHYPERCHUNKFREE pLeft = NULL;
944 PMMHYPERCHUNKFREE pRight = NULL;
945 if (pHeap->offFreeTail != NIL_OFFSET)
946 {
947 if (pFree->core.offNext)
948 {
949 pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
950 ASSERT_CHUNK(pHeap, &pRight->core);
951 while (!MMHYPERCHUNK_ISFREE(&pRight->core))
952 {
953 if (!pRight->core.offNext)
954 {
955 pRight = NULL;
956 break;
957 }
958 pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
959 ASSERT_CHUNK(pHeap, &pRight->core);
960 }
961 }
962 if (!pRight)
963 pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
964 if (pRight)
965 {
966 ASSERT_CHUNK_FREE(pHeap, pRight);
967 if (pRight->offPrev)
968 {
969 pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
970 ASSERT_CHUNK_FREE(pHeap, pLeft);
971 }
972 }
973 }
974 if (pLeft == pFree)
975 {
976 AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
977 return VERR_INVALID_POINTER;
978 }
979 pChunk->offStat = 0;
980
981 /*
982 * Head free chunk list?
983 */
984 if (!pLeft)
985 {
986 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
987 pFree->offPrev = 0;
988 pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
989 if (pRight)
990 {
991 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
992 pRight->offPrev = -(int32_t)pFree->offNext;
993 }
994 else
995 {
996 pFree->offNext = 0;
997 pHeap->offFreeTail = pHeap->offFreeHead;
998 }
999 Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
1000 }
1001 else
1002 {
1003 /*
1004 * Can we merge with left hand free chunk?
1005 */
1006 if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
1007 {
1008 if (pFree->core.offNext)
1009 {
1010 pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
1011 MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
1012 }
1013 else
1014 pLeft->core.offNext = 0;
1015 pFree = pLeft;
1016 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
1017 pHeap->cbFree -= pLeft->cb;
1018 Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
1019 }
1020 /*
1021 * No, just link it into the free list then.
1022 */
1023 else
1024 {
1025 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1026 pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
1027 pLeft->offNext = -pFree->offPrev;
1028 if (pRight)
1029 {
1030 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1031 pRight->offPrev = -(int32_t)pFree->offNext;
1032 }
1033 else
1034 {
1035 pFree->offNext = 0;
1036 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1037 }
1038 Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
1039 }
1040 }
1041
1042 /*
1043 * Can we merge with right hand free chunk?
1044 */
1045 if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
1046 {
1047 /* core */
1048 if (pRight->core.offNext)
1049 {
1050 pFree->core.offNext += pRight->core.offNext;
1051 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
1052 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
1053 ASSERT_CHUNK(pHeap, pNext);
1054 }
1055 else
1056 pFree->core.offNext = 0;
1057
1058 /* free */
1059 if (pRight->offNext)
1060 {
1061 pFree->offNext += pRight->offNext;
1062 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
1063 }
1064 else
1065 {
1066 pFree->offNext = 0;
1067 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1068 }
1069 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
1070 pHeap->cbFree -= pRight->cb;
1071 Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
1072 }
1073
1074 /* calculate the size. */
1075 if (pFree->core.offNext)
1076 pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
1077 else
1078 pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
1079 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
1080 pHeap->cbFree += pFree->cb;
1081 ASSERT_CHUNK_FREE(pHeap, pFree);
1082
1083#ifdef MMHYPER_HEAP_STRICT
1084 mmHyperHeapCheck(pHeap);
1085#endif
1086 return VINF_SUCCESS;
1087}
1088
1089
1090#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT)
1091/**
1092 * Dumps a heap chunk to the log.
1093 *
1094 * @param pHeap Pointer to the heap.
1095 * @param pCur Pointer to the chunk.
1096 */
1097static void mmHyperHeapDumpOne(PMMHYPERHEAP pHeap, PMMHYPERCHUNKFREE pCur)
1098{
1099 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1100 {
1101 if (pCur->core.offStat)
1102 {
1103 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
1104 const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
1105#ifdef IN_RING3
1106 Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
1107 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1108 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1109 mmR3GetTagName((MMTAG)pStat->Core.Key), pszSelf));
1110#else
1111 Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
1112 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1113 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1114 (MMTAG)pStat->Core.Key, pszSelf));
1115#endif
1116 }
1117 else
1118 Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
1119 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1120 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1121 }
1122 else
1123 Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
1124 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1125 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
1126}
1127#endif /* DEBUG || MMHYPER_HEAP_STRICT */
1128
1129
1130#ifdef MMHYPER_HEAP_STRICT
1131/**
1132 * Internal consistency check.
1133 */
1134static void mmHyperHeapCheck(PMMHYPERHEAP pHeap)
1135{
1136 PMMHYPERCHUNKFREE pPrev = NULL;
1137 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1138 for (;;)
1139 {
1140 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1141 ASSERT_CHUNK_USED(pHeap, &pCur->core);
1142 else
1143 ASSERT_CHUNK_FREE(pHeap, pCur);
1144 if (pPrev)
1145 AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1146 ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1147
1148# ifdef MMHYPER_HEAP_STRICT_FENCE
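 /* Fence layout (written by mmHyperAllocChunk): the slack at the end of a
 * used chunk is filled with MMHYPER_HEAP_STRICT_FENCE_U32 dwords and the
 * very last dword holds the fence size, which is what gets read back and
 * verified here. */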
1149 uint32_t off = (uint8_t *)pCur - pHeap->CTX_SUFF(pbHeap);
1150 if ( MMHYPERCHUNK_ISUSED(&pCur->core)
1151 && off < pHeap->offPageAligned)
1152 {
1153 uint32_t cbCur = pCur->core.offNext
1154 ? pCur->core.offNext
1155 : pHeap->cbHeap - off;
1156 uint32_t *pu32End = ((uint32_t *)((uint8_t *)pCur + cbCur));
1157 uint32_t cbFence = pu32End[-1];
1158 if (RT_UNLIKELY( cbFence >= cbCur - sizeof(*pCur)
1159 || cbFence < MMHYPER_HEAP_STRICT_FENCE_SIZE))
1160 {
1161 mmHyperHeapDumpOne(pHeap, pCur);
1162 Assert(cbFence < cbCur - sizeof(*pCur));
1163 Assert(cbFence >= MMHYPER_HEAP_STRICT_FENCE_SIZE);
1164 }
1165
1166 uint32_t *pu32Bad = ASMMemIsAllU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
1167 if (RT_UNLIKELY(pu32Bad))
1168 {
1169 mmHyperHeapDumpOne(pHeap, pCur);
1170 Assert(!pu32Bad);
1171 }
1172 }
1173# endif
1174
1175 /* next */
1176 if (!pCur->core.offNext)
1177 break;
1178 pPrev = pCur;
1179 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1180 }
1181}
1182#endif
1183
1184
1185/**
1186 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
1187 * defined at build time.
1188 *
1189 * @param pVM Pointer to the shared VM structure.
1190 */
1191VMMDECL(void) MMHyperHeapCheck(PVM pVM)
1192{
1193#ifdef MMHYPER_HEAP_STRICT
1194 int rc;
1195
1196 rc = mmHyperLock(pVM);
1197 AssertRC(rc);
1198 mmHyperHeapCheck(pVM->mm.s.CTX_SUFF(pHyperHeap));
1199 mmHyperUnlock(pVM);
1200#endif
1201}
1202
1203
1204#ifdef DEBUG
1205/**
1206 * Dumps the hypervisor heap to Log.
1207 * @param pVM VM Handle.
1208 */
1209VMMDECL(void) MMHyperHeapDump(PVM pVM)
1210{
1211 Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
1212 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
1213 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1214 for (;;)
1215 {
1216 mmHyperHeapDumpOne(pHeap, pCur);
1217
1218 /* next */
1219 if (!pCur->core.offNext)
1220 break;
1221 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1222 }
1223 Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
1224}
1225#endif
1226
1227
1228/**
1229 * Query the amount of free memory in the hypervisor heap.
1230 *
1231 * @returns Number of free bytes in the hypervisor heap.
1232 */
1233VMMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
1234{
1235 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbFree;
1236}
1237
1238/**
1239 * Query the size of the hypervisor heap.
1240 *
1241 * @returns The size of the hypervisor heap in bytes.
1242 */
1243VMMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
1244{
1245 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap;
1246}
1247
1248
1249/**
1250 * Query the address and size of the hypervisor memory area.
1251 *
1252 * @returns Base address of the hypervisor area.
1253 * @param pVM VM Handle.
1254 * @param pcb Where to store the size of the hypervisor area. (out)
1255 */
1256VMMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
1257{
1258 if (pcb)
1259 *pcb = pVM->mm.s.cbHyperArea;
1260 return pVM->mm.s.pvHyperAreaGC;
1261}
1262
1263
1264/**
1265 * Checks if an address is within the hypervisor memory area.
1266 *
1267 * @returns true if inside.
1268 * @returns false if outside.
1269 * @param pVM VM handle.
1270 * @param GCPtr The pointer to check.
1271 */
1272VMMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
1273{
1274 return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
1275}
1276