VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp@91247

Last change on this file since 91247 was 90991, checked in by vboxsync, 3 years ago

VMM: Eliminated VMMCALLRING3_MMHYPER_LOCK. bugref:6695

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 49.4 KB
1/* $Id: MMAllHyper.cpp 90991 2021-08-30 09:49:20Z vboxsync $ */
2/** @file
3 * MM - Memory Manager - Hypervisor Memory Area, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
23#include <VBox/vmm/mm.h>
24#include <VBox/vmm/stam.h>
25#include "MMInternal.h"
26#include <VBox/vmm/vmcc.h>
27
28#include <VBox/err.h>
29#include <VBox/param.h>
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/string.h>
34
35
36/*********************************************************************************************************************************
37* Defined Constants And Macros *
38*********************************************************************************************************************************/
39#define ASSERT_L(u1, u2) AssertMsg((u1) < (u2), ("u1=%#x u2=%#x\n", u1, u2))
40#define ASSERT_LE(u1, u2) AssertMsg((u1) <= (u2), ("u1=%#x u2=%#x\n", u1, u2))
41#define ASSERT_GE(u1, u2) AssertMsg((u1) >= (u2), ("u1=%#x u2=%#x\n", u1, u2))
42#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
43
44#define ASSERT_OFFPREV(pHeap, pChunk) \
45 do { Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) <= 0); \
46 Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
47 AssertMsg( MMHYPERCHUNK_GET_OFFPREV(pChunk) != 0 \
48 || (uint8_t *)(pChunk) == (pHeap)->CTX_SUFF(pbHeap), \
49 ("pChunk=%p pvHyperHeap=%p\n", (pChunk), (pHeap)->CTX_SUFF(pbHeap))); \
50 } while (0)
51
52#define ASSERT_OFFNEXT(pHeap, pChunk) \
53 do { ASSERT_ALIGN((pChunk)->offNext); \
54 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
55 } while (0)
56
57#define ASSERT_OFFHEAP(pHeap, pChunk) \
58 do { Assert((pChunk)->offHeap); \
59 AssertMsg((PMMHYPERHEAP)((pChunk)->offHeap + (uintptr_t)pChunk) == (pHeap), \
60 ("offHeap=%RX32 pChunk=%p pHeap=%p\n", (pChunk)->offHeap, (pChunk), (pHeap))); \
61 Assert((pHeap)->u32Magic == MMHYPERHEAP_MAGIC); \
62 } while (0)
63
64#ifdef VBOX_WITH_STATISTICS
65#define ASSERT_OFFSTAT(pHeap, pChunk) \
66 do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
67 Assert(!(pChunk)->offStat); \
68 else if ((pChunk)->offStat) \
69 { \
70 Assert((pChunk)->offStat); \
71 AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
72 uintptr_t uPtr = (uintptr_t)(pChunk)->offStat + (uintptr_t)pChunk; NOREF(uPtr); \
73 AssertMsg(uPtr - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) < (pHeap)->offPageAligned, \
74 ("%p - %p < %RX32\n", uPtr, (pHeap)->CTX_SUFF(pbHeap), (pHeap)->offPageAligned)); \
75 } \
76 } while (0)
77#else
78#define ASSERT_OFFSTAT(pHeap, pChunk) \
79 do { Assert(!(pChunk)->offStat); \
80 } while (0)
81#endif
82
83#define ASSERT_CHUNK(pHeap, pChunk) \
84 do { ASSERT_OFFNEXT(pHeap, pChunk); \
85 ASSERT_OFFPREV(pHeap, pChunk); \
86 ASSERT_OFFHEAP(pHeap, pChunk); \
87 ASSERT_OFFSTAT(pHeap, pChunk); \
88 } while (0)
89#define ASSERT_CHUNK_USED(pHeap, pChunk) \
90 do { ASSERT_OFFNEXT(pHeap, pChunk); \
91 ASSERT_OFFPREV(pHeap, pChunk); \
92 Assert(MMHYPERCHUNK_ISUSED(pChunk)); \
93 } while (0)
94
95#define ASSERT_FREE_OFFPREV(pHeap, pChunk) \
96 do { ASSERT_ALIGN((pChunk)->offPrev); \
97 ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
98 Assert((pChunk)->offPrev != MMHYPERCHUNK_GET_OFFPREV(&(pChunk)->core) || !(pChunk)->offPrev); \
99 AssertMsg( (pChunk)->offPrev \
100 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeHead, \
101 ("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
102 (pHeap)->offFreeHead)); \
103 } while (0)
104
105#define ASSERT_FREE_OFFNEXT(pHeap, pChunk) \
106 do { ASSERT_ALIGN((pChunk)->offNext); \
107 ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
108 Assert((pChunk)->offNext != (pChunk)->core.offNext || !(pChunk)->offNext); \
109 AssertMsg( (pChunk)->offNext \
110 || (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) == (pHeap)->offFreeTail, \
111 ("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
112 (pHeap)->offFreeTail)); \
113 } while (0)
114
115#define ASSERT_FREE_CB(pHeap, pChunk) \
116 do { ASSERT_ALIGN((pChunk)->cb); \
117 Assert((pChunk)->cb > 0); \
118 if ((pChunk)->core.offNext) \
119 AssertMsg((pChunk)->cb == ((pChunk)->core.offNext - sizeof(MMHYPERCHUNK)), \
120 ("cb=%d offNext=%d\n", (pChunk)->cb, (pChunk)->core.offNext)); \
121 else \
122 ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
123 } while (0)
124
125#define ASSERT_CHUNK_FREE(pHeap, pChunk) \
126 do { ASSERT_CHUNK(pHeap, &(pChunk)->core); \
127 Assert(MMHYPERCHUNK_ISFREE(pChunk)); \
128 ASSERT_FREE_OFFNEXT(pHeap, pChunk); \
129 ASSERT_FREE_OFFPREV(pHeap, pChunk); \
130 ASSERT_FREE_CB(pHeap, pChunk); \
131 } while (0)
132
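/* Illustrative sketch (not from the original source): the assertions above all
 * operate on the same offset-linked chunk layout. A chunk stores byte offsets
 * to its neighbours and to the owning heap rather than pointers (forward
 * offsets are positive, backward offsets negative, flag bits masked off by
 * MMHYPERCHUNK_GET_OFFPREV), so a neighbour is reached by adding the offset to
 * the chunk's own address:
 *
 * @code
 *      PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((uintptr_t)pChunk + pChunk->offNext);
 *      PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((uintptr_t)pChunk + MMHYPERCHUNK_GET_OFFPREV(pChunk));
 *      PMMHYPERHEAP  pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
 * @endcode
 *
 * An offNext of zero marks the last chunk and an offPrev of zero the first.
 */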
133
134/*********************************************************************************************************************************
135* Internal Functions *
136*********************************************************************************************************************************/
137static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment);
138static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb);
139#ifdef VBOX_WITH_STATISTICS
140static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag);
141#ifdef IN_RING3
142static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat);
143#endif
144#endif
145static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk);
146#ifdef MMHYPER_HEAP_STRICT
147static void mmHyperHeapCheck(PMMHYPERHEAP pHeap);
148#endif
149
150
151
152/**
153 * Locks the hypervisor heap.
154 * This might call back to Ring-3 in order to deal with lock contention in GC and R3.
155 *
156 * @param pVM The cross context VM structure.
157 */
158static int mmHyperLock(PVMCC pVM)
159{
160 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
161
162#ifdef IN_RING3
163 if (!PDMCritSectIsInitialized(&pHeap->Lock))
164 return VINF_SUCCESS; /* early init */
165#else
166 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
167#endif
168 int rc = PDMCritSectEnter(pVM, &pHeap->Lock, VINF_SUCCESS);
169 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pHeap->Lock, rc);
170 return rc;
171}
172
173
174/**
175 * Unlocks the hypervisor heap.
176 *
177 * @param pVM The cross context VM structure.
178 */
179static void mmHyperUnlock(PVMCC pVM)
180{
181 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
182
183#ifdef IN_RING3
184 if (!PDMCritSectIsInitialized(&pHeap->Lock))
185 return; /* early init */
186#endif
187 Assert(PDMCritSectIsInitialized(&pHeap->Lock));
188 PDMCritSectLeave(pVM, &pHeap->Lock);
189}
190
191/**
192 * Allocates memory in the Hypervisor (RC VMM) area.
193 * The returned memory is of course zeroed.
194 *
195 * @returns VBox status code.
196 * @param pVM The cross context VM structure.
197 * @param cb Number of bytes to allocate.
198 * @param uAlignment Required memory alignment in bytes.
199 * Values are 0,8,16,32,64 and PAGE_SIZE.
200 * 0 -> default alignment, i.e. 8 bytes.
201 * @param enmTag The statistics tag.
202 * @param ppv Where to store the address to the allocated
203 * memory.
204 */
205static int mmHyperAllocInternal(PVM pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
206{
207 AssertMsg(cb >= 8, ("Hey! Do you really mean to allocate less than 8 bytes?! cb=%d\n", cb));
208
209 /*
210 * Validate input and adjust it to reasonable values.
211 */
212 if (!uAlignment || uAlignment < MMHYPER_HEAP_ALIGN_MIN)
213 uAlignment = MMHYPER_HEAP_ALIGN_MIN;
214 uint32_t cbAligned;
215 switch (uAlignment)
216 {
217 case 8:
218 case 16:
219 case 32:
220 case 64:
221 cbAligned = RT_ALIGN_32(cb, MMHYPER_HEAP_ALIGN_MIN);
222 if (!cbAligned || cbAligned < cb)
223 {
224 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
225 AssertMsgFailed(("Nice try.\n"));
226 return VERR_INVALID_PARAMETER;
227 }
228 break;
229
230 case PAGE_SIZE:
231 AssertMsg(RT_ALIGN_32(cb, PAGE_SIZE) == cb, ("The size isn't page aligned. (cb=%#x)\n", cb));
232 cbAligned = RT_ALIGN_32(cb, PAGE_SIZE);
233 if (!cbAligned)
234 {
235 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
236 AssertMsgFailed(("Nice try.\n"));
237 return VERR_INVALID_PARAMETER;
238 }
239 break;
240
241 default:
242 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_INVALID_PARAMETER\n", cb, uAlignment));
243 AssertMsgFailed(("Invalid alignment %u\n", uAlignment));
244 return VERR_INVALID_PARAMETER;
245 }
246
247
248 /*
249 * Get heap and statistics.
250 */
251 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
252#ifdef VBOX_WITH_STATISTICS
253 PMMHYPERSTAT pStat = mmHyperStat(pHeap, enmTag);
254 if (!pStat)
255 {
256 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
257 AssertMsgFailed(("Failed to allocate statistics!\n"));
258 return VERR_MM_HYPER_NO_MEMORY;
259 }
260#else
261 NOREF(enmTag);
262#endif
263 if (uAlignment < PAGE_SIZE)
264 {
265 /*
266 * Allocate a chunk.
267 */
268 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, cbAligned, uAlignment);
269 if (pChunk)
270 {
271#ifdef VBOX_WITH_STATISTICS
272 const uint32_t cbChunk = pChunk->offNext
273 ? pChunk->offNext
274 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
275 pStat->cbAllocated += (uint32_t)cbChunk;
276 pStat->cbCurAllocated += (uint32_t)cbChunk;
277 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
278 pStat->cbMaxAllocated = pStat->cbCurAllocated;
279 pStat->cAllocations++;
280 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
281#else
282 pChunk->offStat = 0;
283#endif
284 void *pv = pChunk + 1;
285 *ppv = pv;
286 ASMMemZero32(pv, cbAligned);
287 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
288 return VINF_SUCCESS;
289 }
290 }
291 else
292 {
293 /*
294 * Allocate page aligned memory.
295 */
296 void *pv = mmHyperAllocPages(pHeap, cbAligned);
297 if (pv)
298 {
299#ifdef VBOX_WITH_STATISTICS
300 pStat->cbAllocated += cbAligned;
301 pStat->cbCurAllocated += cbAligned;
302 if (pStat->cbCurAllocated > pStat->cbMaxAllocated)
303 pStat->cbMaxAllocated = pStat->cbCurAllocated;
304 pStat->cAllocations++;
305#endif
306 *ppv = pv;
307 /* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPR3PageAlloc zeros it. */
308 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
309 return VINF_SUCCESS;
310 }
311 }
312
313#ifdef VBOX_WITH_STATISTICS
314 pStat->cAllocations++;
315 pStat->cFailures++;
316#endif
317 Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VERR_MM_HYPER_NO_MEMORY\n", cb, uAlignment));
318 AssertMsgFailed(("Failed to allocate %d bytes!\n", cb));
319 return VERR_MM_HYPER_NO_MEMORY;
320}
321
322
323/**
324 * Wrapper for mmHyperAllocInternal
325 */
326VMMDECL(int) MMHyperAlloc(PVMCC pVM, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
327{
328 int rc = mmHyperLock(pVM);
329 AssertRCReturn(rc, rc);
330
331 LogFlow(("MMHyperAlloc %x align=%x tag=%s\n", cb, uAlignment, mmGetTagName(enmTag)));
332
333 rc = mmHyperAllocInternal(pVM, cb, uAlignment, enmTag, ppv);
334
335 mmHyperUnlock(pVM);
336 return rc;
337}
338
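/* A minimal usage sketch, not part of the original file; it assumes a valid
 * pVM and uses MM_TAG_VM purely as an example statistics tag:
 *
 * @code
 *      void *pv;
 *      int rc = MMHyperAlloc(pVM, 128, 8, MM_TAG_VM, &pv);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // pv now points to 128 zeroed, 8-byte aligned bytes on the hyper heap.
 *          MMHyperFree(pVM, pv);
 *      }
 * @endcode
 */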
339
340/**
341 * Duplicates a block of memory.
342 *
343 * @returns VBox status code.
344 * @param pVM The cross context VM structure.
345 * @param pvSrc The source memory block to copy from.
346 * @param cb Size of the source memory block.
347 * @param uAlignment Required memory alignment in bytes.
348 * Values are 0,8,16,32,64 and PAGE_SIZE.
349 * 0 -> default alignment, i.e. 8 bytes.
350 * @param enmTag The statistics tag.
351 * @param ppv Where to store the address to the allocated
352 * memory.
353 */
354VMMDECL(int) MMHyperDupMem(PVMCC pVM, const void *pvSrc, size_t cb, unsigned uAlignment, MMTAG enmTag, void **ppv)
355{
356 int rc = MMHyperAlloc(pVM, cb, uAlignment, enmTag, ppv);
357 if (RT_SUCCESS(rc))
358 memcpy(*ppv, pvSrc, cb);
359 return rc;
360}
361
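/* Usage sketch (illustrative only, not from the original source): duplicating
 * a small read-only blob into the hyper heap with the default 8-byte
 * alignment (uAlignment = 0); MM_TAG_VM is again just an example tag:
 *
 * @code
 *      static const char s_szMsg[] = "example";
 *      char *pszCopy;
 *      int rc = MMHyperDupMem(pVM, s_szMsg, sizeof(s_szMsg), 0, MM_TAG_VM, (void **)&pszCopy);
 * @endcode
 */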
362
363/**
364 * Allocates a chunk of memory from the specified heap.
365 * The caller validates the parameters of this request.
366 *
367 * @returns Pointer to the allocated chunk.
368 * @returns NULL on failure.
369 * @param pHeap The heap.
370 * @param cb Size of the memory block to allocate.
371 * @param uAlignment The alignment specifications for the allocated block.
372 * @internal
373 */
374static PMMHYPERCHUNK mmHyperAllocChunk(PMMHYPERHEAP pHeap, uint32_t cb, unsigned uAlignment)
375{
376 Log3(("mmHyperAllocChunk: Enter cb=%#x uAlignment=%#x\n", cb, uAlignment));
377#ifdef MMHYPER_HEAP_STRICT
378 mmHyperHeapCheck(pHeap);
379#endif
380#ifdef MMHYPER_HEAP_STRICT_FENCE
381 uint32_t cbFence = RT_MAX(MMHYPER_HEAP_STRICT_FENCE_SIZE, uAlignment);
382 cb += cbFence;
383#endif
384
385 /*
386 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
387 */
388 if (pHeap->offFreeHead == NIL_OFFSET)
389 return NULL;
390
391 /*
392 * Small alignments - from the front of the heap.
393 *
394 * Must split off free chunks at the end to avoid messing up the last
395 * free node, from the top of which the page aligned memory is taken.
396 */
397 PMMHYPERCHUNK pRet = NULL;
398 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
399 while (pFree)
400 {
401 ASSERT_CHUNK_FREE(pHeap, pFree);
402 if (pFree->cb >= cb)
403 {
404 unsigned offAlign = (uintptr_t)(&pFree->core + 1) & (uAlignment - 1);
405 if (offAlign)
406 offAlign = uAlignment - offAlign;
407 if (!offAlign || pFree->cb - offAlign >= cb)
408 {
409 Log3(("mmHyperAllocChunk: Using pFree=%p pFree->cb=%d offAlign=%d\n", pFree, pFree->cb, offAlign));
410
411 /*
412 * Adjust the node in front.
413 * Because of multiple alignments we need to special case allocation of the first block.
414 */
415 if (offAlign)
416 {
417 MMHYPERCHUNKFREE Free = *pFree;
418 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
419 {
420 /* just add a bit of memory to it. */
421 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
422 pPrev->core.offNext += offAlign;
423 AssertMsg(!MMHYPERCHUNK_ISFREE(&pPrev->core), ("Impossible!\n"));
424 Log3(("mmHyperAllocChunk: Added %d bytes to %p\n", offAlign, pPrev));
425 }
426 else
427 {
428 /* make new head node, mark it USED for simplicity. */
429 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)pHeap->CTX_SUFF(pbHeap);
430 Assert(pPrev == &pFree->core);
431 pPrev->offPrev = 0;
432 MMHYPERCHUNK_SET_TYPE(pPrev, MMHYPERCHUNK_FLAGS_USED);
433 pPrev->offNext = offAlign;
434 Log3(("mmHyperAllocChunk: Created new first node of %d bytes\n", offAlign));
435
436 }
437 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
438 pHeap->cbFree -= offAlign;
439
440 /* Recreate the pFree node and adjust everything... */
441 pFree = (PMMHYPERCHUNKFREE)((char *)pFree + offAlign);
442 *pFree = Free;
443
444 pFree->cb -= offAlign;
445 if (pFree->core.offNext)
446 {
447 pFree->core.offNext -= offAlign;
448 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
449 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
450 ASSERT_CHUNK(pHeap, pNext);
451 }
452 if (MMHYPERCHUNK_GET_OFFPREV(&pFree->core))
453 MMHYPERCHUNK_SET_OFFPREV(&pFree->core, MMHYPERCHUNK_GET_OFFPREV(&pFree->core) - offAlign);
454
455 if (pFree->offNext)
456 {
457 pFree->offNext -= offAlign;
458 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
459 pNext->offPrev = -(int32_t)pFree->offNext;
460 ASSERT_CHUNK_FREE(pHeap, pNext);
461 }
462 else
463 pHeap->offFreeTail += offAlign;
464 if (pFree->offPrev)
465 {
466 pFree->offPrev -= offAlign;
467 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
468 pPrev->offNext = -pFree->offPrev;
469 ASSERT_CHUNK_FREE(pHeap, pPrev);
470 }
471 else
472 pHeap->offFreeHead += offAlign;
473 pFree->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pFree;
474 pFree->core.offStat = 0;
475 ASSERT_CHUNK_FREE(pHeap, pFree);
476 Log3(("mmHyperAllocChunk: Realigned pFree=%p\n", pFree));
477 }
478
479 /*
480 * Split off a new FREE chunk?
481 */
482 if (pFree->cb >= cb + RT_ALIGN(sizeof(MMHYPERCHUNKFREE), MMHYPER_HEAP_ALIGN_MIN))
483 {
484 /*
485 * Move the FREE chunk up to make room for the new USED chunk.
486 */
487 const int off = cb + sizeof(MMHYPERCHUNK);
488 PMMHYPERCHUNKFREE pNew = (PMMHYPERCHUNKFREE)((char *)&pFree->core + off);
489 *pNew = *pFree;
490 pNew->cb -= off;
491 if (pNew->core.offNext)
492 {
493 pNew->core.offNext -= off;
494 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pNew + pNew->core.offNext);
495 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pNew->core.offNext);
496 ASSERT_CHUNK(pHeap, pNext);
497 }
498 pNew->core.offPrev = -off;
499 MMHYPERCHUNK_SET_TYPE(pNew, MMHYPERCHUNK_FLAGS_FREE);
500
501 if (pNew->offNext)
502 {
503 pNew->offNext -= off;
504 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offNext);
505 pNext->offPrev = -(int32_t)pNew->offNext;
506 ASSERT_CHUNK_FREE(pHeap, pNext);
507 }
508 else
509 pHeap->offFreeTail += off;
510 if (pNew->offPrev)
511 {
512 pNew->offPrev -= off;
513 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pNew + pNew->offPrev);
514 pPrev->offNext = -pNew->offPrev;
515 ASSERT_CHUNK_FREE(pHeap, pPrev);
516 }
517 else
518 pHeap->offFreeHead += off;
519 pNew->core.offHeap = (uintptr_t)pHeap - (uintptr_t)pNew;
520 pNew->core.offStat = 0;
521 ASSERT_CHUNK_FREE(pHeap, pNew);
522
523 /*
524 * Update the old FREE node making it a USED node.
525 */
526 pFree->core.offNext = off;
527 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
528
529
530 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
531 pHeap->cbFree - (cb + sizeof(MMHYPERCHUNK)), -(int)(cb + sizeof(MMHYPERCHUNK))));
532 pHeap->cbFree -= (uint32_t)(cb + sizeof(MMHYPERCHUNK));
533 pRet = &pFree->core;
534 ASSERT_CHUNK(pHeap, &pFree->core);
535 Log3(("mmHyperAllocChunk: Created free chunk pNew=%p cb=%d\n", pNew, pNew->cb));
536 }
537 else
538 {
539 /*
540 * Link out of free list.
541 */
542 if (pFree->offNext)
543 {
544 PMMHYPERCHUNKFREE pNext = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext);
545 if (pFree->offPrev)
546 {
547 pNext->offPrev += pFree->offPrev;
548 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
549 pPrev->offNext += pFree->offNext;
550 ASSERT_CHUNK_FREE(pHeap, pPrev);
551 }
552 else
553 {
554 pHeap->offFreeHead += pFree->offNext;
555 pNext->offPrev = 0;
556 }
557 ASSERT_CHUNK_FREE(pHeap, pNext);
558 }
559 else
560 {
561 if (pFree->offPrev)
562 {
563 pHeap->offFreeTail += pFree->offPrev;
564 PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev);
565 pPrev->offNext = 0;
566 ASSERT_CHUNK_FREE(pHeap, pPrev);
567 }
568 else
569 {
570 pHeap->offFreeHead = NIL_OFFSET;
571 pHeap->offFreeTail = NIL_OFFSET;
572 }
573 }
574
575 Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree,
576 pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
577 pHeap->cbFree -= pFree->cb;
578 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_USED);
579 pRet = &pFree->core;
580 ASSERT_CHUNK(pHeap, &pFree->core);
581 Log3(("mmHyperAllocChunk: Converted free chunk %p to used chunk.\n", pFree));
582 }
583 Log3(("mmHyperAllocChunk: Returning %p\n", pRet));
584 break;
585 }
586 }
587
588 /* next */
589 pFree = pFree->offNext ? (PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext) : NULL;
590 }
591
592#ifdef MMHYPER_HEAP_STRICT_FENCE
593 uint32_t *pu32End = (uint32_t *)((uint8_t *)(pRet + 1) + cb);
594 uint32_t *pu32EndReal = pRet->offNext
595 ? (uint32_t *)((uint8_t *)pRet + pRet->offNext)
596 : (uint32_t *)(pHeap->CTX_SUFF(pbHeap) + pHeap->cbHeap);
597 cbFence += (uintptr_t)pu32EndReal - (uintptr_t)pu32End; Assert(!(cbFence & 0x3));
598 ASMMemFill32((uint8_t *)pu32EndReal - cbFence, cbFence, MMHYPER_HEAP_STRICT_FENCE_U32);
599 pu32EndReal[-1] = cbFence;
600#endif
601#ifdef MMHYPER_HEAP_STRICT
602 mmHyperHeapCheck(pHeap);
603#endif
604 return pRet;
605}
606
607
608/**
609 * Allocates one or more pages of memory from the specified heap.
610 * The caller validates the parameters of this request.
611 *
612 * @returns Pointer to the allocated chunk.
613 * @returns NULL on failure.
614 * @param pHeap The heap.
615 * @param cb Size of the memory block to allocate.
616 * @internal
617 */
618static void *mmHyperAllocPages(PMMHYPERHEAP pHeap, uint32_t cb)
619{
620 Log3(("mmHyperAllocPages: Enter cb=%#x\n", cb));
621
622#ifdef MMHYPER_HEAP_STRICT
623 mmHyperHeapCheck(pHeap);
624#endif
625
626 /*
627 * Check if there are any free chunks. (NIL_OFFSET use/not-use forces this check)
628 */
629 if (pHeap->offFreeHead == NIL_OFFSET)
630 return NULL;
631
632 /*
633 * Page aligned chunks.
634 *
635 * Page aligned chunks can only be allocated from the last FREE chunk.
636 * This is for reasons of simplicity and to limit fragmentation. Page aligned memory
637 * must also be allocated in page aligned sizes. Page aligned memory cannot
638 * be freed either.
639 *
640 * So, for this to work, the last FREE chunk needs to end on a page aligned
641 * boundary.
642 */
643 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
644 ASSERT_CHUNK_FREE(pHeap, pFree);
645 if ( (((uintptr_t)(&pFree->core + 1) + pFree->cb) & (PAGE_OFFSET_MASK - 1))
646 || pFree->cb + sizeof(MMHYPERCHUNK) < cb)
647 {
648 Log3(("mmHyperAllocPages: Not enough/no page aligned memory!\n"));
649 return NULL;
650 }
651
652 void *pvRet;
653 if (pFree->cb > cb)
654 {
655 /*
656 * Simple, just cut the top of the free node and return it.
657 */
658 pFree->cb -= cb;
659 pvRet = (char *)(&pFree->core + 1) + pFree->cb;
660 AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
661 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - cb, -(int)cb));
662 pHeap->cbFree -= cb;
663 ASSERT_CHUNK_FREE(pHeap, pFree);
664 Log3(("mmHyperAllocPages: Allocated from pFree=%p new pFree->cb=%d\n", pFree, pFree->cb));
665 }
666 else
667 {
668 /*
669 * Unlink the FREE node.
670 */
671 pvRet = (char *)(&pFree->core + 1) + pFree->cb - cb;
672 Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
673 pHeap->cbFree -= pFree->cb;
674
675 /* a scrap of spare memory (unlikely)? add it to the previous chunk. */
676 if (pvRet != (void *)pFree)
677 {
678 AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocate all of the heap with page aligned memory?!?\n"));
679 PMMHYPERCHUNK pPrev = (PMMHYPERCHUNK)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&pFree->core));
680 pPrev->offNext += (uintptr_t)pvRet - (uintptr_t)pFree;
681 AssertMsg(!MMHYPERCHUNK_ISFREE(pPrev), ("Free bug?\n"));
682#ifdef VBOX_WITH_STATISTICS
683 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pPrev + pPrev->offStat);
684 pStat->cbAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
685 pStat->cbCurAllocated += (uintptr_t)pvRet - (uintptr_t)pFree;
686#endif
687 Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
688 }
689
690 /* unlink from FREE chain. */
691 if (pFree->offPrev)
692 {
693 pHeap->offFreeTail += pFree->offPrev;
694 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offPrev))->offNext = 0;
695 }
696 else
697 {
698 pHeap->offFreeTail = NIL_OFFSET;
699 pHeap->offFreeHead = NIL_OFFSET;
700 }
701 Log3(("mmHyperAllocPages: Unlinked pFree=%p\n", pFree));
702 }
703 pHeap->offPageAligned = (uintptr_t)pvRet - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
704 Log3(("mmHyperAllocPages: Returning %p (page aligned)\n", pvRet));
705
706#ifdef MMHYPER_HEAP_STRICT
707 mmHyperHeapCheck(pHeap);
708#endif
709 return pvRet;
710}
711
712#ifdef VBOX_WITH_STATISTICS
713
714/**
715 * Get the statistic record for a tag.
716 *
717 * @returns Pointer to a stat record.
718 * @returns NULL on failure.
719 * @param pHeap The heap.
720 * @param enmTag The tag.
721 */
722static PMMHYPERSTAT mmHyperStat(PMMHYPERHEAP pHeap, MMTAG enmTag)
723{
724 /* try to look it up first. */
725 PMMHYPERSTAT pStat = (PMMHYPERSTAT)RTAvloGCPhysGet(&pHeap->HyperHeapStatTree, enmTag);
726 if (!pStat)
727 {
728 /* try to allocate a new one */
729 PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
730 if (!pChunk)
731 return NULL;
732 pStat = (PMMHYPERSTAT)(pChunk + 1);
733 pChunk->offStat = (uintptr_t)pStat - (uintptr_t)pChunk;
734
735 ASMMemZero32(pStat, sizeof(*pStat));
736 pStat->Core.Key = enmTag;
737 RTAvloGCPhysInsert(&pHeap->HyperHeapStatTree, &pStat->Core);
738 }
739 if (!pStat->fRegistered)
740 {
741# ifdef IN_RING3
742 mmR3HyperStatRegisterOne(pHeap->pVMR3, pStat);
743# else
744 /** @todo schedule a R3 action. */
745# endif
746 }
747 return pStat;
748}
749
750
751# ifdef IN_RING3
752/**
753 * Registers statistics with STAM.
754 *
755 */
756static void mmR3HyperStatRegisterOne(PVM pVM, PMMHYPERSTAT pStat)
757{
758 if (pStat->fRegistered)
759 return;
760 const char *pszTag = mmGetTagName((MMTAG)pStat->Core.Key);
761 STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
762 STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
763 STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
764 STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
765 STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
766 STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
767 STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
768 pStat->fRegistered = true;
769}
770# endif /* IN_RING3 */
771
772#endif /* VBOX_WITH_STATISTICS */
773
774
775/**
776 * Free memory allocated using MMHyperAlloc().
777 * The caller validates the parameters of this request.
778 *
779 * @returns VBox status code.
780 * @param pVM The cross context VM structure.
781 * @param pv The memory to free.
782 * @remark Try to avoid freeing hyper memory.
783 */
784static int mmHyperFreeInternal(PVM pVM, void *pv)
785{
786 Log2(("MMHyperFree: pv=%p\n", pv));
787 if (!pv)
788 return VINF_SUCCESS;
789 AssertMsgReturn(RT_ALIGN_P(pv, MMHYPER_HEAP_ALIGN_MIN) == pv,
790 ("Invalid pointer %p!\n", pv),
791 VERR_INVALID_POINTER);
792
793 /*
794 * Get the heap and stats.
795 * Validate the chunk at the same time.
796 */
797 PMMHYPERCHUNK pChunk = (PMMHYPERCHUNK)((PMMHYPERCHUNK)pv - 1);
798
799 AssertMsgReturn( (uintptr_t)pChunk + pChunk->offNext >= (uintptr_t)pChunk
800 || RT_ALIGN_32(pChunk->offNext, MMHYPER_HEAP_ALIGN_MIN) != pChunk->offNext,
801 ("%p: offNext=%#RX32\n", pv, pChunk->offNext),
802 VERR_INVALID_POINTER);
803
804 AssertMsgReturn(MMHYPERCHUNK_ISUSED(pChunk),
805 ("%p: Not used!\n", pv),
806 VERR_INVALID_POINTER);
807
808 int32_t offPrev = MMHYPERCHUNK_GET_OFFPREV(pChunk);
809 AssertMsgReturn( (uintptr_t)pChunk + offPrev <= (uintptr_t)pChunk
810 && !((uint32_t)-offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)),
811 ("%p: offPrev=%#RX32!\n", pv, offPrev),
812 VERR_INVALID_POINTER);
813
814 /* statistics */
815#ifdef VBOX_WITH_STATISTICS
816 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pChunk + pChunk->offStat);
817 AssertMsgReturn( RT_ALIGN_P(pStat, MMHYPER_HEAP_ALIGN_MIN) == (void *)pStat
818 && pChunk->offStat,
819 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
820 VERR_INVALID_POINTER);
821#else
822 AssertMsgReturn(!pChunk->offStat,
823 ("%p: offStat=%#RX32!\n", pv, pChunk->offStat),
824 VERR_INVALID_POINTER);
825#endif
826
827 /* The heap structure. */
828 PMMHYPERHEAP pHeap = (PMMHYPERHEAP)((uintptr_t)pChunk + pChunk->offHeap);
829 AssertMsgReturn( !((uintptr_t)pHeap & PAGE_OFFSET_MASK)
830 && pChunk->offHeap,
831 ("%p: pHeap=%#x offHeap=%RX32\n", pv, pHeap->u32Magic, pChunk->offHeap),
832 VERR_INVALID_POINTER);
833
834 AssertMsgReturn(pHeap->u32Magic == MMHYPERHEAP_MAGIC,
835 ("%p: u32Magic=%#x\n", pv, pHeap->u32Magic),
836 VERR_INVALID_POINTER);
837 Assert(pHeap == pVM->mm.s.CTX_SUFF(pHyperHeap)); NOREF(pVM);
838
839 /* Some more verifications using additional info from pHeap. */
840 AssertMsgReturn((uintptr_t)pChunk + offPrev >= (uintptr_t)pHeap->CTX_SUFF(pbHeap),
841 ("%p: offPrev=%#RX32!\n", pv, offPrev),
842 VERR_INVALID_POINTER);
843
844 AssertMsgReturn(pChunk->offNext < pHeap->cbHeap,
845 ("%p: offNext=%#RX32!\n", pv, pChunk->offNext),
846 VERR_INVALID_POINTER);
847
848 AssertMsgReturn( (uintptr_t)pv - (uintptr_t)pHeap->CTX_SUFF(pbHeap) <= pHeap->offPageAligned,
849 ("Invalid pointer %p! (heap: %p-%p)\n", pv, pHeap->CTX_SUFF(pbHeap),
850 (char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned),
851 VERR_INVALID_POINTER);
852
853#ifdef MMHYPER_HEAP_STRICT
854 mmHyperHeapCheck(pHeap);
855#endif
856
857#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
858 /* calc block size. */
859 const uint32_t cbChunk = pChunk->offNext
860 ? pChunk->offNext
861 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pChunk;
862#endif
863#ifdef MMHYPER_HEAP_FREE_POISON
864 /* poison the block */
865 memset(pChunk + 1, MMHYPER_HEAP_FREE_POISON, cbChunk - sizeof(*pChunk));
866#endif
867
868#ifdef MMHYPER_HEAP_FREE_DELAY
869# ifdef MMHYPER_HEAP_FREE_POISON
870 /*
871 * Check poison.
872 */
873 unsigned i = RT_ELEMENTS(pHeap->aDelayedFrees);
874 while (i-- > 0)
875 if (pHeap->aDelayedFrees[i].offChunk)
876 {
877 PMMHYPERCHUNK pCur = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[i].offChunk);
878 const size_t cb = pCur->offNext
879 ? pCur->offNext - sizeof(*pCur)
880 : pHeap->CTX_SUFF(pbHeap) + pHeap->offPageAligned - (uint8_t *)pCur - sizeof(*pCur);
881 uint8_t *pab = (uint8_t *)(pCur + 1);
882 for (unsigned off = 0; off < cb; off++)
883 AssertReleaseMsg(pab[off] == 0xCB,
884 ("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
885 pHeap->aDelayedFrees[i].uCaller, cb, off, RT_MIN(cb - off, 32), &pab[off]));
886 }
887# endif /* MMHYPER_HEAP_FREE_POISON */
888
889 /*
890 * Delayed freeing.
891 */
892 int rc = VINF_SUCCESS;
893 if (pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk)
894 {
895 PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
896 rc = mmHyperFree(pHeap, pChunkFree);
897 }
898 pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk = (uintptr_t)pChunk - (uintptr_t)pHeap;
899 pHeap->aDelayedFrees[pHeap->iDelayedFree].uCaller = (uintptr_t)ASMReturnAddress();
900 pHeap->iDelayedFree = (pHeap->iDelayedFree + 1) % RT_ELEMENTS(pHeap->aDelayedFrees);
901
902#else /* !MMHYPER_HEAP_FREE_DELAY */
903 /*
904 * Call the worker.
905 */
906 int rc = mmHyperFree(pHeap, pChunk);
907#endif /* !MMHYPER_HEAP_FREE_DELAY */
908
909 /*
910 * Update statistics.
911 */
912#ifdef VBOX_WITH_STATISTICS
913 pStat->cFrees++;
914 if (RT_SUCCESS(rc))
915 {
916 pStat->cbFreed += cbChunk;
917 pStat->cbCurAllocated -= cbChunk;
918 }
919 else
920 pStat->cFailures++;
921#endif
922
923 return rc;
924}
925
926
927/**
928 * Wrapper for mmHyperFreeInternal
929 */
930VMMDECL(int) MMHyperFree(PVMCC pVM, void *pv)
931{
932 int rc = mmHyperLock(pVM);
933 AssertRCReturn(rc, rc);
934
935 LogFlow(("MMHyperFree %p\n", pv));
936
937 rc = mmHyperFreeInternal(pVM, pv);
938
939 mmHyperUnlock(pVM);
940 return rc;
941}
942
943
944/**
945 * Frees a memory chunk.
946 *
947 * @returns VBox status code.
948 * @param pHeap The heap.
949 * @param pChunk The memory chunk to free.
950 */
951static int mmHyperFree(PMMHYPERHEAP pHeap, PMMHYPERCHUNK pChunk)
952{
953 Log3(("mmHyperFree: Enter pHeap=%p pChunk=%p\n", pHeap, pChunk));
954 PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)pChunk;
955
956 /*
957 * Insert into the free list (which is sorted on address).
958 *
959 * We'll search towards the end of the heap to locate the
960 * closest FREE chunk.
961 */
962 PMMHYPERCHUNKFREE pLeft = NULL;
963 PMMHYPERCHUNKFREE pRight = NULL;
964 if (pHeap->offFreeTail != NIL_OFFSET)
965 {
966 if (pFree->core.offNext)
967 {
968 pRight = (PMMHYPERCHUNKFREE)((char *)pFree + pFree->core.offNext);
969 ASSERT_CHUNK(pHeap, &pRight->core);
970 while (!MMHYPERCHUNK_ISFREE(&pRight->core))
971 {
972 if (!pRight->core.offNext)
973 {
974 pRight = NULL;
975 break;
976 }
977 pRight = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->core.offNext);
978 ASSERT_CHUNK(pHeap, &pRight->core);
979 }
980 }
981 if (!pRight)
982 pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
983 if (pRight)
984 {
985 ASSERT_CHUNK_FREE(pHeap, pRight);
986 if (pRight->offPrev)
987 {
988 pLeft = (PMMHYPERCHUNKFREE)((char *)pRight + pRight->offPrev);
989 ASSERT_CHUNK_FREE(pHeap, pLeft);
990 }
991 }
992 }
993 if (pLeft == pFree)
994 {
995 AssertMsgFailed(("Freed twice! pv=%p (pChunk=%p)\n", pChunk + 1, pChunk));
996 return VERR_INVALID_POINTER;
997 }
998 pChunk->offStat = 0;
999
1000 /*
1001 * Head free chunk list?
1002 */
1003 if (!pLeft)
1004 {
1005 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1006 pFree->offPrev = 0;
1007 pHeap->offFreeHead = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1008 if (pRight)
1009 {
1010 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1011 pRight->offPrev = -(int32_t)pFree->offNext;
1012 }
1013 else
1014 {
1015 pFree->offNext = 0;
1016 pHeap->offFreeTail = pHeap->offFreeHead;
1017 }
1018 Log3(("mmHyperFree: Inserted %p at head of free chain.\n", pFree));
1019 }
1020 else
1021 {
1022 /*
1023 * Can we merge with left hand free chunk?
1024 */
1025 if ((char *)pLeft + pLeft->core.offNext == (char *)pFree)
1026 {
1027 if (pFree->core.offNext)
1028 {
1029 pLeft->core.offNext = pLeft->core.offNext + pFree->core.offNext;
1030 MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
1031 }
1032 else
1033 pLeft->core.offNext = 0;
1034 pFree = pLeft;
1035 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
1036 pHeap->cbFree -= pLeft->cb;
1037 Log3(("mmHyperFree: Merging %p into %p (cb=%d).\n", pFree, pLeft, pLeft->cb));
1038 }
1039 /*
1040 * No, just link it into the free list then.
1041 */
1042 else
1043 {
1044 MMHYPERCHUNK_SET_TYPE(&pFree->core, MMHYPERCHUNK_FLAGS_FREE);
1045 pFree->offPrev = (uintptr_t)pLeft - (uintptr_t)pFree;
1046 pLeft->offNext = -pFree->offPrev;
1047 if (pRight)
1048 {
1049 pFree->offNext = (uintptr_t)pRight - (uintptr_t)pFree;
1050 pRight->offPrev = -(int32_t)pFree->offNext;
1051 }
1052 else
1053 {
1054 pFree->offNext = 0;
1055 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1056 }
1057 Log3(("mmHyperFree: Inserted %p after %p in free list.\n", pFree, pLeft));
1058 }
1059 }
1060
1061 /*
1062 * Can we merge with right hand free chunk?
1063 */
1064 if (pRight && (char *)pRight == (char *)pFree + pFree->core.offNext)
1065 {
1066 /* core */
1067 if (pRight->core.offNext)
1068 {
1069 pFree->core.offNext += pRight->core.offNext;
1070 PMMHYPERCHUNK pNext = (PMMHYPERCHUNK)((char *)pFree + pFree->core.offNext);
1071 MMHYPERCHUNK_SET_OFFPREV(pNext, -(int32_t)pFree->core.offNext);
1072 ASSERT_CHUNK(pHeap, pNext);
1073 }
1074 else
1075 pFree->core.offNext = 0;
1076
1077 /* free */
1078 if (pRight->offNext)
1079 {
1080 pFree->offNext += pRight->offNext;
1081 ((PMMHYPERCHUNKFREE)((char *)pFree + pFree->offNext))->offPrev = -(int32_t)pFree->offNext;
1082 }
1083 else
1084 {
1085 pFree->offNext = 0;
1086 pHeap->offFreeTail = (uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap);
1087 }
1088 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
1089 pHeap->cbFree -= pRight->cb;
1090 Log3(("mmHyperFree: Merged %p (cb=%d) into %p.\n", pRight, pRight->cb, pFree));
1091 }
1092
1093 /* calculate the size. */
1094 if (pFree->core.offNext)
1095 pFree->cb = pFree->core.offNext - sizeof(MMHYPERCHUNK);
1096 else
1097 pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
1098 Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree + pFree->cb, pFree->cb));
1099 pHeap->cbFree += pFree->cb;
1100 ASSERT_CHUNK_FREE(pHeap, pFree);
1101
1102#ifdef MMHYPER_HEAP_STRICT
1103 mmHyperHeapCheck(pHeap);
1104#endif
1105 return VINF_SUCCESS;
1106}
1107
1108
1109#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT_FENCE)
1110/**
1111 * Dumps a heap chunk to the log.
1112 *
1113 * @param pHeap Pointer to the heap.
1114 * @param pCur Pointer to the chunk.
1115 */
1116static void mmHyperHeapDumpOne(PMMHYPERHEAP pHeap, PMMHYPERCHUNKFREE pCur)
1117{
1118 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1119 {
1120 if (pCur->core.offStat)
1121 {
1122 PMMHYPERSTAT pStat = (PMMHYPERSTAT)((uintptr_t)pCur + pCur->core.offStat);
1123 const char *pszSelf = pCur->core.offStat == sizeof(MMHYPERCHUNK) ? " stat record" : "";
1124#ifdef IN_RING3
1125 Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
1126 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1127 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1128 mmGetTagName((MMTAG)pStat->Core.Key), pszSelf));
1129#else
1130 Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
1131 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1132 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1133 (MMTAG)pStat->Core.Key, pszSelf));
1134#endif
1135 NOREF(pStat); NOREF(pszSelf);
1136 }
1137 else
1138 Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
1139 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1140 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1141 }
1142 else
1143 Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
1144 pCur, (uintptr_t)pCur - (uintptr_t)pHeap->CTX_SUFF(pbHeap),
1145 pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
1146}
1147#endif /* DEBUG || MMHYPER_HEAP_STRICT_FENCE */
1148
1149
1150#ifdef MMHYPER_HEAP_STRICT
1151/**
1152 * Internal consistency check.
1153 */
1154static void mmHyperHeapCheck(PMMHYPERHEAP pHeap)
1155{
1156 PMMHYPERCHUNKFREE pPrev = NULL;
1157 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1158 for (;;)
1159 {
1160 if (MMHYPERCHUNK_ISUSED(&pCur->core))
1161 ASSERT_CHUNK_USED(pHeap, &pCur->core);
1162 else
1163 ASSERT_CHUNK_FREE(pHeap, pCur);
1164 if (pPrev)
1165 AssertMsg((int32_t)pPrev->core.offNext == -MMHYPERCHUNK_GET_OFFPREV(&pCur->core),
1166 ("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
1167
1168# ifdef MMHYPER_HEAP_STRICT_FENCE
1169 uint32_t off = (uint8_t *)pCur - pHeap->CTX_SUFF(pbHeap);
1170 if ( MMHYPERCHUNK_ISUSED(&pCur->core)
1171 && off < pHeap->offPageAligned)
1172 {
1173 uint32_t cbCur = pCur->core.offNext
1174 ? pCur->core.offNext
1175 : pHeap->cbHeap - off;
1176 uint32_t *pu32End = ((uint32_t *)((uint8_t *)pCur + cbCur));
1177 uint32_t cbFence = pu32End[-1];
1178 if (RT_UNLIKELY( cbFence >= cbCur - sizeof(*pCur)
1179 || cbFence < MMHYPER_HEAP_STRICT_FENCE_SIZE))
1180 {
1181 mmHyperHeapDumpOne(pHeap, pCur);
1182 Assert(cbFence < cbCur - sizeof(*pCur));
1183 Assert(cbFence >= MMHYPER_HEAP_STRICT_FENCE_SIZE);
1184 }
1185
1186 uint32_t *pu32Bad = ASMMemFirstMismatchingU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
1187 if (RT_UNLIKELY(pu32Bad))
1188 {
1189 mmHyperHeapDumpOne(pHeap, pCur);
1190 Assert(!pu32Bad);
1191 }
1192 }
1193# endif
1194
1195 /* next */
1196 if (!pCur->core.offNext)
1197 break;
1198 pPrev = pCur;
1199 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1200 }
1201}
1202#endif
1203
1204
1205/**
1206 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
1207 * defined at build time.
1208 *
1209 * @param pVM The cross context VM structure.
1210 */
1211VMMDECL(void) MMHyperHeapCheck(PVMCC pVM)
1212{
1213#ifdef MMHYPER_HEAP_STRICT
1214 int rc = mmHyperLock(pVM);
1215 AssertRC(rc);
1216 mmHyperHeapCheck(pVM->mm.s.CTX_SUFF(pHyperHeap));
1217 mmHyperUnlock(pVM);
1218#else
1219 NOREF(pVM);
1220#endif
1221}
1222
1223
1224#ifdef DEBUG
1225/**
1226 * Dumps the hypervisor heap to Log.
1227 * @param pVM The cross context VM structure.
1228 */
1229VMMDECL(void) MMHyperHeapDump(PVM pVM)
1230{
1231 Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
1232 PMMHYPERHEAP pHeap = pVM->mm.s.CTX_SUFF(pHyperHeap);
1233 PMMHYPERCHUNKFREE pCur = (PMMHYPERCHUNKFREE)pHeap->CTX_SUFF(pbHeap);
1234 for (;;)
1235 {
1236 mmHyperHeapDumpOne(pHeap, pCur);
1237
1238 /* next */
1239 if (!pCur->core.offNext)
1240 break;
1241 pCur = (PMMHYPERCHUNKFREE)((char *)pCur + pCur->core.offNext);
1242 }
1243 Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
1244}
1245#endif
1246
1247
1248/**
1249 * Query the amount of free memory in the hypervisor heap.
1250 *
1251 * @returns Number of free bytes in the hypervisor heap.
1252 */
1253VMMDECL(size_t) MMHyperHeapGetFreeSize(PVM pVM)
1254{
1255 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbFree;
1256}
1257
1258
1259/**
1260 * Query the size of the hypervisor heap.
1261 *
1262 * @returns The size of the hypervisor heap in bytes.
1263 */
1264VMMDECL(size_t) MMHyperHeapGetSize(PVM pVM)
1265{
1266 return pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap;
1267}
1268
1269
1270/**
1271 * Converts a context neutral heap offset into a pointer.
1272 *
1273 * @returns Pointer to hyper heap data.
1274 * @param pVM The cross context VM structure.
1275 * @param offHeap The hyper heap offset.
1276 */
1277VMMDECL(void *) MMHyperHeapOffsetToPtr(PVM pVM, uint32_t offHeap)
1278{
1279 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1280 return (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap) + offHeap;
1281}
1282
1283
1284/**
1285 * Converts a context specific heap pointer into a neutral heap offset.
1286 *
1287 * @returns Heap offset.
1288 * @param pVM The cross context VM structure.
1289 * @param pv Pointer to the heap data.
1290 */
1291VMMDECL(uint32_t) MMHyperHeapPtrToOffset(PVM pVM, void *pv)
1292{
1293 size_t offHeap = (uint8_t *)pv - (uint8_t *)pVM->mm.s.CTX_SUFF(pHyperHeap);
1294 Assert(offHeap - MMYPERHEAP_HDR_SIZE <= pVM->mm.s.CTX_SUFF(pHyperHeap)->cbHeap);
1295 return (uint32_t)offHeap;
1296}
1297
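/* Round-trip sketch (illustrative, not from the original source; pv is assumed
 * to point at data on the hyper heap): the offset is relative to the heap
 * structure itself, so converting back and forth is lossless:
 *
 * @code
 *      uint32_t offHeap = MMHyperHeapPtrToOffset(pVM, pv);
 *      Assert(MMHyperHeapOffsetToPtr(pVM, offHeap) == pv);
 * @endcode
 */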
1298
1299/**
1300 * Query the address and size of the hypervisor memory area.
1301 *
1302 * @returns Base address of the hypervisor area.
1303 * @param pVM The cross context VM structure.
1304 * @param pcb Where to store the size of the hypervisor area. (out)
1305 */
1306VMMDECL(RTGCPTR) MMHyperGetArea(PVM pVM, size_t *pcb)
1307{
1308 if (pcb)
1309 *pcb = pVM->mm.s.cbHyperArea;
1310 return pVM->mm.s.pvHyperAreaGC;
1311}
1312
1313
1314/**
1315 * Checks if an address is within the hypervisor memory area.
1316 *
1317 * @returns true if inside.
1318 * @returns false if outside.
1319 * @param pVM The cross context VM structure.
1320 * @param GCPtr The pointer to check.
1321 *
1322 * @note Caller must check that we're in raw-mode before calling!
1323 */
1324VMMDECL(bool) MMHyperIsInsideArea(PVM pVM, RTGCPTR GCPtr)
1325{
1326 Assert(VM_IS_RAW_MODE_ENABLED(pVM));
1327 return (RTGCUINTPTR)GCPtr - (RTGCUINTPTR)pVM->mm.s.pvHyperAreaGC < pVM->mm.s.cbHyperArea;
1328}
1329