VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/MMHeap.cpp@ 97295

Last change on this file since 97295 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.0 KB
/* $Id: MMHeap.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
/** @file
 * MM - Memory Manager - Heap.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HEAP
#include <VBox/vmm/mm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/pgm.h>
#include "MMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <iprt/errcore.h>
#include <VBox/param.h>
#include <VBox/log.h>

#include <iprt/alloc.h>
#include <iprt/assert.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static void *mmR3HeapAlloc(PMMHEAP pHeap, MMTAG enmTag, size_t cbSize, bool fZero);


/**
 * Allocate and initialize a heap structure and its associated substructures.
 *
 * @returns VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   ppHeap  Where to store the heap pointer.
 */
int mmR3HeapCreateU(PUVM pUVM, PMMHEAP *ppHeap)
{
    PMMHEAP pHeap = (PMMHEAP)RTMemAllocZ(sizeof(MMHEAP) + sizeof(MMHEAPSTAT));
    if (pHeap)
    {
        int rc = RTCritSectInit(&pHeap->Lock);
        if (RT_SUCCESS(rc))
        {
            /*
             * Initialize the global stat record.
             */
            pHeap->pUVM = pUVM;
            pHeap->Stat.pHeap = pHeap;
#ifdef MMR3HEAP_WITH_STATISTICS
            PMMHEAPSTAT pStat = &pHeap->Stat;
            STAMR3RegisterU(pUVM, &pStat->cAllocations,   STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cAllocations",   STAMUNIT_CALLS, "Number of MMR3HeapAlloc() calls.");
            STAMR3RegisterU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cReallocations", STAMUNIT_CALLS, "Number of MMR3HeapRealloc() calls.");
            STAMR3RegisterU(pUVM, &pStat->cFrees,         STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cFrees",         STAMUNIT_CALLS, "Number of MMR3HeapFree() calls.");
            STAMR3RegisterU(pUVM, &pStat->cFailures,      STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cFailures",      STAMUNIT_COUNT, "Number of failures.");
            STAMR3RegisterU(pUVM, &pStat->cbCurAllocated, sizeof(pStat->cbCurAllocated) == sizeof(uint32_t) ? STAMTYPE_U32 : STAMTYPE_U64,
                            STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbCurAllocated", STAMUNIT_BYTES, "Number of bytes currently allocated.");
            STAMR3RegisterU(pUVM, &pStat->cbAllocated,    STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbAllocated",    STAMUNIT_BYTES, "Total number of bytes allocated.");
            STAMR3RegisterU(pUVM, &pStat->cbFreed,        STAMTYPE_U64, STAMVISIBILITY_ALWAYS, "/MM/R3Heap/cbFreed",        STAMUNIT_BYTES, "Total number of bytes freed.");
#endif
            *ppHeap = pHeap;
            return VINF_SUCCESS;
        }
        AssertRC(rc);
        RTMemFree(pHeap);
    }
    AssertMsgFailed(("failed to allocate heap structure\n"));
    return VERR_NO_MEMORY;
}


/**
 * MM heap statistics tree destroy callback.
 */
static DECLCALLBACK(int) mmR3HeapStatTreeDestroy(PAVLULNODECORE pCore, void *pvParam)
{
    RT_NOREF(pvParam);

    /* Don't bother deregistering the stat samples as they get destroyed by STAM. */
    RTMemFree(pCore);
    return VINF_SUCCESS;
}


/**
 * Destroy a heap.
 *
 * @param   pHeap   Heap handle.
 */
void mmR3HeapDestroy(PMMHEAP pHeap)
{
    /*
     * Start by deleting the lock, that'll trap anyone
     * attempting to use the heap.
     */
    RTCritSectDelete(&pHeap->Lock);

    /*
     * Walk the node list and free all the memory.
     */
    PMMHEAPHDR pHdr = pHeap->pHead;
    while (pHdr)
    {
        void *pv = pHdr;
        pHdr = pHdr->pNext;
        RTMemFree(pv);
    }

    /*
     * Free the stat nodes.
     */
    RTAvlULDestroy(&pHeap->pStatTree, mmR3HeapStatTreeDestroy, NULL);
    RTMemFree(pHeap);
}


/**
 * Allocate memory associating it with the VM for collective cleanup.
 *
 * The memory will be allocated from the default heap but a header
 * is added in which we keep track of which VM it belongs to and chain
 * all the allocations together so they can be freed in one go.
 *
 * This interface is typically used for memory blocks which will not be
 * freed during the life of the VM.
 *
 * @returns Pointer to allocated memory.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 */
VMMR3DECL(void *) MMR3HeapAllocU(PUVM pUVM, MMTAG enmTag, size_t cbSize)
{
    Assert(pUVM->mm.s.pHeap);
    return mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, false);
}
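

/*
 * Editor's illustrative sketch (not part of the upstream file): typical use of
 * the tagged heap API from ring-3 code.  The PUVM pointer and the MM_TAG_CFGM
 * tag are stand-ins for whatever the caller actually has at hand; any MM_TAG_*
 * value will do, it only selects the statistics bucket the allocation is
 * charged to.
 */
#if 0 /* example only */
static int exampleAllocateScratchBuffer(PUVM pUVM)
{
    /* Allocate a 256 byte scratch buffer charged to the CFGM tag. */
    void *pvScratch = MMR3HeapAllocU(pUVM, MM_TAG_CFGM, 256);
    if (!pvScratch)
        return VERR_NO_MEMORY;

    /* ... use the buffer ... */

    /* Explicit freeing is optional; anything left over is released by
       mmR3HeapDestroy() when the VM is torn down. */
    MMR3HeapFree(pvScratch);
    return VINF_SUCCESS;
}
#endif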


/**
 * Allocate memory associating it with the VM for collective cleanup.
 *
 * The memory will be allocated from the default heap but a header
 * is added in which we keep track of which VM it belongs to and chain
 * all the allocations together so they can be freed in one go.
 *
 * This interface is typically used for memory blocks which will not be
 * freed during the life of the VM.
 *
 * @returns Pointer to allocated memory.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 */
VMMR3DECL(void *) MMR3HeapAlloc(PVM pVM, MMTAG enmTag, size_t cbSize)
{
    return mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, false);
}


/**
 * Same as MMR3HeapAllocU(), but returns a status code and the pointer via @a ppv.
 *
 * @returns VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 */
VMMR3DECL(int) MMR3HeapAllocExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv)
{
    Assert(pUVM->mm.s.pHeap);
    void *pv = mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, false);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Same as MMR3HeapAlloc(), but returns a status code and the pointer via @a ppv.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 */
VMMR3DECL(int) MMR3HeapAllocEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv)
{
    void *pv = mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, false);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
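

/*
 * Editor's illustrative sketch (not part of the upstream file): the Ex
 * variants follow the usual VBox status code convention, which makes them
 * easy to chain with AssertRCReturn and friends.  pVM is assumed to be a
 * valid cross context VM structure; MM_TAG_VM is just an example tag.
 */
#if 0 /* example only */
static int exampleAllocTracked(PVM pVM, void **ppvPayload)
{
    int rc = MMR3HeapAllocEx(pVM, MM_TAG_VM, sizeof(uint64_t) * 16, ppvPayload);
    AssertRCReturn(rc, rc);    /* VERR_NO_MEMORY on failure, *ppvPayload untouched. */
    return VINF_SUCCESS;
}
#endif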


/**
 * Same as MMR3HeapAlloc() only the memory is zeroed.
 *
 * @returns Pointer to allocated memory.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 */
VMMR3DECL(void *) MMR3HeapAllocZU(PUVM pUVM, MMTAG enmTag, size_t cbSize)
{
    return mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, true);
}


/**
 * Same as MMR3HeapAlloc() only the memory is zeroed.
 *
 * @returns Pointer to allocated memory.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 */
VMMR3DECL(void *) MMR3HeapAllocZ(PVM pVM, MMTAG enmTag, size_t cbSize)
{
    return mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, true);
}


/**
 * Same as MMR3HeapAllocZ(), but returns a status code and the pointer via @a ppv.
 *
 * @returns VBox status code.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 */
VMMR3DECL(int) MMR3HeapAllocZExU(PUVM pUVM, MMTAG enmTag, size_t cbSize, void **ppv)
{
    Assert(pUVM->mm.s.pHeap);
    void *pv = mmR3HeapAlloc(pUVM->mm.s.pHeap, enmTag, cbSize, true);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}


/**
 * Same as MMR3HeapAllocZ(), but returns a status code and the pointer via @a ppv.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 * @param   ppv     Where to store the pointer to the allocated memory on success.
 */
VMMR3DECL(int) MMR3HeapAllocZEx(PVM pVM, MMTAG enmTag, size_t cbSize, void **ppv)
{
    void *pv = mmR3HeapAlloc(pVM->pUVM->mm.s.pHeap, enmTag, cbSize, true);
    if (pv)
    {
        *ppv = pv;
        return VINF_SUCCESS;
    }
    return VERR_NO_MEMORY;
}
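

/*
 * Editor's illustrative sketch (not part of the upstream file): the Z variants
 * are the natural choice for structures that rely on zero initialization.  The
 * EXAMPLESTATE type and the MM_TAG_VM tag are placeholders for this example.
 */
#if 0 /* example only */
typedef struct EXAMPLESTATE
{
    uint32_t cUsers;    /* starts out zero thanks to MMR3HeapAllocZ */
    void    *pvData;    /* starts out NULL */
} EXAMPLESTATE;

static EXAMPLESTATE *exampleCreateState(PVM pVM)
{
    return (EXAMPLESTATE *)MMR3HeapAllocZ(pVM, MM_TAG_VM, sizeof(EXAMPLESTATE));
}
#endif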


/**
 * Links @a pHdr into the heap block list (tail).
 *
 * @param   pHeap   Heap handle.
 * @param   pHdr    The block to link.
 *
 * @note    Caller has locked the heap!
 */
DECLINLINE(void) mmR3HeapLink(PMMHEAP pHeap, PMMHEAPHDR pHdr)
{
    /* Tail insertion: */
    pHdr->pNext = NULL;
    PMMHEAPHDR pTail = pHeap->pTail;
    pHdr->pPrev = pTail;
    if (pTail)
    {
        Assert(!pTail->pNext);
        pTail->pNext = pHdr;
    }
    else
    {
        Assert(!pHeap->pHead);
        pHeap->pHead = pHdr;
    }
    pHeap->pTail = pHdr;
}


/**
 * Unlinks @a pHdr from the heap block list.
 *
 * @param   pHeap   Heap handle.
 * @param   pHdr    The block to unlink.
 *
 * @note    Caller has locked the heap!
 */
DECLINLINE(void) mmR3HeapUnlink(PMMHEAP pHeap, PMMHEAPHDR pHdr)
{
    PMMHEAPHDR const pPrev = pHdr->pPrev;
    PMMHEAPHDR const pNext = pHdr->pNext;
    if (pPrev)
        pPrev->pNext = pNext;
    else
        pHeap->pHead = pNext;

    if (pNext)
        pNext->pPrev = pPrev;
    else
        pHeap->pTail = pPrev;
}


/**
 * Allocate memory from the heap.
 *
 * @returns Pointer to allocated memory.
 * @param   pHeap   Heap handle.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   cbSize  Size of the block.
 * @param   fZero   Whether or not to zero the memory block.
 */
void *mmR3HeapAlloc(PMMHEAP pHeap, MMTAG enmTag, size_t cbSize, bool fZero)
{
#ifdef MMR3HEAP_WITH_STATISTICS
    RTCritSectEnter(&pHeap->Lock);

    /*
     * Find/alloc statistics nodes.
     */
    pHeap->Stat.cAllocations++;
    PMMHEAPSTAT pStat = (PMMHEAPSTAT)RTAvlULGet(&pHeap->pStatTree, (AVLULKEY)enmTag);
    if (pStat)
    {
        pStat->cAllocations++;

        RTCritSectLeave(&pHeap->Lock);
    }
    else
    {
        pStat = (PMMHEAPSTAT)RTMemAllocZ(sizeof(MMHEAPSTAT));
        if (!pStat)
        {
            pHeap->Stat.cFailures++;
            AssertMsgFailed(("Failed to allocate heap stat record.\n"));
            RTCritSectLeave(&pHeap->Lock);
            return NULL;
        }
        pStat->Core.Key = (AVLULKEY)enmTag;
        pStat->pHeap    = pHeap;
        RTAvlULInsert(&pHeap->pStatTree, &pStat->Core);

        pStat->cAllocations++;
        RTCritSectLeave(&pHeap->Lock);

        /* register the statistics */
        PUVM pUVM = pHeap->pUVM;
        const char *pszTag = mmGetTagName(enmTag);
        STAMR3RegisterFU(pUVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.",  "/MM/R3Heap/%s", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cAllocations,   STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3HeapAlloc() calls.",      "/MM/R3Heap/%s/cAllocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cReallocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3HeapRealloc() calls.",    "/MM/R3Heap/%s/cReallocations", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFrees,         STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS, "Number of MMR3HeapFree() calls.",       "/MM/R3Heap/%s/cFrees", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cFailures,      STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.",                   "/MM/R3Heap/%s/cFailures", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbAllocated,    STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes allocated.",      "/MM/R3Heap/%s/cbAllocated", pszTag);
        STAMR3RegisterFU(pUVM, &pStat->cbFreed,        STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of bytes freed.",          "/MM/R3Heap/%s/cbFreed", pszTag);
    }
#else
    RT_NOREF_PV(enmTag);
#endif

    /*
     * Validate input.
     */
    if (cbSize == 0)
    {
#ifdef MMR3HEAP_WITH_STATISTICS
        RTCritSectEnter(&pHeap->Lock);
        pStat->cFailures++;
        pHeap->Stat.cFailures++;
        RTCritSectLeave(&pHeap->Lock);
#endif
        AssertFailed();
        return NULL;
    }

    /*
     * Allocate heap block.
     */
    cbSize = RT_ALIGN_Z(cbSize, MMR3HEAP_SIZE_ALIGNMENT) + sizeof(MMHEAPHDR);
    PMMHEAPHDR const pHdr = (PMMHEAPHDR)(fZero ? RTMemAllocZ(cbSize) : RTMemAlloc(cbSize));
    if (pHdr)
    { /* likely */ }
    else
    {
        AssertMsgFailed(("Failed to allocate heap block %d, enmTag=%x(%.4s).\n", cbSize, enmTag, &enmTag));
#ifdef MMR3HEAP_WITH_STATISTICS
        RTCritSectEnter(&pHeap->Lock);
        pStat->cFailures++;
        pHeap->Stat.cFailures++;
        RTCritSectLeave(&pHeap->Lock);
#endif
        return NULL;
    }
    Assert(!((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)));

    /*
     * Init and link in the header.
     */
#ifdef MMR3HEAP_WITH_STATISTICS
    pHdr->pStat  = pStat;
#else
    pHdr->pStat  = &pHeap->Stat;
#endif
    pHdr->cbSize = cbSize;

    RTCritSectEnter(&pHeap->Lock);

    mmR3HeapLink(pHeap, pHdr);

    /*
     * Update statistics
     */
#ifdef MMR3HEAP_WITH_STATISTICS
    pStat->cbAllocated         += cbSize;
    pStat->cbCurAllocated      += cbSize;
    pHeap->Stat.cbAllocated    += cbSize;
    pHeap->Stat.cbCurAllocated += cbSize;
#endif

    RTCritSectLeave(&pHeap->Lock);

    return pHdr + 1;
}
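

/*
 * Editor's illustrative sketch (not part of the upstream file): the
 * header-before-payload trick used above, in miniature.  The allocator
 * returns pHdr + 1, so the user pointer sits immediately after the header and
 * the header can be recovered with pointer arithmetic on free.  The names
 * below (EXHDR, exAlloc, exFree) are made up for the example.
 */
#if 0 /* example only */
typedef struct EXHDR
{
    struct EXHDR *pNext;    /* chain for bulk cleanup, like MMHEAPHDR::pNext */
    size_t        cb;       /* total size including this header */
} EXHDR;

static void *exAlloc(size_t cb)
{
    EXHDR *pHdr = (EXHDR *)RTMemAllocZ(sizeof(EXHDR) + cb);
    if (!pHdr)
        return NULL;
    pHdr->cb = sizeof(EXHDR) + cb;
    return pHdr + 1;                    /* user pointer is just past the header */
}

static void exFree(void *pv)
{
    if (!pv)
        return;
    EXHDR *pHdr = (EXHDR *)pv - 1;      /* step back over the header */
    RTMemFreeZ(pHdr, pHdr->cb);         /* clear and free, like MMR3HeapFree */
}
#endif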


/**
 * Reallocate memory allocated with MMR3HeapAlloc(), MMR3HeapAllocZ() or
 * MMR3HeapRealloc().
 *
 * Any additional memory is zeroed (only reliable if the initial allocation was
 * also of the zeroing kind).
 *
 * @returns Pointer to reallocated memory.
 * @param   pv          Pointer to the memory block to reallocate.
 *                      Must not be NULL!
 * @param   cbNewSize   New block size.
 */
VMMR3DECL(void *) MMR3HeapRealloc(void *pv, size_t cbNewSize)
{
    AssertMsg(pv, ("Invalid pointer pv=%p\n", pv));
    if (!pv)
        return NULL;

    /*
     * If newsize is zero then this is a free.
     */
    if (!cbNewSize)
    {
        MMR3HeapFree(pv);
        return NULL;
    }

    /*
     * Validate header.
     */
    PMMHEAPHDR const pHdr      = (PMMHEAPHDR)pv - 1;
    size_t const     cbOldSize = pHdr->cbSize;
    AssertMsgReturn(   !(cbOldSize & (MMR3HEAP_SIZE_ALIGNMENT - 1))
                    && !((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)),
                    ("Invalid heap header! pv=%p, size=%#x\n", pv, cbOldSize),
                    NULL);
    Assert(pHdr->pStat != NULL);
    Assert(!((uintptr_t)pHdr->pNext & (RTMEM_ALIGNMENT - 1)));
    Assert(!((uintptr_t)pHdr->pPrev & (RTMEM_ALIGNMENT - 1)));

    PMMHEAP pHeap = pHdr->pStat->pHeap;

    /*
     * Unlink the header before we reallocate the block.
     */
    RTCritSectEnter(&pHeap->Lock);
#ifdef MMR3HEAP_WITH_STATISTICS
    pHdr->pStat->cReallocations++;
    pHeap->Stat.cReallocations++;
#endif
    mmR3HeapUnlink(pHeap, pHdr);
    RTCritSectLeave(&pHeap->Lock);

    /*
     * Reallocate the block. Clear added space.
     */
    cbNewSize = RT_ALIGN_Z(cbNewSize, MMR3HEAP_SIZE_ALIGNMENT) + sizeof(MMHEAPHDR);
    PMMHEAPHDR pHdrNew = (PMMHEAPHDR)RTMemReallocZ(pHdr, cbOldSize, cbNewSize);
    if (pHdrNew)
        pHdrNew->cbSize = cbNewSize;
    else
    {
        RTCritSectEnter(&pHeap->Lock);
        mmR3HeapLink(pHeap, pHdr);
#ifdef MMR3HEAP_WITH_STATISTICS
        pHdr->pStat->cFailures++;
        pHeap->Stat.cFailures++;
#endif
        RTCritSectLeave(&pHeap->Lock);
        return NULL;
    }

    RTCritSectEnter(&pHeap->Lock);

    /*
     * Relink the header.
     */
    mmR3HeapLink(pHeap, pHdrNew);

    /*
     * Update statistics. Note: the delta must be computed against cbOldSize,
     * as pHdrNew->cbSize has already been set to cbNewSize above.
     */
#ifdef MMR3HEAP_WITH_STATISTICS
    pHdrNew->pStat->cbAllocated    += cbNewSize - cbOldSize;
    pHdrNew->pStat->cbCurAllocated += cbNewSize - cbOldSize;
    pHeap->Stat.cbAllocated        += cbNewSize - cbOldSize;
    pHeap->Stat.cbCurAllocated     += cbNewSize - cbOldSize;
#endif

    RTCritSectLeave(&pHeap->Lock);

    return pHdrNew + 1;
}
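

/*
 * Editor's illustrative sketch (not part of the upstream file): growing a
 * tag-tracked array with MMR3HeapRealloc().  Note that the function frees the
 * block and returns NULL when cbNewSize is zero, and returns NULL (leaving the
 * original block intact and relinked) when the underlying reallocation fails.
 * All names below are placeholders.
 */
#if 0 /* example only */
static int exampleGrowArray(PVM pVM, uint32_t **ppau32, uint32_t *pcEntries, uint32_t cNew)
{
    uint32_t *pau32 = *ppau32;
    if (!pau32)
        pau32 = (uint32_t *)MMR3HeapAllocZ(pVM, MM_TAG_VM, cNew * sizeof(uint32_t));
    else
        pau32 = (uint32_t *)MMR3HeapRealloc(pau32, cNew * sizeof(uint32_t));
    if (!pau32)
        return VERR_NO_MEMORY;      /* *ppau32 is still valid on realloc failure */
    *ppau32    = pau32;
    *pcEntries = cNew;
    return VINF_SUCCESS;
}
#endif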


/**
 * Duplicates the specified string.
 *
 * @returns Pointer to the duplicate.
 * @returns NULL on failure or when the input is NULL.
 * @param   pUVM    Pointer to the user mode VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   psz     The string to duplicate. NULL is allowed.
 */
VMMR3DECL(char *) MMR3HeapStrDupU(PUVM pUVM, MMTAG enmTag, const char *psz)
{
    if (!psz)
        return NULL;
    AssertPtr(psz);

    size_t cch = strlen(psz) + 1;
    char *pszDup = (char *)MMR3HeapAllocU(pUVM, enmTag, cch);
    if (pszDup)
        memcpy(pszDup, psz, cch);
    return pszDup;
}


/**
 * Duplicates the specified string.
 *
 * @returns Pointer to the duplicate.
 * @returns NULL on failure or when the input is NULL.
 * @param   pVM     The cross context VM structure.
 * @param   enmTag  Statistics tag. Statistics are collected on a per tag
 *                  basis in addition to a global one. Thus we can easily
 *                  identify how memory is used by the VM. See MM_TAG_*.
 * @param   psz     The string to duplicate. NULL is allowed.
 */
VMMR3DECL(char *) MMR3HeapStrDup(PVM pVM, MMTAG enmTag, const char *psz)
{
    return MMR3HeapStrDupU(pVM->pUVM, enmTag, psz);
}


/**
 * Allocating string printf.
 *
 * @returns Pointer to the string.
 * @param   pVM         The cross context VM structure.
 * @param   enmTag      The statistics tag.
 * @param   pszFormat   The format string.
 * @param   ...         Format arguments.
 */
VMMR3DECL(char *) MMR3HeapAPrintf(PVM pVM, MMTAG enmTag, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    char *psz = MMR3HeapAPrintfVU(pVM->pUVM, enmTag, pszFormat, va);
    va_end(va);
    return psz;
}


/**
 * Allocating string printf.
 *
 * @returns Pointer to the string.
 * @param   pUVM        Pointer to the user mode VM structure.
 * @param   enmTag      The statistics tag.
 * @param   pszFormat   The format string.
 * @param   ...         Format arguments.
 */
VMMR3DECL(char *) MMR3HeapAPrintfU(PUVM pUVM, MMTAG enmTag, const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    char *psz = MMR3HeapAPrintfVU(pUVM, enmTag, pszFormat, va);
    va_end(va);
    return psz;
}


/**
 * Allocating string printf.
 *
 * @returns Pointer to the string.
 * @param   pVM         The cross context VM structure.
 * @param   enmTag      The statistics tag.
 * @param   pszFormat   The format string.
 * @param   va          Format arguments.
 */
VMMR3DECL(char *) MMR3HeapAPrintfV(PVM pVM, MMTAG enmTag, const char *pszFormat, va_list va)
{
    return MMR3HeapAPrintfVU(pVM->pUVM, enmTag, pszFormat, va);
}


/**
 * Allocating string printf.
 *
 * @returns Pointer to the string.
 * @param   pUVM        Pointer to the user mode VM structure.
 * @param   enmTag      The statistics tag.
 * @param   pszFormat   The format string.
 * @param   va          Format arguments.
 */
VMMR3DECL(char *) MMR3HeapAPrintfVU(PUVM pUVM, MMTAG enmTag, const char *pszFormat, va_list va)
{
    /*
     * The lazy bird way.
     */
    char *psz;
    int cch = RTStrAPrintfV(&psz, pszFormat, va);
    if (cch < 0)
        return NULL;
    Assert(psz[cch] == '\0');
    char *pszRet = (char *)MMR3HeapAllocU(pUVM, enmTag, cch + 1);
    if (pszRet)
        memcpy(pszRet, psz, cch + 1);
    RTStrFree(psz);
    return pszRet;
}
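

/*
 * Editor's illustrative sketch (not part of the upstream file): the string
 * helpers combine the convenience of RTStrAPrintf-style formatting with the
 * tag tracking and collective cleanup of the MM heap.  The pUVM parameter,
 * the MM_TAG_CFGM tag and the path format are assumptions for the example.
 */
#if 0 /* example only */
static char *exampleBuildPath(PUVM pUVM, const char *pszDevice, uint32_t iInstance)
{
    /* The result is charged to MM_TAG_CFGM and can be released with
       MMR3HeapFree() or simply left for VM teardown to reclaim. */
    return MMR3HeapAPrintfU(pUVM, MM_TAG_CFGM, "/Devices/%s/%u/", pszDevice, iInstance);
}
#endif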


/**
 * Releases memory allocated with MMR3HeapAlloc() or MMR3HeapRealloc().
 *
 * The memory is cleared/filled before freeing to mitigate heap spraying and
 * info leaks, and to help detect use-after-free trouble.
 *
 * @param   pv      Pointer to the memory block to free.
 */
VMMR3DECL(void) MMR3HeapFree(void *pv)
{
    /* Ignore NULL pointers. */
    if (!pv)
        return;

    /*
     * Validate header.
     */
    PMMHEAPHDR const pHdr         = (PMMHEAPHDR)pv - 1;
    size_t const     cbAllocation = pHdr->cbSize;
    AssertMsgReturnVoid(   !(pHdr->cbSize & (MMR3HEAP_SIZE_ALIGNMENT - 1))
                        && !((uintptr_t)pHdr & (RTMEM_ALIGNMENT - 1)),
                        ("Invalid heap header! pv=%p, size=%#x\n", pv, pHdr->cbSize));
    AssertPtr(pHdr->pStat);
    Assert(!((uintptr_t)pHdr->pNext & (RTMEM_ALIGNMENT - 1)));
    Assert(!((uintptr_t)pHdr->pPrev & (RTMEM_ALIGNMENT - 1)));

    /*
     * Update statistics
     */
    PMMHEAP pHeap = pHdr->pStat->pHeap;
    RTCritSectEnter(&pHeap->Lock);

#ifdef MMR3HEAP_WITH_STATISTICS
    pHdr->pStat->cFrees++;
    pHeap->Stat.cFrees++;
    pHdr->pStat->cbFreed        += cbAllocation;
    pHeap->Stat.cbFreed         += cbAllocation;
    pHdr->pStat->cbCurAllocated -= cbAllocation;
    pHeap->Stat.cbCurAllocated  -= cbAllocation;
#endif

    /*
     * Unlink it.
     */
    mmR3HeapUnlink(pHeap, pHdr);

    RTCritSectLeave(&pHeap->Lock);

    /*
     * Free the memory. We clear just to be on the safe side wrt
     * heap spraying and leaking sensitive info (also helps detecting
     * double freeing).
     */
    RTMemFreeZ(pHdr, cbAllocation);
}