VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp @ 94293

Last change on this file since 94293 was 93115, checked in by vboxsync, 3 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 30.9 KB
1/* $Id: alloc-ef-r0drv.cpp 93115 2022-01-01 11:31:46Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence for ring-0 drivers.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS
32#include "internal/iprt.h"
33#include <iprt/mem.h>
34
35#include <iprt/alloc.h>
36#include <iprt/asm.h>
37#include <iprt/asm-amd64-x86.h>
38#include <iprt/assert.h>
39#include <iprt/errcore.h>
40#include <iprt/log.h>
41#include <iprt/memobj.h>
42#include <iprt/param.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/mem.h"
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#if defined(DOXYGEN_RUNNING)
53# define RTR0MEM_EF_IN_FRONT
54#endif
55
56/** @def RTR0MEM_EF_SIZE
57 * The size of the fence. This must be page aligned.
58 */
59#define RTR0MEM_EF_SIZE PAGE_SIZE
60
61/** @def RTR0MEM_EF_ALIGNMENT
62 * The allocation alignment, power of two of course.
63 *
64 * Use this for working around misaligned sizes, usually stemming from
65 * allocating a string or something after the main structure. When you
66 * encounter this, please change the allocation to use RTMemAllocVar or RTMemAllocZVar.
67 */
68#if 0
69# define RTR0MEM_EF_ALIGNMENT (ARCH_BITS / 8)
70#else
71# define RTR0MEM_EF_ALIGNMENT 1
72#endif
73
74/** @def RTR0MEM_EF_IN_FRONT
75 * Define this to put the fence up in front of the block.
76 * The default (when this isn't defined) is to put it up after the block.
77 */
78//# define RTR0MEM_EF_IN_FRONT
79
80/** @def RTR0MEM_EF_FREE_DELAYED
81 * This define will enable free() delay and protection of the freed data
82 * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
83 * the total byte threshold for the delayed blocks.
84 * Delayed blocks do not consume any physical memory, only virtual address space.
85 */
86#define RTR0MEM_EF_FREE_DELAYED (20 * _1M)
87
88/** @def RTR0MEM_EF_FREE_FILL
89 * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
90 * in the block before freeing/decommitting it. This is useful in GDB since GDB
91 * appears to be able to read the content of the page even after it's been
92 * decommitted.
93 */
94#define RTR0MEM_EF_FREE_FILL 'f'
95
96/** @def RTR0MEM_EF_FILLER
97 * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
98 * memory when the API doesn't require it to be zero'd.
99 */
100#define RTR0MEM_EF_FILLER 0xef
101
102/** @def RTR0MEM_EF_NOMAN_FILLER
103 * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
104 * unprotected but not allocated area of memory, the so-called no man's land.
105 */
106#define RTR0MEM_EF_NOMAN_FILLER 0xaa
107
108/** @def RTR0MEM_EF_FENCE_FILLER
109 * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
110 * fence itself, as debuggers can usually read it.
111 */
112#define RTR0MEM_EF_FENCE_FILLER 0xcc
113
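/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how the macros above shape a guarded allocation, assuming a 4 KiB page,
 * RTR0MEM_EF_ALIGNMENT of 1 and a request of 100 bytes.  The arithmetic
 * mirrors rtR0MemAlloc() further down.
 *
 *   cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE
 *           = 4096 + 4096 = 8192 bytes for the 100 byte request.
 *
 *   Default layout (fence behind the block, catches overruns):
 *     pvBlock                                            pvBlock + cbBlock
 *     | no man's land (0xaa) | user block (100 B) | fence page (no access) |
 *                             ^pv = pvEFence - cbAligned
 *
 *   RTR0MEM_EF_IN_FRONT layout (catches underruns):
 *     | fence page (no access) | user block (100 B) | no man's land (0xaa) |
 *                               ^pv = pvBlock + RTR0MEM_EF_SIZE
 */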
114
115/*********************************************************************************************************************************
116* Header Files *
117*********************************************************************************************************************************/
118#ifdef RT_OS_WINDOWS
119# include <iprt/win/windows.h>
120#elif !defined(RT_OS_FREEBSD)
121# include <sys/mman.h>
122#endif
123#include <iprt/avl.h>
124#include <iprt/thread.h>
125
126
127/*********************************************************************************************************************************
128* Structures and Typedefs *
129*********************************************************************************************************************************/
130/**
131 * Allocation types.
132 */
133typedef enum RTMEMTYPE
134{
135 RTMEMTYPE_RTMEMALLOC,
136 RTMEMTYPE_RTMEMALLOCZ,
137 RTMEMTYPE_RTMEMREALLOC,
138 RTMEMTYPE_RTMEMFREE,
139 RTMEMTYPE_RTMEMFREEZ,
140
141 RTMEMTYPE_NEW,
142 RTMEMTYPE_NEW_ARRAY,
143 RTMEMTYPE_DELETE,
144 RTMEMTYPE_DELETE_ARRAY
145} RTMEMTYPE;
146
147/**
148 * Node tracking a memory allocation.
149 */
150typedef struct RTR0MEMEFBLOCK
151{
152 /** AVL node core, key is the user block pointer. */
153 AVLPVNODECORE Core;
154 /** Allocation type. */
155 RTMEMTYPE enmType;
156 /** The memory object. */
157 RTR0MEMOBJ hMemObj;
158 /** The unaligned size of the block. */
159 size_t cbUnaligned;
160 /** The aligned size of the block. */
161 size_t cbAligned;
162 /** The allocation tag (read-only string). */
163 const char *pszTag;
164 /** The return address of the allocator function. */
165 void *pvCaller;
166 /** Line number of the alloc call. */
167 unsigned iLine;
168 /** File from which the allocation was made. */
169 const char *pszFile;
170 /** Function from which the allocation was made. */
171 const char *pszFunction;
172} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
173
174
175
176/*********************************************************************************************************************************
177* Global Variables *
178*********************************************************************************************************************************/
179/** Spinlock protecting all the block globals. */
180static volatile uint32_t g_BlocksLock;
181/** Tree tracking the allocations. */
182static AVLPVTREE g_BlocksTree;
183
184#ifdef RTR0MEM_EF_FREE_DELAYED
185/** Head of the delayed blocks. */
186static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
187/** Tail of the delayed blocks. */
188static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
189/** Number of bytes in the delay list (includes fences). */
190static volatile size_t g_cbBlocksDelay;
191#endif /* RTR0MEM_EF_FREE_DELAYED */
192
193/** Array of pointers to watch for when freeing. */
194void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
195/** Enable logging of all freed memory. */
196bool gfRTMemFreeLog = false;
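
/*
 * Debugger usage sketch (added for clarity, not part of the original file):
 * the two globals above are meant to be poked from a debugger.  Dropping a
 * suspect pointer into the watch array makes rtR0MemFree() panic when that
 * pointer is freed, and setting gfRTMemFreeLog logs every free.  The
 * address below is a made-up example.
 *
 *     (gdb) set var gapvRTMemFreeWatch[0] = (void *)0xffffbadc0ffee000
 *     (gdb) set var gfRTMemFreeLog = 1
 */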
197
198
199/*********************************************************************************************************************************
200* Internal Functions *
201*********************************************************************************************************************************/
202
203
204/**
205 * @callback_method_impl{FNRTSTROUTPUT}
206 */
207static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
208{
209 RT_NOREF1(pvArg);
210 if (cbChars)
211 {
212 RTLogWriteDebugger(pachChars, cbChars);
213 RTLogWriteStdOut(pachChars, cbChars);
214 RTLogWriteUser(pachChars, cbChars);
215 }
216 return cbChars;
217}
218
219
220/**
221 * Complains about something.
222 */
223static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
224{
225 va_list args;
226 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
227 va_start(args, pszFormat);
228 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
229 va_end(args);
230 RTAssertDoPanic();
231}
232
233/**
234 * Log an event.
235 */
236DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
237{
238#if 0
239 va_list args;
240 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
241 va_start(args, pszFormat);
242 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
243 va_end(args);
244#else
245 NOREF(pszOp); NOREF(pszFormat);
246#endif
247}
248
249
250
251/**
252 * Acquires the lock.
253 */
254DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
255{
256 RTCCUINTREG uRet;
257 unsigned c = 0;
258 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
259 {
260 for (;;)
261 {
262 uRet = ASMIntDisableFlags();
263 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
264 break;
265 ASMSetFlags(uRet);
266 RTThreadSleepNoLog(((++c) >> 2) & 31);
267 }
268 }
269 else
270 {
271 for (;;)
272 {
273 uRet = ASMIntDisableFlags();
274 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
275 break;
276 ASMSetFlags(uRet);
277 ASMNopPause();
278 if (++c & 3)
279 ASMNopPause();
280 }
281 }
282 return uRet;
283}
284
285
286/**
287 * Releases the lock.
288 */
289DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
290{
291 Assert(g_BlocksLock == 1);
292 ASMAtomicXchgU32(&g_BlocksLock, 0);
293 ASMSetFlags(fSavedIntFlags);
294}
295
296
297/**
298 * Creates a block.
299 */
300DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
301 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
302{
303 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
304 if (pBlock)
305 {
306 pBlock->enmType = enmType;
307 pBlock->cbUnaligned = cbUnaligned;
308 pBlock->cbAligned = cbAligned;
309 pBlock->pszTag = pszTag;
310 pBlock->pvCaller = pvCaller;
311 pBlock->iLine = iLine;
312 pBlock->pszFile = pszFile;
313 pBlock->pszFunction = pszFunction;
314 }
315 return pBlock;
316}
317
318
319/**
320 * Frees a block.
321 */
322DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
323{
324 RTMemFree(pBlock);
325}
326
327
328/**
329 * Insert a block into the tree.
330 */
331DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
332{
333 pBlock->Core.Key = pv;
334 pBlock->hMemObj = hMemObj;
335 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
336 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
337 rtR0MemBlockUnlock(fSavedIntFlags);
338 AssertRelease(fRc);
339}
340
341
342/**
343 * Remove a block from the tree and return it to the caller.
344 */
345DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
346{
347 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
348 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
349 rtR0MemBlockUnlock(fSavedIntFlags);
350 return pBlock;
351}
352
353
354/**
355 * Gets a block.
356 */
357DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
358{
359 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
360 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
361 rtR0MemBlockUnlock(fSavedIntFlags);
362 return pBlock;
363}
364
365
366/**
367 * Dumps one allocation.
368 */
369static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
370{
371 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
372 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
373 pBlock->Core.Key,
374 (unsigned long)pBlock->cbUnaligned,
375 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
376 pBlock->pvCaller);
377 NOREF(pvUser);
378 return 0;
379}
380
381
382/**
383 * Dumps the allocated blocks.
384 * This is something which you should call from gdb.
385 */
386RT_C_DECLS_BEGIN
387void RTMemDump(void);
388RT_C_DECLS_END
389
390void RTMemDump(void)
391{
392 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
393 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
394}
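
/*
 * Example (added for clarity, not part of the original file): invoking the
 * dump from gdb, as the comment above suggests.  Each output line shows the
 * user pointer, the unaligned size, the alignment padding and the caller
 * address, matching RTMemDumpOne() above.
 *
 *     (gdb) call RTMemDump()
 */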
395
396#ifdef RTR0MEM_EF_FREE_DELAYED
397
398/**
399 * Insert a delayed block.
400 */
401DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
402{
403 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
404 pBlock->Core.pRight = NULL;
405 pBlock->Core.pLeft = NULL;
406 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
407 if (g_pBlocksDelayHead)
408 {
409 g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
410 pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
411 g_pBlocksDelayHead = pBlock;
412 }
413 else
414 {
415 g_pBlocksDelayTail = pBlock;
416 g_pBlocksDelayHead = pBlock;
417 }
418 g_cbBlocksDelay += cbBlock;
419 rtR0MemBlockUnlock(fSavedIntFlags);
420}
421
422/**
423 * Removes a delayed block.
424 */
425DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
426{
427 PRTR0MEMEFBLOCK pBlock = NULL;
428 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
429 if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
430 {
431 pBlock = g_pBlocksDelayTail;
432 if (pBlock)
433 {
434 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
435 if (pBlock->Core.pLeft)
436 pBlock->Core.pLeft->pRight = NULL;
437 else
438 g_pBlocksDelayHead = NULL;
439 g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
440 }
441 }
442 rtR0MemBlockUnlock(fSavedIntFlags);
443 return pBlock;
444}
445
446#endif /* RTR0MEM_EF_FREE_DELAYED */
447
448
449static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
450{
451 void *pv = pBlock->Core.Key;
452# ifdef RTR0MEM_EF_IN_FRONT
453 void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
454# else
455 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
456# endif
457 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
458
459 int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
460 if (RT_FAILURE(rc))
461 rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
462 pvBlock, cbBlock, rc);
463
464 rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
465 if (RT_FAILURE(rc))
466 rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
467 pBlock->hMemObj = NIL_RTR0MEMOBJ;
468
469 rtR0MemBlockFree(pBlock);
470}
471
472
473/**
474 * Initialization call; we shouldn't fail here.
475 */
476void rtR0MemEfInit(void)
477{
478
479}
480
481/**
482 * @callback_method_impl{AVLPVCALLBACK}
483 */
484static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
485{
486 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
487
488 /* Note! pszFile and pszFunction may be invalid at this point. */
489 rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
490 pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);
491
492 rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");
493
494 NOREF(pvUser);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Termination call.
501 *
502 * Will check and free memory.
503 */
504void rtR0MemEfTerm(void)
505{
506#ifdef RTR0MEM_EF_FREE_DELAYED
507 /*
508 * Release delayed frees.
509 */
510 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
511 for (;;)
512 {
513 PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
514 if (pBlock)
515 {
516 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
517 if (pBlock->Core.pLeft)
518 pBlock->Core.pLeft->pRight = NULL;
519 else
520 g_pBlocksDelayHead = NULL;
521 rtR0MemBlockUnlock(fSavedIntFlags);
522
523 rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
524
525 rtR0MemBlockLock();
526 }
527 else
528 break;
529 }
530 g_cbBlocksDelay = 0;
531 rtR0MemBlockUnlock(fSavedIntFlags);
532#endif
533
534 /*
535 * Complain about leaks. Then release them.
536 */
537 RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
538}
539
540
541/**
542 * Internal allocator.
543 */
544static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
545 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
546{
547 /*
548 * Sanity.
549 */
550 if ( RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
551 && RTR0MEM_EF_SIZE <= 0)
552 {
553 rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
554 return NULL;
555 }
556 if (!cbUnaligned)
557 {
558#if 1
559 rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
560 return NULL;
561#else
562 cbAligned = cbUnaligned = 1;
563#endif
564 }
565
566#ifndef RTR0MEM_EF_IN_FRONT
567 /* Alignment decreases fence accuracy, but this is at least partially
568 * counteracted by filling and checking the alignment padding. When the
569 * fence is in front then no extra alignment is needed. */
570 cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
571#endif
572
573 /*
574 * Allocate the trace block.
575 */
576 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
577 if (!pBlock)
578 {
579 rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
580 return NULL;
581 }
582
583 /*
584 * Allocate a block with page alignment space + the size of the E-fence.
585 */
586 void *pvBlock = NULL;
587 RTR0MEMOBJ hMemObj;
588 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
589 int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
590 if (RT_SUCCESS(rc))
591 pvBlock = RTR0MemObjAddress(hMemObj);
592 if (pvBlock)
593 {
594 /*
595 * Calc the start of the fence and the user block
596 * and then change the page protection of the fence.
597 */
598#ifdef RTR0MEM_EF_IN_FRONT
599 void *pvEFence = pvBlock;
600 void *pv = (char *)pvEFence + RTR0MEM_EF_SIZE;
601# ifdef RTR0MEM_EF_NOMAN_FILLER
602 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
603# endif
604#else
605 void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
606 void *pv = (char *)pvEFence - cbAligned;
607# ifdef RTR0MEM_EF_NOMAN_FILLER
608 memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
609 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
610# endif
611#endif
612
613#ifdef RTR0MEM_EF_FENCE_FILLER
614 memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
615#endif
616 rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
617 if (!rc)
618 {
619 rtR0MemBlockInsert(pBlock, pv, hMemObj);
620 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
621 memset(pv, 0, cbUnaligned);
622#ifdef RTR0MEM_EF_FILLER
623 else
624 memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
625#endif
626
627 rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
628 return pv;
629 }
630 rtR0MemComplain(pszOp, "RTR0MemObjProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
631 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
632 }
633 else
634 {
635 rtR0MemComplain(pszOp, "Failed to allocate %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
636 if (RT_SUCCESS(rc))
637 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
638 }
639
640 rtR0MemBlockFree(pBlock);
641 return NULL;
642}
643
644
645/**
646 * Internal free.
647 */
648static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, size_t cbUser, void *pvCaller, RT_SRC_POS_DECL)
649{
650 NOREF(enmType); RT_SRC_POS_NOREF();
651
652 /*
653 * Simple case.
654 */
655 if (!pv)
656 return;
657
658 /*
659 * Check watch points.
660 */
661 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
662 if (gapvRTMemFreeWatch[i] == pv)
663 RTAssertDoPanic();
664
665 /*
666 * Find the block.
667 */
668 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
669 if (pBlock)
670 {
671 if (gfRTMemFreeLog)
672 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
673
674#ifdef RTR0MEM_EF_NOMAN_FILLER
675 /*
676 * Check whether the no man's land is untouched.
677 */
678# ifdef RTR0MEM_EF_IN_FRONT
679 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
680 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
681 RTR0MEM_EF_NOMAN_FILLER);
682# else
683 /* Alignment must match allocation alignment in rtR0MemAlloc(). */
684 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
685 pBlock->cbAligned - pBlock->cbUnaligned,
686 RTR0MEM_EF_NOMAN_FILLER);
687 if (pvWrong)
688 RTAssertDoPanic();
689 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
690 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
691 RTR0MEM_EF_NOMAN_FILLER);
692# endif
693 if (pvWrong)
694 RTAssertDoPanic();
695#endif
696
697 /*
698 * Fill the user part of the block.
699 */
700 AssertMsg(enmType != RTMEMTYPE_RTMEMFREEZ || cbUser == pBlock->cbUnaligned,
701 ("cbUser=%#zx cbUnaligned=%#zx\n", cbUser, pBlock->cbUnaligned));
702 RT_NOREF(cbUser);
703 if (enmType == RTMEMTYPE_RTMEMFREEZ)
704 RT_BZERO(pv, pBlock->cbUnaligned);
705#ifdef RTR0MEM_EF_FREE_FILL
706 else
707 memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
708#endif
709
710#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
711 /*
712 * We're doing delayed freeing.
713 * That means we'll expand the E-fence to cover the entire block.
714 */
715 int rc = RTR0MemObjProtect(pBlock->hMemObj,
716# ifdef RTR0MEM_EF_IN_FRONT
717 RTR0MEM_EF_SIZE,
718# else
719 0 /*offSub*/,
720# endif
721 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
722 RTMEM_PROT_NONE);
723 if (RT_SUCCESS(rc))
724 {
725 /*
726 * Insert it into the free list and process pending frees.
727 */
728 rtR0MemBlockDelayInsert(pBlock);
729 while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
730 rtR0MemFreeBlock(pBlock, pszOp);
731 }
732 else
733 rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%#zx, rc=%d.\n", pv, pBlock->cbAligned, rc);
734
735#else /* !RTR0MEM_EF_FREE_DELAYED */
736 rtR0MemFreeBlock(pBlock, pszOp);
737#endif /* !RTR0MEM_EF_FREE_DELAYED */
738 }
739 else
740 rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
741}
742
743
744/**
745 * Internal realloc.
746 */
747static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
748 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
749{
750 /*
751 * Allocate new and copy.
752 */
753 if (!pvOld)
754 return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
755 if (!cbNew)
756 {
757 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
758 return NULL;
759 }
760
761 /*
762 * Get the block, allocate the new, copy the data, free the old one.
763 */
764 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
765 if (pBlock)
766 {
767 void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
768 if (pvRet)
769 {
770 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
771 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, 0, pvCaller, RT_SRC_POS_ARGS);
772 }
773 return pvRet;
774 }
775 rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
776 return NULL;
777}
778
779
780
781
782RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
783{
784 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
785}
786
787
788RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
789{
790 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
791}
792
793
794RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
795{
796 if (pv)
797 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
798}
799
800
801RTDECL(void) RTMemEfTmpFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
802{
803 if (pv)
804 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
805}
806
807
808RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
809{
810 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
811}
812
813
814RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
815{
816 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
817}
818
819
820RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
821{
822 size_t cbAligned;
823 if (cbUnaligned >= 16)
824 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
825 else
826 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
827 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
828}
829
830
831RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
832{
833 size_t cbAligned;
834 if (cbUnaligned >= 16)
835 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
836 else
837 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
838 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
839}
840
841
842RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
843{
844 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
845}
846
847RTDECL(void *) RTMemEfReallocZ(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
848{
849 void *pvDst = rtR0MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
850 if (pvDst && cbNew > cbOld)
851 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
852 return pvDst;
853}
854
855
856RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
857{
858 if (pv)
859 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), RT_SRC_POS_ARGS);
860}
861
862
863RTDECL(void) RTMemEfFreeZ(void *pv, size_t cb, RT_SRC_POS_DECL) RT_NO_THROW_DEF
864{
865 if (pv)
866 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), RT_SRC_POS_ARGS);
867}
868
869
870RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
871{
872 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
873 if (pvDst)
874 memcpy(pvDst, pvSrc, cb);
875 return pvDst;
876}
877
878
879RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
880{
881 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
882 if (pvDst)
883 {
884 memcpy(pvDst, pvSrc, cbSrc);
885 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
886 }
887 return pvDst;
888}
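
/*
 * Usage sketch (added for clarity, not part of the original file): callers
 * normally reach these functions through the RTMemAlloc/RTMemFree wrappers
 * in iprt/mem.h when electric-fence wrapping is enabled (this file defines
 * RTMEM_NO_WRAP_TO_EF_APIS at the top precisely to keep its own allocations
 * out of that mapping), but the API can also be used directly.  The tag
 * string below is a made-up example:
 *
 *     void *pv = RTMemEfAllocZ(256, "example/tag", RT_SRC_POS);
 *     if (pv)
 *     {
 *         // ... use the zero-initialized block; a write past the end of
 *         // the page-aligned block faults on the fence page ...
 *         RTMemEfFree(pv, RT_SRC_POS);
 *     }
 */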
889
890
891
892
893/*
894 *
895 * The NP (no position) versions.
896 *
897 */
898
899
900
901RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
902{
903 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
904}
905
906
907RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
908{
909 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
910}
911
912
913RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
914{
915 if (pv)
916 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
917}
918
919
920RTDECL(void) RTMemEfTmpFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
921{
922 if (pv)
923 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
924}
925
926
927RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
928{
929 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
930}
931
932
933RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
934{
935 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
936}
937
938
939RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
940{
941 size_t cbAligned;
942 if (cbUnaligned >= 16)
943 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
944 else
945 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
946 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
947}
948
949
950RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
951{
952 size_t cbAligned;
953 if (cbUnaligned >= 16)
954 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
955 else
956 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
957 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
958}
959
960
961RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
962{
963 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
964}
965
966
967RTDECL(void *) RTMemEfReallocZNP(void *pvOld, size_t cbOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
968{
969 void *pvDst = rtR0MemRealloc("ReallocZ", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
970 if (pvDst && cbNew > cbOld)
971 memset((uint8_t *)pvDst + cbOld, 0, cbNew - cbOld);
972 return pvDst;
973}
974
975
976RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
977{
978 if (pv)
979 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, 0, ASMReturnAddress(), NULL, 0, NULL);
980}
981
982
983RTDECL(void) RTMemEfFreeZNP(void *pv, size_t cb) RT_NO_THROW_DEF
984{
985 if (pv)
986 rtR0MemFree("FreeZ", RTMEMTYPE_RTMEMFREEZ, pv, cb, ASMReturnAddress(), NULL, 0, NULL);
987}
988
989
990RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
991{
992 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
993 if (pvDst)
994 memcpy(pvDst, pvSrc, cb);
995 return pvDst;
996}
997
998
999RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
1000{
1001 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
1002 if (pvDst)
1003 {
1004 memcpy(pvDst, pvSrc, cbSrc);
1005 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
1006 }
1007 return pvDst;
1008}
1009