VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp @ 78381

Last change on this file since 78381 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 29.1 KB
1/* $Id: alloc-ef-r0drv.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocation, electric fence for ring-0 drivers.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#define RTMEM_NO_WRAP_TO_EF_APIS
32#include "internal/iprt.h"
33#include <iprt/mem.h>
34
35#include <iprt/alloc.h>
36#include <iprt/asm.h>
37#include <iprt/asm-amd64-x86.h>
38#include <iprt/assert.h>
39#include <iprt/errcore.h>
40#include <iprt/log.h>
41#include <iprt/memobj.h>
42#include <iprt/param.h>
43#include <iprt/string.h>
44#include <iprt/thread.h>
45
46#include "internal/mem.h"
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52#if defined(DOXYGEN_RUNNING)
53# define RTR0MEM_EF_IN_FRONT
54#endif
55
56/** @def RTR0MEM_EF_SIZE
57 * The size of the fence. This must be page aligned.
58 */
59#define RTR0MEM_EF_SIZE PAGE_SIZE
60
61/** @def RTR0MEM_EF_ALIGNMENT
62 * The allocation alignment, power of two of course.
63 *
64 * Use this for working around misaligned sizes, usually stemming from
65 * allocating a string or something after the main structure. When you
66 * encounter this, please fix the allocation to RTMemAllocVar or RTMemAllocZVar.
67 */
68#if 0
69# define RTR0MEM_EF_ALIGNMENT (ARCH_BITS / 8)
70#else
71# define RTR0MEM_EF_ALIGNMENT 1
72#endif
73
74/** @def RTR0MEM_EF_IN_FRONT
75 * Define this to put the fence up in front of the block.
76 * The default (when this isn't defined) is to put it up after the block.
77 */
78//# define RTR0MEM_EF_IN_FRONT
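/* Illustrative layout sketch: this only mirrors the pointer math in rtR0MemAlloc()
 * further down, to visualise the two configurations (cbAligned and cbBlock as
 * computed there):
 * @code
 *  #ifdef RTR0MEM_EF_IN_FRONT
 *      void *pvEFence = pvBlock;                                       // fence page first,
 *      void *pv       = (char *)pvEFence + RTR0MEM_EF_SIZE;            // user block right after it
 *  #else
 *      void *pvEFence = (char *)pvBlock + cbBlock - RTR0MEM_EF_SIZE;   // fence page last,
 *      void *pv       = (char *)pvEFence - cbAligned;                  // user block ends right at it
 *  #endif
 * @endcode
 */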
79
80/** @def RTR0MEM_EF_FREE_DELAYED
81 * This define will enable free() delay and protection of the freed data
82 * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
83 * the size threshold of the delayed blocks.
84 * Delayed blocks do not consume any physical memory, only virtual address space.
85 */
86#define RTR0MEM_EF_FREE_DELAYED (20 * _1M)
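/* A minimal sketch of what the delay buys (the tag string is just an example): with
 * delayed freeing enabled, the whole block is re-protected to RTMEM_PROT_NONE when it
 * is freed (see rtR0MemFree() below), so a use-after-free faults instead of silently
 * corrupting memory.
 * @code
 *      uint8_t *pb = (uint8_t *)RTMemEfAllocNP(64, "example");
 *      RTMemEfFreeNP(pb);
 *      pb[0] = 1;   // use after free: the block is now inaccessible -> fault/panic
 * @endcode
 */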
87
88/** @def RTR0MEM_EF_FREE_FILL
89 * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
90 * in the block before freeing/decommitting it. This is useful in GDB since GDB
91 * appears to be able to read the content of the page even after it's been
92 * decommitted.
93 */
94#define RTR0MEM_EF_FREE_FILL 'f'
95
96/** @def RTR0MEM_EF_FILLER
97 * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
98 * memory when the API doesn't require it to be zero'd.
99 */
100#define RTR0MEM_EF_FILLER 0xef
101
102/** @def RTR0MEM_EF_NOMAN_FILLER
103 * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
104 * unprotected but not allocated area of memory, the so called no man's land.
105 */
106#define RTR0MEM_EF_NOMAN_FILLER 0xaa
107
108/** @def RTR0MEM_EF_FENCE_FILLER
109 * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
110 * fence itself, as debuggers can usually read it anyway.
111 */
112#define RTR0MEM_EF_FENCE_FILLER 0xcc
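/* With the defaults chosen above (fence behind the block, RTR0MEM_EF_ALIGNMENT of 1),
 * an off-by-one overrun hits the protected fence page directly; a rough sketch:
 * @code
 *      char *psz = (char *)RTMemEfAllocNP(10, "example");
 *      psz[10] = '\0';     // one byte past the end: first byte of the RTMEM_PROT_NONE
 *                          // fence page -> immediate fault at the offending instruction
 *      RTMemEfFreeNP(psz);
 * @endcode
 */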
113
114
115/*********************************************************************************************************************************
116* Header Files *
117*********************************************************************************************************************************/
118#ifdef RT_OS_WINDOWS
119# include <iprt/win/windows.h>
120#elif !defined(RT_OS_FREEBSD)
121# include <sys/mman.h>
122#endif
123#include <iprt/avl.h>
124#include <iprt/thread.h>
125
126
127/*********************************************************************************************************************************
128* Structures and Typedefs *
129*********************************************************************************************************************************/
130/**
131 * Allocation types.
132 */
133typedef enum RTMEMTYPE
134{
135 RTMEMTYPE_RTMEMALLOC,
136 RTMEMTYPE_RTMEMALLOCZ,
137 RTMEMTYPE_RTMEMREALLOC,
138 RTMEMTYPE_RTMEMFREE,
139
140 RTMEMTYPE_NEW,
141 RTMEMTYPE_NEW_ARRAY,
142 RTMEMTYPE_DELETE,
143 RTMEMTYPE_DELETE_ARRAY
144} RTMEMTYPE;
145
146/**
147 * Node tracking a memory allocation.
148 */
149typedef struct RTR0MEMEFBLOCK
150{
151 /** AVL node core, key is the user block pointer. */
152 AVLPVNODECORE Core;
153 /** Allocation type. */
154 RTMEMTYPE enmType;
155 /** The memory object. */
156 RTR0MEMOBJ hMemObj;
157 /** The unaligned size of the block. */
158 size_t cbUnaligned;
159 /** The aligned size of the block. */
160 size_t cbAligned;
161 /** The allocation tag (read-only string). */
162 const char *pszTag;
163 /** The return address of the allocator function. */
164 void *pvCaller;
165 /** Line number of the alloc call. */
166 unsigned iLine;
167 /** File from which the allocation was made. */
168 const char *pszFile;
169 /** Function from which the allocation was made. */
170 const char *pszFunction;
171} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
172
173
174
175/*********************************************************************************************************************************
176* Global Variables *
177*********************************************************************************************************************************/
178/** Spinlock protecting all the block globals. */
179static volatile uint32_t g_BlocksLock;
180/** Tree tracking the allocations. */
181static AVLPVTREE g_BlocksTree;
182
183#ifdef RTR0MEM_EF_FREE_DELAYED
184/** Head of the delayed blocks. */
185static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
186/** Tail of the delayed blocks. */
187static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
188/** Number of bytes in the delay list (includes fences). */
189static volatile size_t g_cbBlocksDelay;
190#endif /* RTR0MEM_EF_FREE_DELAYED */
191
192/** Array of pointers the free functions watch for. */
193void *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
194/** Enable logging of all freed memory. */
195bool gfRTMemFreeLog = false;
196
197
198/*********************************************************************************************************************************
199* Internal Functions *
200*********************************************************************************************************************************/
201
202
203/**
204 * @callback_method_impl{FNRTSTROUTPUT}
205 */
206static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
207{
208 RT_NOREF1(pvArg);
209 if (cbChars)
210 {
211 RTLogWriteDebugger(pachChars, cbChars);
212 RTLogWriteStdOut(pachChars, cbChars);
213 RTLogWriteUser(pachChars, cbChars);
214 }
215 return cbChars;
216}
217
218
219/**
220 * Complains about something.
221 */
222static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
223{
224 va_list args;
225 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
226 va_start(args, pszFormat);
227 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
228 va_end(args);
229 RTAssertDoPanic();
230}
231
232/**
233 * Log an event.
234 */
235DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
236{
237#if 0
238 va_list args;
239 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
240 va_start(args, pszFormat);
241 RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
242 va_end(args);
243#else
244 NOREF(pszOp); NOREF(pszFormat);
245#endif
246}
247
248
249
250/**
251 * Acquires the lock.
252 */
253DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
254{
255 RTCCUINTREG uRet;
256 unsigned c = 0;
257 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
258 {
259 for (;;)
260 {
261 uRet = ASMIntDisableFlags();
262 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
263 break;
264 ASMSetFlags(uRet);
265 RTThreadSleepNoLog(((++c) >> 2) & 31);
266 }
267 }
268 else
269 {
270 for (;;)
271 {
272 uRet = ASMIntDisableFlags();
273 if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
274 break;
275 ASMSetFlags(uRet);
276 ASMNopPause();
277 if (++c & 3)
278 ASMNopPause();
279 }
280 }
281 return uRet;
282}
283
284
285/**
286 * Releases the lock.
287 */
288DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
289{
290 Assert(g_BlocksLock == 1);
291 ASMAtomicXchgU32(&g_BlocksLock, 0);
292 ASMSetFlags(fSavedIntFlags);
293}
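/* The pairing used throughout this file (minimal sketch): interrupts remain disabled
 * between the two calls, so the critical sections must be kept short.
 * @code
 *      RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
 *      // ... access g_BlocksTree and/or the delay list ...
 *      rtR0MemBlockUnlock(fSavedIntFlags);
 * @endcode
 */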
294
295
296/**
297 * Creates a block.
298 */
299DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
300 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
301{
302 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
303 if (pBlock)
304 {
305 pBlock->enmType = enmType;
306 pBlock->cbUnaligned = cbUnaligned;
307 pBlock->cbAligned = cbAligned;
308 pBlock->pszTag = pszTag;
309 pBlock->pvCaller = pvCaller;
310 pBlock->iLine = iLine;
311 pBlock->pszFile = pszFile;
312 pBlock->pszFunction = pszFunction;
313 }
314 return pBlock;
315}
316
317
318/**
319 * Frees a block.
320 */
321DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
322{
323 RTMemFree(pBlock);
324}
325
326
327/**
328 * Inserts a block into the tree.
329 */
330DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
331{
332 pBlock->Core.Key = pv;
333 pBlock->hMemObj = hMemObj;
334 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
335 bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
336 rtR0MemBlockUnlock(fSavedIntFlags);
337 AssertRelease(fRc);
338}
339
340
341/**
342 * Removes a block from the tree and returns it to the caller.
343 */
344DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
345{
346 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
347 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
348 rtR0MemBlockUnlock(fSavedIntFlags);
349 return pBlock;
350}
351
352
353/**
354 * Gets a block.
355 */
356DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
357{
358 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
359 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
360 rtR0MemBlockUnlock(fSavedIntFlags);
361 return pBlock;
362}
363
364
365/**
366 * Dumps one allocation.
367 */
368static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
369{
370 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
371 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
372 pBlock->Core.Key,
373 (unsigned long)pBlock->cbUnaligned,
374 (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
375 pBlock->pvCaller);
376 NOREF(pvUser);
377 return 0;
378}
379
380
381/**
382 * Dumps the allocated blocks.
383 * This is something which you should call from gdb.
384 */
385RT_C_DECLS_BEGIN
386void RTMemDump(void);
387RT_C_DECLS_END
388
389void RTMemDump(void)
390{
391 RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address size(alg) caller\n");
392 RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
393}
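/* Usage sketch: from gdb this is typically invoked as
 * @code
 *      (gdb) call RTMemDump()
 * @endcode
 * the exact syntax depends on the debugger in use.
 */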
394
395#ifdef RTR0MEM_EF_FREE_DELAYED
396
397/**
398 * Insert a delayed block.
399 */
400DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
401{
402 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
403 pBlock->Core.pRight = NULL;
404 pBlock->Core.pLeft = NULL;
405 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
406 if (g_pBlocksDelayHead)
407 {
408 g_pBlocksDelayHead->Core.pLeft = (PAVLPVNODECORE)pBlock;
409 pBlock->Core.pRight = (PAVLPVNODECORE)g_pBlocksDelayHead;
410 g_pBlocksDelayHead = pBlock;
411 }
412 else
413 {
414 g_pBlocksDelayTail = pBlock;
415 g_pBlocksDelayHead = pBlock;
416 }
417 g_cbBlocksDelay += cbBlock;
418 rtR0MemBlockUnlock(fSavedIntFlags);
419}
420
421/**
422 * Removes a delayed block.
423 */
424DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
425{
426 PRTR0MEMEFBLOCK pBlock = NULL;
427 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
428 if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
429 {
430 pBlock = g_pBlocksDelayTail;
431 if (pBlock)
432 {
433 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
434 if (pBlock->Core.pLeft)
435 pBlock->Core.pLeft->pRight = NULL;
436 else
437 g_pBlocksDelayHead = NULL;
438 g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
439 }
440 }
441 rtR0MemBlockUnlock(fSavedIntFlags);
442 return pBlock;
443}
444
445#endif /* RTR0MEM_EF_FREE_DELAYED */
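/* How the delay list is drained (this mirrors its use in rtR0MemFree() further down):
 * a newly freed block goes in at the head, and blocks only fall off the tail once the
 * accumulated size exceeds RTR0MEM_EF_FREE_DELAYED.
 * @code
 *      rtR0MemBlockDelayInsert(pBlock);
 *      while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
 *          rtR0MemFreeBlock(pBlock, pszOp);
 * @endcode
 */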
446
447
448static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
449{
450 void *pv = pBlock->Core.Key;
451# ifdef RTR0MEM_EF_IN_FRONT
452 void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
453# else
454 void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
455# endif
456 size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
457
458 int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, RT_ALIGN_Z(cbBlock, PAGE_SIZE), RTMEM_PROT_READ | RTMEM_PROT_WRITE);
459 if (RT_FAILURE(rc))
460 rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
461 pvBlock, cbBlock, rc);
462
463 rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
464 if (RT_FAILURE(rc))
465 rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
466 pBlock->hMemObj = NIL_RTR0MEMOBJ;
467
468 rtR0MemBlockFree(pBlock);
469}
470
471
472/**
473 * Initialization call; we shouldn't fail here.
474 */
475void rtR0MemEfInit(void)
476{
477
478}
479
480/**
481 * @callback_method_impl{AVLPVCALLBACK}
482 */
483static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
484{
485 PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
486
487 /* Note! pszFile and pszFunction may be invalid at this point. */
488 rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
489 pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);
490
491 rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");
492
493 NOREF(pvUser);
494 return VINF_SUCCESS;
495}
496
497
498/**
499 * Termination call.
500 *
501 * Will check and free memory.
502 */
503void rtR0MemEfTerm(void)
504{
505#ifdef RTR0MEM_EF_FREE_DELAYED
506 /*
507 * Release delayed frees.
508 */
509 RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
510 for (;;)
511 {
512 PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
513 if (pBlock)
514 {
515 g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
516 if (pBlock->Core.pLeft)
517 pBlock->Core.pLeft->pRight = NULL;
518 else
519 g_pBlocksDelayHead = NULL;
520 rtR0MemBlockUnlock(fSavedIntFlags);
521
522 rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
523
524 rtR0MemBlockLock();
525 }
526 else
527 break;
528 }
529 g_cbBlocksDelay = 0;
530 rtR0MemBlockUnlock(fSavedIntFlags);
531#endif
532
533 /*
534 * Complain about leaks. Then release them.
535 */
536 RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
537}
538
539
540/**
541 * Internal allocator.
542 */
543static void * rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
544 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
545{
546 /*
547 * Sanity.
548 */
549 if ( RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
550 || RTR0MEM_EF_SIZE <= 0)
551 {
552 rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
553 return NULL;
554 }
555 if (!cbUnaligned)
556 {
557#if 1
558 rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
559 return NULL;
560#else
561 cbAligned = cbUnaligned = 1;
562#endif
563 }
564
565#ifndef RTR0MEM_EF_IN_FRONT
566 /* Alignment decreases fence accuracy, but this is at least partially
567 * counteracted by filling and checking the alignment padding. When the
568 * fence is in front then no extra alignment is needed. */
569 cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
570#endif
571
572 /*
573 * Allocate the trace block.
574 */
575 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
576 if (!pBlock)
577 {
578 rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
579 return NULL;
580 }
581
582 /*
583 * Allocate a block with page alignment space + the size of the E-fence.
584 */
585 void *pvBlock = NULL;
586 RTR0MEMOBJ hMemObj;
587 size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
588 int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
589 if (RT_SUCCESS(rc))
590 pvBlock = RTR0MemObjAddress(hMemObj);
591 if (pvBlock)
592 {
593 /*
594 * Calc the start of the fence and the user block
595 * and then change the page protection of the fence.
596 */
597#ifdef RTR0MEM_EF_IN_FRONT
598 void *pvEFence = pvBlock;
599 void *pv = (char *)pvEFence + RTR0MEM_EF_SIZE;
600# ifdef RTR0MEM_EF_NOMAN_FILLER
601 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
602# endif
603#else
604 void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
605 void *pv = (char *)pvEFence - cbAligned;
606# ifdef RTR0MEM_EF_NOMAN_FILLER
607 memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
608 memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
609# endif
610#endif
611
612#ifdef RTR0MEM_EF_FENCE_FILLER
613 memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
614#endif
615 rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
616 if (!rc)
617 {
618 rtR0MemBlockInsert(pBlock, pv, hMemObj);
619 if (enmType == RTMEMTYPE_RTMEMALLOCZ)
620 memset(pv, 0, cbUnaligned);
621#ifdef RTR0MEM_EF_FILLER
622 else
623 memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
624#endif
625
626 rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
627 return pv;
628 }
629 rtR0MemComplain(pszOp, "RTR0MemObjProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
630 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
631 }
632 else
633 {
634 rtR0MemComplain(pszOp, "Failed to allocate %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
635 if (RT_SUCCESS(rc))
636 RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
637 }
638
639 rtR0MemBlockFree(pBlock);
640 return NULL;
641}
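/* A small worked example (assuming PAGE_SIZE is 4096 and the default layout, i.e.
 * RTR0MEM_EF_IN_FRONT not defined): for cbUnaligned == cbAligned == 100 the code
 * above gives
 * @code
 *      cbBlock  = RT_ALIGN_Z(100, 4096) + 4096;    // = 8192: one data page + one fence page
 *      pvEFence = (char *)pvBlock + 8192 - 4096;   // the last page of the block
 *      pv       = (char *)pvEFence - 100;          // what the caller gets back
 * @endcode
 * so pv[0..99] is usable and pv[100] is the first byte of the inaccessible fence.
 */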
642
643
644/**
645 * Internal free.
646 */
647static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
648{
649 NOREF(enmType); RT_SRC_POS_NOREF();
650
651 /*
652 * Simple case.
653 */
654 if (!pv)
655 return;
656
657 /*
658 * Check watch points.
659 */
660 for (unsigned i = 0; i < RT_ELEMENTS(gapvRTMemFreeWatch); i++)
661 if (gapvRTMemFreeWatch[i] == pv)
662 RTAssertDoPanic();
663
664 /*
665 * Find the block.
666 */
667 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
668 if (pBlock)
669 {
670 if (gfRTMemFreeLog)
671 RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);
672
673#ifdef RTR0MEM_EF_NOMAN_FILLER
674 /*
675 * Check whether the no man's land is untouched.
676 */
677# ifdef RTR0MEM_EF_IN_FRONT
678 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
679 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
680 RTR0MEM_EF_NOMAN_FILLER);
681# else
682 /* Alignment must match allocation alignment in rtR0MemAlloc(). */
683 void *pvWrong = ASMMemFirstMismatchingU8((char *)pv + pBlock->cbUnaligned,
684 pBlock->cbAligned - pBlock->cbUnaligned,
685 RTR0MEM_EF_NOMAN_FILLER);
686 if (pvWrong)
687 RTAssertDoPanic();
688 pvWrong = ASMMemFirstMismatchingU8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
689 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
690 RTR0MEM_EF_NOMAN_FILLER);
691# endif
692 if (pvWrong)
693 RTAssertDoPanic();
694#endif
695
696#ifdef RTR0MEM_EF_FREE_FILL
697 /*
698 * Fill the user part of the block.
699 */
700 memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
701#endif
702
703#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
704 /*
705 * We're doing delayed freeing.
706 * That means we'll expand the E-fence to cover the entire block.
707 */
708 int rc = RTR0MemObjProtect(pBlock->hMemObj,
709# ifdef RTR0MEM_EF_IN_FRONT
710 RTR0MEM_EF_SIZE,
711# else
712 0 /*offSub*/,
713# endif
714 RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE),
715 RTMEM_PROT_NONE);
716 if (RT_SUCCESS(rc))
717 {
718 /*
719 * Insert it into the free list and process pending frees.
720 */
721 rtR0MemBlockDelayInsert(pBlock);
722 while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
723 rtR0MemFreeBlock(pBlock, pszOp);
724 }
725 else
726 rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%zu, rc=%d.\n", pv, pBlock->cbAligned, rc);
727
728#else /* !RTR0MEM_EF_FREE_DELAYED */
729 rtR0MemFreeBlock(pBlock, pszOp);
730#endif /* !RTR0MEM_EF_FREE_DELAYED */
731 }
732 else
733 rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
734}
735
736
737/**
738 * Internal realloc.
739 */
740static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
741 const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
742{
743 /*
744 * Allocate new and copy.
745 */
746 if (!pvOld)
747 return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
748 if (!cbNew)
749 {
750 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
751 return NULL;
752 }
753
754 /*
755 * Get the block, allocate the new, copy the data, free the old one.
756 */
757 PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
758 if (pBlock)
759 {
760 void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
761 if (pvRet)
762 {
763 memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
764 rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
765 }
766 return pvRet;
767 }
768 rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
769 return NULL;
770}
771
772
773
774
775RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
776{
777 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
778}
779
780
781RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
782{
783 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
784}
785
786
787RTDECL(void) RTMemEfTmpFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
788{
789 if (pv)
790 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
791}
792
793
794RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
795{
796 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
797}
798
799
800RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
801{
802 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
803}
804
805
806RTDECL(void *) RTMemEfAllocVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
807{
808 size_t cbAligned;
809 if (cbUnaligned >= 16)
810 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
811 else
812 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
813 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
814}
815
816
817RTDECL(void *) RTMemEfAllocZVar(size_t cbUnaligned, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
818{
819 size_t cbAligned;
820 if (cbUnaligned >= 16)
821 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
822 else
823 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
824 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
825}
826
827
828RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
829{
830 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
831}
832
833
834RTDECL(void) RTMemEfFree(void *pv, RT_SRC_POS_DECL) RT_NO_THROW_DEF
835{
836 if (pv)
837 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
838}
839
840
841RTDECL(void *) RTMemEfDup(const void *pvSrc, size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
842{
843 void *pvDst = RTMemEfAlloc(cb, pszTag, RT_SRC_POS_ARGS);
844 if (pvDst)
845 memcpy(pvDst, pvSrc, cb);
846 return pvDst;
847}
848
849
850RTDECL(void *) RTMemEfDupEx(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
851{
852 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, RT_SRC_POS_ARGS);
853 if (pvDst)
854 {
855 memcpy(pvDst, pvSrc, cbSrc);
856 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
857 }
858 return pvDst;
859}
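/* Usage sketch (sizes and tag are just examples): duplicate three bytes and let the
 * cbExtra part provide a zero terminator.
 * @code
 *      char *psz = (char *)RTMemEfDupEx("abc", 3, 1, "example", RT_SRC_POS);
 *      // psz now holds "abc" followed by one zero byte; free with RTMemEfFree(psz, RT_SRC_POS).
 * @endcode
 */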
860
861
862
863
864/*
865 *
866 * The NP (no position) versions.
867 *
868 */
869
870
871
872RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
873{
874 return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
875}
876
877
878RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
879{
880 return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
881}
882
883
884RTDECL(void) RTMemEfTmpFreeNP(void *pv) RT_NO_THROW_DEF
885{
886 if (pv)
887 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
888}
889
890
891RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
892{
893 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
894}
895
896
897RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
898{
899 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
900}
901
902
903RTDECL(void *) RTMemEfAllocVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
904{
905 size_t cbAligned;
906 if (cbUnaligned >= 16)
907 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
908 else
909 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
910 return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
911}
912
913
914RTDECL(void *) RTMemEfAllocZVarNP(size_t cbUnaligned, const char *pszTag) RT_NO_THROW_DEF
915{
916 size_t cbAligned;
917 if (cbUnaligned >= 16)
918 cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
919 else
920 cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
921 return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
922}
923
924
925RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
926{
927 return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
928}
929
930
931RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
932{
933 if (pv)
934 rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
935}
936
937
938RTDECL(void *) RTMemEfDupNP(const void *pvSrc, size_t cb, const char *pszTag) RT_NO_THROW_DEF
939{
940 void *pvDst = RTMemEfAlloc(cb, pszTag, NULL, 0, NULL);
941 if (pvDst)
942 memcpy(pvDst, pvSrc, cb);
943 return pvDst;
944}
945
946
947RTDECL(void *) RTMemEfDupExNP(const void *pvSrc, size_t cbSrc, size_t cbExtra, const char *pszTag) RT_NO_THROW_DEF
948{
949 void *pvDst = RTMemEfAlloc(cbSrc + cbExtra, pszTag, NULL, 0, NULL);
950 if (pvDst)
951 {
952 memcpy(pvDst, pvSrc, cbSrc);
953 memset((uint8_t *)pvDst + cbSrc, 0, cbExtra);
954 }
955 return pvDst;
956}
957