Changeset 58269 in vbox for trunk/src/VBox/Runtime/r0drv
- Timestamp: Oct 15, 2015, 6:29:21 PM
- Location: trunk/src/VBox/Runtime/r0drv
- Files: 1 edited, 1 copied
trunk/src/VBox/Runtime/r0drv/alloc-ef-r0drv.cpp
(r58253 → r58269)

 /* $Id$ */
 /** @file
- * IPRT - Memory Allocation, electric fence.
+ * IPRT - Memory Allocation, electric fence for ring-0 drivers.
  */
…
 *   Header Files                                                                                                                *
 *********************************************************************************************************************************/
-#include "alloc-ef.h"
+#include "internal/iprt.h"
 #include <iprt/mem.h>
+#include <iprt/memobj.h>
 #include <iprt/log.h>
 #include <iprt/asm.h>
…
 #include <VBox/sup.h>
 #include <iprt/err.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>

 #include <iprt/alloc.h>
…
 #include <iprt/string.h>

-#ifdef RTALLOC_REPLACE_MALLOC
-# include <VBox/dis.h>
-# include <VBox/disopcode.h>
-# include <dlfcn.h>
-# ifdef RT_OS_DARWIN
-#  include <malloc/malloc.h>
-# endif
-#endif
-
-
-/*********************************************************************************************************************************
-*   Defined Constants And Macros                                                                                                 *
-*********************************************************************************************************************************/
-#ifdef RTALLOC_REPLACE_MALLOC
-# define RTMEM_REPLACMENT_ALIGN(a_cb) ((a_cb) >= 16 ? RT_ALIGN_Z(a_cb, 16) \
-                                       : (a_cb) >= sizeof(uintptr_t) ? RT_ALIGN_Z(a_cb, sizeof(uintptr_t)) : (a_cb))
-#endif
+
+
+/*******************************************************************************
+*   Defined Constants And Macros                                               *
+*******************************************************************************/
+#if defined(DOXYGEN_RUNNING)
+# define RTR0MEM_EF_IN_FRONT
+#endif
+
+/** @def RTR0MEM_EF_SIZE
+ * The size of the fence. This must be page aligned.
+ */
+#define RTR0MEM_EF_SIZE             PAGE_SIZE
+
+/** @def RTR0MEM_EF_ALIGNMENT
+ * The allocation alignment, power of two of course.
+ *
+ * Use this for working around misaligned sizes, usually stemming from
+ * allocating a string or something after the main structure. When you
+ * encounter this, please fix the allocation to RTMemAllocVar or RTMemAllocZVar.
+ */
+#if 0
+# define RTR0MEM_EF_ALIGNMENT       (ARCH_BITS / 8)
+#else
+# define RTR0MEM_EF_ALIGNMENT       1
+#endif
+
+/** @def RTR0MEM_EF_IN_FRONT
+ * Define this to put the fence up in front of the block.
+ * The default (when this isn't defined) is to put it up after the block.
+ */
+//# define RTR0MEM_EF_IN_FRONT
+
+/** @def RTR0MEM_EF_FREE_DELAYED
+ * This define will enable free() delay and protection of the freed data
+ * while it's being delayed. The value of RTR0MEM_EF_FREE_DELAYED defines
+ * the threshold of the delayed blocks.
+ * Delayed blocks do not consume any physical memory, only virtual address space.
+ */
+#define RTR0MEM_EF_FREE_DELAYED     (20 * _1M)
+
+/** @def RTR0MEM_EF_FREE_FILL
+ * This define will enable memset(,RTR0MEM_EF_FREE_FILL,)'ing the user memory
+ * in the block before freeing/decommitting it. This is useful in GDB since GDB
+ * appears to be able to read the content of the page even after it's been
+ * decommitted.
+ */
+#define RTR0MEM_EF_FREE_FILL        'f'
+
+/** @def RTR0MEM_EF_FILLER
+ * This define will enable memset(,RTR0MEM_EF_FILLER,)'ing the allocated
+ * memory when the API doesn't require it to be zero'd.
+ */
+#define RTR0MEM_EF_FILLER           0xef
+
+/** @def RTR0MEM_EF_NOMAN_FILLER
+ * This define will enable memset(,RTR0MEM_EF_NOMAN_FILLER,)'ing the
+ * unprotected but not allocated area of memory, the so called no man's land.
+ */
+#define RTR0MEM_EF_NOMAN_FILLER     0xaa
+
+/** @def RTR0MEM_EF_FENCE_FILLER
+ * This define will enable memset(,RTR0MEM_EF_FENCE_FILLER,)'ing the
+ * fence itself, as debuggers can usually read them.
+ */
+#define RTR0MEM_EF_FENCE_FILLER     0xcc
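The constants above fix the fence geometry: every allocation gets an extra, page-aligned guard region whose protection is later dropped, so an overrun faults immediately. As a minimal user-mode sketch of that arithmetic (all names, the 4 KiB page size and the pretend addresses are assumptions for illustration, not IPRT APIs):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE  4096u          /* assumption: 4 KiB pages */
    #define SKETCH_FENCE_SIZE SKETCH_PAGE_SIZE

    int main(void)
    {
        size_t cbAligned = 100;              /* user request, alignment 1 */
        /* one rounded-up data area plus one fence page */
        size_t cbBlock   = ((cbAligned + SKETCH_PAGE_SIZE - 1) & ~(size_t)(SKETCH_PAGE_SIZE - 1))
                         + SKETCH_FENCE_SIZE;
        uintptr_t uBlock = 0x10000;          /* pretend allocation address */
        uintptr_t uFence = uBlock + cbBlock - SKETCH_FENCE_SIZE;
        uintptr_t uUser  = uFence - cbAligned;   /* user data ends at the fence */
        printf("block=%#lx user=%#lx fence=%#lx\n",
               (unsigned long)uBlock, (unsigned long)uUser, (unsigned long)uFence);
        return 0;
    }

With the fence behind the block (the default here), the user pointer is pushed up against the fence page, which is what makes one-past-the-end writes trap.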
+
+
+/*******************************************************************************
+*   Header Files                                                               *
+*******************************************************************************/
+#ifdef RT_OS_WINDOWS
+# include <Windows.h>
+#else
+# include <sys/mman.h>
+#endif
+#include <iprt/avl.h>
+#include <iprt/thread.h>
+
+
+/*******************************************************************************
+*   Structures and Typedefs                                                    *
+*******************************************************************************/
+/**
+ * Allocation types.
+ */
+typedef enum RTMEMTYPE
+{
+    RTMEMTYPE_RTMEMALLOC,
+    RTMEMTYPE_RTMEMALLOCZ,
+    RTMEMTYPE_RTMEMREALLOC,
+    RTMEMTYPE_RTMEMFREE,
+
+    RTMEMTYPE_NEW,
+    RTMEMTYPE_NEW_ARRAY,
+    RTMEMTYPE_DELETE,
+    RTMEMTYPE_DELETE_ARRAY
+} RTMEMTYPE;
+
+/**
+ * Node tracking a memory allocation.
+ */
+typedef struct RTR0MEMEFBLOCK
+{
+    /** Avl node core, key is the user block pointer. */
+    AVLPVNODECORE   Core;
+    /** Allocation type. */
+    RTMEMTYPE       enmType;
+    /** The memory object. */
+    RTR0MEMOBJ      hMemObj;
+    /** The unaligned size of the block. */
+    size_t          cbUnaligned;
+    /** The aligned size of the block. */
+    size_t          cbAligned;
+    /** The allocation tag (read-only string). */
+    const char     *pszTag;
+    /** The return address of the allocator function. */
+    void           *pvCaller;
+    /** Line number of the alloc call. */
+    unsigned        iLine;
+    /** File from within the allocation was made. */
+    const char     *pszFile;
+    /** Function from within the allocation was made. */
+    const char     *pszFunction;
+} RTR0MEMEFBLOCK, *PRTR0MEMEFBLOCK;
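The node keeps both sizes because they bound different checks: cbUnaligned is what the caller may legally touch, cbAligned decides where the fence starts, and the gap between them is the "no man's land" that gets filled with 0xaa and re-verified on free. A worked example of that gap, under assumed numbers (8-byte alignment, which only the *Var APIs use; the file above defaults RTR0MEM_EF_ALIGNMENT to 1):

    #include <stddef.h>

    /* cbUnaligned = 100 -> cbAligned = 104; bytes 100..103 are no man's
     * land, plus the page-rounding slack on the far side of the data page. */
    static size_t nomanLandSize(size_t cbUnaligned)
    {
        size_t cbAligned = (cbUnaligned + 7) & ~(size_t)7;   /* e.g. 100 -> 104 */
        return cbAligned - cbUnaligned;   /* filled with 0xaa, checked on free */
    }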
…
 *   Global Variables                                                                                                            *
 *********************************************************************************************************************************/
-#ifdef RTALLOC_EFENCE_TRACE
 /** Spinlock protecting the all the block's globals. */
-static volatile uint32_t    g_BlocksLock;
+static volatile uint32_t        g_BlocksLock;
 /** Tree tracking the allocations. */
-static AVLPVTREE            g_BlocksTree;
-# ifdef RTALLOC_EFENCE_FREE_DELAYED
+static AVLPVTREE                g_BlocksTree;
+
+#ifdef RTR0MEM_EF_FREE_DELAYED
 /** Tail of the delayed blocks. */
-static volatile PRTMEMBLOCK g_pBlocksDelayHead;
+static volatile PRTR0MEMEFBLOCK g_pBlocksDelayHead;
 /** Tail of the delayed blocks. */
-static volatile PRTMEMBLOCK g_pBlocksDelayTail;
+static volatile PRTR0MEMEFBLOCK g_pBlocksDelayTail;
 /** Number of bytes in the delay list (includes fences). */
-static volatile size_t      g_cbBlocksDelay;
-# endif /* RTALLOC_EFENCE_FREE_DELAYED */
-# ifdef RTALLOC_REPLACE_MALLOC
-/** @name For calling the real allocation API we've replaced.
- * @{ */
-void * (*g_pfnOrgMalloc)(size_t);
-void * (*g_pfnOrgCalloc)(size_t, size_t);
-void * (*g_pfnOrgRealloc)(void *, size_t);
-void   (*g_pfnOrgFree)(void *);
-size_t (*g_pfnOrgMallocSize)(void *);
-/** @} */
-# endif
-#endif /* RTALLOC_EFENCE_TRACE */
+static volatile size_t          g_cbBlocksDelay;
+#endif /* RTR0MEM_EF_FREE_DELAYED */
+
 /** Array of pointers free watches for. */
 void   *gapvRTMemFreeWatch[4] = {NULL, NULL, NULL, NULL};
…
 *   Internal Functions                                                                                                          *
 *********************************************************************************************************************************/
-#ifdef RTALLOC_REPLACE_MALLOC
-static void rtMemReplaceMallocAndFriends(void);
-#endif
+
+
+/**
+ * @callback_method_impl{FNRTSTROUTPUT}
+ */
+static DECLCALLBACK(size_t) rtR0MemEfWrite(void *pvArg, const char *pachChars, size_t cbChars)
+{
+    if (cbChars)
+    {
+        RTLogWriteDebugger(pachChars, cbChars);
+        RTLogWriteStdOut(pachChars, cbChars);
+        RTLogWriteUser(pachChars, cbChars);
+    }
+    return cbChars;
+}


 /**
  * Complains about something.
  */
-static void rtmemComplain(const char *pszOp, const char *pszFormat, ...)
+static void rtR0MemComplain(const char *pszOp, const char *pszFormat, ...)
 {
     va_list args;
-    fprintf(stderr, "RTMem error: %s: ", pszOp);
+    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem error: %s: ", pszOp);
     va_start(args, pszFormat);
-    vfprintf(stderr, pszFormat, args);
+    RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
     va_end(args);
     RTAssertDoPanic();
…
 /**
  * Log an event.
  */
-DECLINLINE(void) rtmemLog(const char *pszOp, const char *pszFormat, ...)
+DECLINLINE(void) rtR0MemLog(const char *pszOp, const char *pszFormat, ...)
 {
 #if 0
     va_list args;
-    fprintf(stderr, "RTMem info: %s: ", pszOp);
+    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "RTMem info: %s: ", pszOp);
     va_start(args, pszFormat);
-    vfprintf(stderr, pszFormat, args);
+    RTStrFormatV(rtR0MemEfWrite, NULL, NULL, NULL, pszFormat, args);
     va_end(args);
 #else
…

-#ifdef RTALLOC_EFENCE_TRACE

 /**
  * Acquires the lock.
  */
-DECLINLINE(void) rtmemBlockLock(void)
-{
+DECLINLINE(RTCCUINTREG) rtR0MemBlockLock(void)
+{
+    RTCCUINTREG uRet;
     unsigned c = 0;
-    while (!ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
-        RTThreadSleepNoLog(((++c) >> 2) & 31);
+    if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
+    {
+        for (;;)
+        {
+            uRet = ASMIntDisableFlags();
+            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
+                break;
+            ASMSetFlags(uRet);
+            RTThreadSleepNoLog(((++c) >> 2) & 31);
+        }
+    }
+    else
+    {
+        for (;;)
+        {
+            uRet = ASMIntDisableFlags();
+            if (ASMAtomicCmpXchgU32(&g_BlocksLock, 1, 0))
+                break;
+            ASMSetFlags(uRet);
+            ASMNopPause();
+            if (++c & 3)
+                ASMNopPause();
+        }
+    }
+    return uRet;
 }


 /**
  * Releases the lock.
  */
-DECLINLINE(void) rtmemBlockUnlock(void)
+DECLINLINE(void) rtR0MemBlockUnlock(RTCCUINTREG fSavedIntFlags)
 {
     Assert(g_BlocksLock == 1);
     ASMAtomicXchgU32(&g_BlocksLock, 0);
+    ASMSetFlags(fSavedIntFlags);
 }
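This is the core ring-0 change to the lock: the ring-3 version could simply sleep, while a ring-0 lock may be taken with preemption disabled, so it must disable interrupts around the compare-and-swap and return the saved flags for the unlock to restore. A user-mode approximation using C11 atomics (the interrupt save/restore has no user-mode equivalent and is only marked in comments; names are illustrative):

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_uint g_lock;

    static void sketchLock(void)
    {
        unsigned c = 0;
        for (;;)
        {
            /* kernel: uRet = ASMIntDisableFlags();  save RFLAGS, cli */
            unsigned expected = 0;
            if (atomic_compare_exchange_strong(&g_lock, &expected, 1))
                break;              /* got it; interrupts stay disabled */
            /* kernel: ASMSetFlags(uRet);  re-enable while spinning */
            if ((++c & 15) == 0)
                sched_yield();      /* stand-in for RTThreadSleepNoLog/ASMNopPause */
        }
    }

    static void sketchUnlock(void)
    {
        atomic_store(&g_lock, 0);
        /* kernel: ASMSetFlags(fSavedIntFlags);  restore the saved flags */
    }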
 /**
  * Creates a block.
  */
-DECLINLINE(PRTMEMBLOCK) rtmemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
-                                         const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
-{
-# ifdef RTALLOC_REPLACE_MALLOC
-    if (!g_pfnOrgMalloc)
-        rtMemReplaceMallocAndFriends();
-    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)g_pfnOrgMalloc(sizeof(*pBlock));
-# else
-    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)malloc(sizeof(*pBlock));
-# endif
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockCreate(RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
+                                               const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+{
+    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTMemAlloc(sizeof(*pBlock));
     if (pBlock)
     {
…
 /**
  * Frees a block.
  */
-DECLINLINE(void) rtmemBlockFree(PRTMEMBLOCK pBlock)
-{
-# ifdef RTALLOC_REPLACE_MALLOC
-    g_pfnOrgFree(pBlock);
-# else
-    free(pBlock);
-# endif
+DECLINLINE(void) rtR0MemBlockFree(PRTR0MEMEFBLOCK pBlock)
+{
+    RTMemFree(pBlock);
 }


 /**
  * Insert a block from the tree.
  */
-DECLINLINE(void) rtmemBlockInsert(PRTMEMBLOCK pBlock, void *pv)
+DECLINLINE(void) rtR0MemBlockInsert(PRTR0MEMEFBLOCK pBlock, void *pv, RTR0MEMOBJ hMemObj)
 {
     pBlock->Core.Key = pv;
-    rtmemBlockLock();
+    pBlock->hMemObj  = hMemObj;
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
     bool fRc = RTAvlPVInsert(&g_BlocksTree, &pBlock->Core);
-    rtmemBlockUnlock();
+    rtR0MemBlockUnlock(fSavedIntFlags);
     AssertRelease(fRc);
 }


 /**
  * Remove a block from the tree and returns it to the caller.
  */
-DECLINLINE(PRTMEMBLOCK) rtmemBlockRemove(void *pv)
-{
-    rtmemBlockLock();
-    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
-    rtmemBlockUnlock();
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockRemove(void *pv)
+{
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVRemove(&g_BlocksTree, pv);
+    rtR0MemBlockUnlock(fSavedIntFlags);
     return pBlock;
 }


 /**
  * Gets a block.
  */
-DECLINLINE(PRTMEMBLOCK) rtmemBlockGet(void *pv)
-{
-    rtmemBlockLock();
-    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
-    rtmemBlockUnlock();
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockGet(void *pv)
+{
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)RTAvlPVGet(&g_BlocksTree, pv);
+    rtR0MemBlockUnlock(fSavedIntFlags);
     return pBlock;
 }
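Every tree operation above follows the same shape: take the spinlock, do one AVLPV operation keyed on the user pointer, release. Because the user pointer is the key, a bad free is caught by a failed removal, which is how the free path complains later. A tiny self-contained stand-in for that bookkeeping (a linked list instead of the real AVL tree, purely for illustration):

    #include <stddef.h>

    typedef struct SKETCHBLOCK
    {
        struct SKETCHBLOCK *pNext;
        void               *pvUser;   /* the key, like Core.Key above */
        size_t              cbUser;
    } SKETCHBLOCK;

    static SKETCHBLOCK *g_pHead;

    static void sketchInsert(SKETCHBLOCK *pBlock)
    {
        pBlock->pNext = g_pHead;
        g_pHead = pBlock;
    }

    static SKETCHBLOCK *sketchRemove(void *pv)
    {
        for (SKETCHBLOCK **ppCur = &g_pHead; *ppCur; ppCur = &(*ppCur)->pNext)
            if ((*ppCur)->pvUser == pv)
            {
                SKETCHBLOCK *pBlock = *ppCur;
                *ppCur = pBlock->pNext;
                return pBlock;
            }
        return NULL;   /* a bogus or double free lands here */
    }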
 /**
  * Dumps one allocation.
…
 static DECLCALLBACK(int) RTMemDumpOne(PAVLPVNODECORE pNode, void *pvUser)
 {
-    PRTMEMBLOCK pBlock = (PRTMEMBLOCK)pNode;
-    fprintf(stderr, "%p %08lx(+%02lx) %p\n",
-            pBlock->Core.Key,
-            (unsigned long)pBlock->cbUnaligned,
-            (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
-            pBlock->pvCaller);
+    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
+    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "%p %08lx(+%02lx) %p\n",
+                pBlock->Core.Key,
+                (unsigned long)pBlock->cbUnaligned,
+                (unsigned long)(pBlock->cbAligned - pBlock->cbUnaligned),
+                pBlock->pvCaller);
     NOREF(pvUser);
     return 0;
 }


 /**
  * Dumps the allocated blocks.
…
 void RTMemDump(void)
 {
-    fprintf(stderr, "address  size(alg)     caller\n");
+    RTStrFormat(rtR0MemEfWrite, NULL, NULL, NULL, "address  size(alg)     caller\n");
     RTAvlPVDoWithAll(&g_BlocksTree, true, RTMemDumpOne, NULL);
 }

-# ifdef RTALLOC_EFENCE_FREE_DELAYED
+#ifdef RTR0MEM_EF_FREE_DELAYED

 /**
  * Insert a delayed block.
  */
-DECLINLINE(void) rtmemBlockDelayInsert(PRTMEMBLOCK pBlock)
-{
-    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
+DECLINLINE(void) rtR0MemBlockDelayInsert(PRTR0MEMEFBLOCK pBlock)
+{
+    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
     pBlock->Core.pRight = NULL;
     pBlock->Core.pLeft  = NULL;
-    rtmemBlockLock();
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
     if (g_pBlocksDelayHead)
     {
…
     }
     g_cbBlocksDelay += cbBlock;
-    rtmemBlockUnlock();
+    rtR0MemBlockUnlock(fSavedIntFlags);
 }

 /**
  * Removes a delayed block.
  */
-DECLINLINE(PRTMEMBLOCK) rtmemBlockDelayRemove(void)
-{
-    PRTMEMBLOCK pBlock = NULL;
-    rtmemBlockLock();
-    if (g_cbBlocksDelay > RTALLOC_EFENCE_FREE_DELAYED)
+DECLINLINE(PRTR0MEMEFBLOCK) rtR0MemBlockDelayRemove(void)
+{
+    PRTR0MEMEFBLOCK pBlock = NULL;
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+    if (g_cbBlocksDelay > RTR0MEM_EF_FREE_DELAYED)
     {
         pBlock = g_pBlocksDelayTail;
         if (pBlock)
         {
-            g_pBlocksDelayTail = (PRTMEMBLOCK)pBlock->Core.pLeft;
+            g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
             if (pBlock->Core.pLeft)
                 pBlock->Core.pLeft->pRight = NULL;
             else
                 g_pBlocksDelayHead = NULL;
-            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
+            g_cbBlocksDelay -= RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
         }
     }
-    rtmemBlockUnlock();
+    rtR0MemBlockUnlock(fSavedIntFlags);
     return pBlock;
 }

-# endif /* RTALLOC_EFENCE_FREE_DELAYED */
-
-#endif /* RTALLOC_EFENCE_TRACE */
+#endif /* RTR0MEM_EF_FREE_DELAYED */
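The delay list is a FIFO with a byte budget: freed blocks are parked, still fully protected, and only once the parked total exceeds RTR0MEM_EF_FREE_DELAYED are the oldest blocks actually released. That keeps use-after-free faults live for as long as possible. A sketch of just the budget logic (illustrative types and names; the real list threads blocks through their AVL node core):

    #include <stddef.h>

    #define SKETCH_DELAY_BUDGET (20u * 1024u * 1024u)   /* mirrors 20 * _1M */

    typedef struct DELAYED { struct DELAYED *pNewer, *pOlder; size_t cb; } DELAYED;
    static DELAYED *g_pNewest, *g_pOldest;
    static size_t   g_cbParked;

    static void parkBlock(DELAYED *p)      /* called instead of really freeing */
    {
        p->pOlder = g_pNewest;
        p->pNewer = NULL;
        if (g_pNewest)
            g_pNewest->pNewer = p;
        else
            g_pOldest = p;
        g_pNewest = p;
        g_cbParked += p->cb;
    }

    static DELAYED *unparkOldest(void)     /* NULL until the budget overflows */
    {
        if (g_cbParked <= SKETCH_DELAY_BUDGET || !g_pOldest)
            return NULL;
        DELAYED *p = g_pOldest;
        g_pOldest = p->pNewer;
        if (g_pOldest)
            g_pOldest->pOlder = NULL;
        else
            g_pNewest = NULL;
        g_cbParked -= p->cb;
        return p;   /* caller now really frees the backing pages */
    }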
-
-
-#if defined(RTALLOC_REPLACE_MALLOC) && defined(RTALLOC_EFENCE_TRACE)
-/*
- *
- * Replacing malloc, calloc, realloc, & free.
- *
- */
-
-/** Replacement for malloc. */
-static void *rtMemReplacementMalloc(size_t cb)
-{
-    size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
-    void *pv = rtR3MemAlloc("r-malloc", RTMEMTYPE_RTMEMALLOC, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
-    if (!pv)
-        pv = g_pfnOrgMalloc(cb);
-    return pv;
-}
-
-/** Replacement for calloc. */
-static void *rtMemReplacementCalloc(size_t cbItem, size_t cItems)
-{
-    size_t cb = cbItem * cItems;
-    size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cb);
-    void *pv = rtR3MemAlloc("r-calloc", RTMEMTYPE_RTMEMALLOCZ, cb, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
-    if (!pv)
-        pv = g_pfnOrgCalloc(cbItem, cItems);
-    return pv;
-}
-
-/** Replacement for realloc. */
-static void *rtMemReplacementRealloc(void *pvOld, size_t cbNew)
-{
-    if (pvOld)
-    {
-        /* We're not strict about where the memory was allocated. */
-        PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
-        if (pBlock)
-        {
-            size_t cbAligned = RTMEM_REPLACMENT_ALIGN(cbNew);
-            return rtR3MemRealloc("r-realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbAligned, "heap", ASMReturnAddress(), RT_SRC_POS);
-        }
-        return g_pfnOrgRealloc(pvOld, cbNew);
-    }
-    return rtMemReplacementMalloc(cbNew);
-}
-
-/** Replacement for free(). */
-static void rtMemReplacementFree(void *pv)
-{
-    if (pv)
-    {
-        /* We're not strict about where the memory was allocated. */
-        PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
-        if (pBlock)
-            rtR3MemFree("r-free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS);
-        else
-            g_pfnOrgFree(pv);
-    }
-}
-
-# ifdef RT_OS_DARWIN
-/** Replacement for malloc. */
-static size_t rtMemReplacementMallocSize(void *pv)
-{
-    size_t cb;
-    if (pv)
-    {
-        /* We're not strict about where the memory was allocated. */
-        PRTMEMBLOCK pBlock = rtmemBlockGet(pv);
-        if (pBlock)
-            cb = pBlock->cbUnaligned;
-        else
-            cb = g_pfnOrgMallocSize(pv);
-    }
-    else
-        cb = 0;
-    return cb;
-}
-# endif
+
+
+static void rtR0MemFreeBlock(PRTR0MEMEFBLOCK pBlock, const char *pszOp)
+{
+    void *pv = pBlock->Core.Key;
+# ifdef RTR0MEM_EF_IN_FRONT
+    void *pvBlock = (char *)pv - RTR0MEM_EF_SIZE;
+# else
+    void *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
+# endif
+    size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+
+    int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+    if (RT_FAILURE(rc))
+        rtR0MemComplain(pszOp, "RTR0MemObjProtect([%p], 0, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %Rrc\n",
+                        pvBlock, cbBlock, rc);
+
+    rc = RTR0MemObjFree(pBlock->hMemObj, true /*fFreeMappings*/);
+    if (RT_FAILURE(rc))
+        rtR0MemComplain(pszOp, "RTR0MemObjFree([%p LB %#x]) -> %Rrc\n", pvBlock, cbBlock, rc);
+    pBlock->hMemObj = NIL_RTR0MEMOBJ;
+
+    rtR0MemBlockFree(pBlock);
+}
+
+
+/**
+ * Initialize call, we shouldn't fail here.
+ */
+void rtR0MemEfInit(void)
+{
+
+}
+
+/**
+ * @callback_method_impl{AVLPVCALLBACK}
+ */
+static DECLCALLBACK(int) rtR0MemEfDestroyBlock(PAVLPVNODECORE pNode, void *pvUser)
+{
+    PRTR0MEMEFBLOCK pBlock = (PRTR0MEMEFBLOCK)pNode;
+
+    /* Note! pszFile and pszFunction may be invalid at this point. */
+    rtR0MemComplain("rtR0MemEfDestroyBlock", "Leaking %zu bytes at %p (iLine=%u pvCaller=%p)\n",
+                    pBlock->cbAligned, pBlock->Core.Key, pBlock->iLine, pBlock->pvCaller);
+
+    rtR0MemFreeBlock(pBlock, "rtR0MemEfDestroyBlock");
+
+    NOREF(pvUser);
+    return VINF_SUCCESS;
+}
-
-
-static void rtMemReplaceMallocAndFriends(void)
-{
-    struct
-    {
-        const char *pszName;
-        PFNRT       pfnReplacement;
-        PFNRT       pfnOrg;
-        PFNRT      *ppfnJumpBack;
-    } aApis[] =
-    {
-        { "free",    (PFNRT)rtMemReplacementFree,    (PFNRT)free,    (PFNRT *)&g_pfnOrgFree },
-        { "realloc", (PFNRT)rtMemReplacementRealloc, (PFNRT)realloc, (PFNRT *)&g_pfnOrgRealloc },
-        { "calloc",  (PFNRT)rtMemReplacementCalloc,  (PFNRT)calloc,  (PFNRT *)&g_pfnOrgCalloc },
-        { "malloc",  (PFNRT)rtMemReplacementMalloc,  (PFNRT)malloc,  (PFNRT *)&g_pfnOrgMalloc },
-#ifdef RT_OS_DARWIN
-        { "malloc_size", (PFNRT)rtMemReplacementMallocSize, (PFNRT)malloc_size, (PFNRT *)&g_pfnOrgMallocSize },
-#endif
-    };
-
-    /*
-     * Initialize the jump backs to avoid recursivly entering this function.
-     */
-    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
-        *aApis[i].ppfnJumpBack = aApis[i].pfnOrg;
-
-    /*
-     * Give the user an option to skip replacing malloc.
-     */
-    if (getenv("IPRT_DONT_REPLACE_MALLOC"))
-        return;
-
-    /*
-     * Allocate a page for jump back code (we leak it).
-     */
-    uint8_t *pbExecPage = (uint8_t *)RTMemPageAlloc(PAGE_SIZE); AssertFatal(pbExecPage);
-    int rc = RTMemProtect(pbExecPage, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);
-
-    /*
-     * Do the ground work.
-     */
-    uint8_t *pb = pbExecPage;
-    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
-    {
-        /* Resolve it. */
-        PFNRT pfnOrg = (PFNRT)(uintptr_t)dlsym(RTLD_DEFAULT, aApis[i].pszName);
-        if (pfnOrg)
-            aApis[i].pfnOrg = pfnOrg;
-        else
-            pfnOrg = aApis[i].pfnOrg;
-
-        /* Figure what we can replace and how much to duplicate in the jump back code. */
+/**
+ * Termination call.
+ *
+ * Will check and free memory.
+ */
+void rtR0MemEfTerm(void)
+{
+#ifdef RTR0MEM_EF_FREE_DELAYED
+    /*
+     * Release delayed frees.
+     */
+    RTCCUINTREG fSavedIntFlags = rtR0MemBlockLock();
+    for (;;)
+    {
+        PRTR0MEMEFBLOCK pBlock = g_pBlocksDelayTail;
+        g_pBlocksDelayTail = (PRTR0MEMEFBLOCK)pBlock->Core.pLeft;
+        if (pBlock->Core.pLeft)
+            pBlock->Core.pLeft->pRight = NULL;
+        else
+            g_pBlocksDelayHead = NULL;
+        rtR0MemBlockUnlock(fSavedIntFlags);
+
+        rtR0MemFreeBlock(pBlock, "rtR0MemEfTerm");
+
+        rtR0MemBlockLock();
+    }
+    g_cbBlocksDelay = 0;
+    rtR0MemBlockUnlock(fSavedIntFlags);
+#endif
+
+    /*
+     * Complain about leaks. Then release them.
+     */
+    RTAvlPVDestroy(&g_BlocksTree, rtR0MemEfDestroyBlock, NULL);
+}
-# ifdef RT_ARCH_AMD64
-        uint32_t         cbNeeded   = 12;
-        DISCPUMODE const enmCpuMode = DISCPUMODE_64BIT;
-# elif defined(RT_ARCH_X86)
-        uint32_t const   cbNeeded   = 5;
-        DISCPUMODE const enmCpuMode = DISCPUMODE_32BIT;
-# else
-#  error "Port me"
-# endif
-        uint32_t offJmpBack = 0;
-        uint32_t cbCopy = 0;
-        while (offJmpBack < cbNeeded)
-        {
-            DISCPUSTATE Dis;
-            uint32_t cbInstr = 1;
-            rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
-            AssertFatal(!(Dis.pCurInstr->fOpType & (DISOPTYPE_CONTROLFLOW)));
-# ifdef RT_ARCH_AMD64
-#  ifdef RT_OS_DARWIN
-            /* Kludge for: cmp [malloc_def_zone_state], 1; jg 2; call _malloc_initialize; 2: */
-            DISQPVPARAMVAL Parm;
-            if (   Dis.ModRM.Bits.Mod == 0
-                && Dis.ModRM.Bits.Rm  == 5 /* wrt RIP */
-                && (Dis.Param2.fUse & (DISUSE_IMMEDIATE16_SX8 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE64_SX8))
-                && Dis.Param2.uValue == 1
-                && Dis.pCurInstr->uOpcode == OP_CMP)
-            {
-                cbCopy = offJmpBack;
-
-                offJmpBack += cbInstr;
-                rc = DISInstr((void *)((uintptr_t)pfnOrg + offJmpBack), enmCpuMode, &Dis, &cbInstr); AssertFatalRC(rc);
-                if (   Dis.pCurInstr->uOpcode == OP_JNBE
-                    && Dis.Param1.uDisp.i8 == 5)
-                {
-                    offJmpBack += cbInstr + 5;
-                    AssertFatal(offJmpBack >= cbNeeded);
-                    break;
-                }
-            }
-#  endif
-            AssertFatal(!(Dis.ModRM.Bits.Mod == 0 && Dis.ModRM.Bits.Rm == 5 /* wrt RIP */));
-# endif
-            offJmpBack += cbInstr;
-        }
-        if (!cbCopy)
-            cbCopy = offJmpBack;
-
-        /* Assemble the jump back. */
-        memcpy(pb, (void *)(uintptr_t)pfnOrg, cbCopy);
-        uint32_t off = cbCopy;
-# ifdef RT_ARCH_AMD64
-        pb[off++] = 0xff; /* jmp qword [$+8 wrt RIP] */
-        pb[off++] = 0x25;
-        *(uint32_t *)&pb[off] = 0;
-        off += 4;
-        *(uint64_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack;
-        off += 8;
-        off = RT_ALIGN_32(off, 16);
-# elif defined(RT_ARCH_X86)
-        pb[off++] = 0xe9; /* jmp rel32 */
-        *(uint32_t *)&pb[off] = (uintptr_t)pfnOrg + offJmpBack - (uintptr_t)&pb[4];
-        off += 4;
-        off = RT_ALIGN_32(off, 8);
-# else
-#  error "Port me"
-# endif
-        *aApis[i].ppfnJumpBack = (PFNRT)(uintptr_t)pb;
-        pb += off;
-    }
-
-    /*
-     * Modify the APIs.
-     */
-    for (unsigned i = 0; i < RT_ELEMENTS(aApis); i++)
-    {
-        pb = (uint8_t *)(uintptr_t)aApis[i].pfnOrg;
-        rc = RTMemProtect(pb, 16, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); AssertFatalRC(rc);
-
-# ifdef RT_ARCH_AMD64
-        /* Assemble the LdrLoadDll patch. */
-        *pb++ = 0x48; /* mov rax, qword */
-        *pb++ = 0xb8;
-        *(uint64_t *)pb = (uintptr_t)aApis[i].pfnReplacement;
-        pb += 8;
-        *pb++ = 0xff; /* jmp rax */
-        *pb++ = 0xe0;
-# elif defined(RT_ARCH_X86)
-        *pb++ = 0xe9; /* jmp rel32 */
-        *(uint32_t *)pb = (uintptr_t)aApis[i].pfnReplacement - (uintptr_t)&pb[4];
-# else
-#  error "Port me"
-# endif
-    }
-}
-
-#endif /* RTALLOC_REPLACE_MALLOC && RTALLOC_EFENCE_TRACE */


 /**
  * Internal allocator.
  */
-RTDECL(void *) rtR3MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
-                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+static void *rtR0MemAlloc(const char *pszOp, RTMEMTYPE enmType, size_t cbUnaligned, size_t cbAligned,
+                          const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
 {
     /*
      * Sanity.
      */
-    if (    RT_ALIGN_Z(RTALLOC_EFENCE_SIZE, PAGE_SIZE) != RTALLOC_EFENCE_SIZE
-        &&  RTALLOC_EFENCE_SIZE <= 0)
-    {
-        rtmemComplain(pszOp, "Invalid E-fence size! %#x\n", RTALLOC_EFENCE_SIZE);
+    if (    RT_ALIGN_Z(RTR0MEM_EF_SIZE, PAGE_SIZE) != RTR0MEM_EF_SIZE
+        &&  RTR0MEM_EF_SIZE <= 0)
+    {
+        rtR0MemComplain(pszOp, "Invalid E-fence size! %#x\n", RTR0MEM_EF_SIZE);
         return NULL;
     }
     if (!cbUnaligned)
     {
-#if 0
-        rtmemComplain(pszOp, "Request of ZERO bytes allocation!\n");
+#if 1
+        rtR0MemComplain(pszOp, "Request of ZERO bytes allocation!\n");
         return NULL;
 #else
…
     }

-#ifndef RTALLOC_EFENCE_IN_FRONT
+#ifndef RTR0MEM_EF_IN_FRONT
     /* Alignment decreases fence accuracy, but this is at least partially
      * counteracted by filling and checking the alignment padding. When the
      * fence is in front then no extra alignment is needed. */
-    cbAligned = RT_ALIGN_Z(cbAligned, RTALLOC_EFENCE_ALIGNMENT);
-#endif
-
-#ifdef RTALLOC_EFENCE_TRACE
+    cbAligned = RT_ALIGN_Z(cbAligned, RTR0MEM_EF_ALIGNMENT);
+#endif
+
     /*
      * Allocate the trace block.
      */
-    PRTMEMBLOCK pBlock = rtmemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
+    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockCreate(enmType, cbUnaligned, cbAligned, pszTag, pvCaller, RT_SRC_POS_ARGS);
     if (!pBlock)
     {
-        rtmemComplain(pszOp, "Failed to allocate trace block!\n");
+        rtR0MemComplain(pszOp, "Failed to allocate trace block!\n");
         return NULL;
     }
-#endif

     /*
      * Allocate a block with page alignment space + the size of the E-fence.
      */
-    size_t cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
-    void  *pvBlock = RTMemPageAlloc(cbBlock);
+    void      *pvBlock = NULL;
+    RTR0MEMOBJ hMemObj;
+    size_t     cbBlock = RT_ALIGN_Z(cbAligned, PAGE_SIZE) + RTR0MEM_EF_SIZE;
+    int rc = RTR0MemObjAllocPage(&hMemObj, cbBlock, false /*fExecutable*/);
+    if (RT_SUCCESS(rc))
+        pvBlock = RTR0MemObjAddress(hMemObj);
     if (pvBlock)
     {
…
          * and then change the page protection of the fence.
          */
-#ifdef RTALLOC_EFENCE_IN_FRONT
+#ifdef RTR0MEM_EF_IN_FRONT
         void *pvEFence = pvBlock;
-        void *pv       = (char *)pvEFence + RTALLOC_EFENCE_SIZE;
-# ifdef RTALLOC_EFENCE_NOMAN_FILLER
-        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbUnaligned);
+        void *pv       = (char *)pvEFence + RTR0MEM_EF_SIZE;
+# ifdef RTR0MEM_EF_NOMAN_FILLER
+        memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbUnaligned);
 # endif
 #else
-        void *pvEFence = (char *)pvBlock + (cbBlock - RTALLOC_EFENCE_SIZE);
+        void *pvEFence = (char *)pvBlock + (cbBlock - RTR0MEM_EF_SIZE);
         void *pv       = (char *)pvEFence - cbAligned;
-# ifdef RTALLOC_EFENCE_NOMAN_FILLER
-        memset(pvBlock, RTALLOC_EFENCE_NOMAN_FILLER, cbBlock - RTALLOC_EFENCE_SIZE - cbAligned);
-        memset((char *)pv + cbUnaligned, RTALLOC_EFENCE_NOMAN_FILLER, cbAligned - cbUnaligned);
+# ifdef RTR0MEM_EF_NOMAN_FILLER
+        memset(pvBlock, RTR0MEM_EF_NOMAN_FILLER, cbBlock - RTR0MEM_EF_SIZE - cbAligned);
+        memset((char *)pv + cbUnaligned, RTR0MEM_EF_NOMAN_FILLER, cbAligned - cbUnaligned);
 # endif
 #endif

-#ifdef RTALLOC_EFENCE_FENCE_FILLER
-        memset(pvEFence, RTALLOC_EFENCE_FENCE_FILLER, RTALLOC_EFENCE_SIZE);
-#endif
-        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_NONE);
+#ifdef RTR0MEM_EF_FENCE_FILLER
+        memset(pvEFence, RTR0MEM_EF_FENCE_FILLER, RTR0MEM_EF_SIZE);
+#endif
+        rc = RTR0MemObjProtect(hMemObj, (uint8_t *)pvEFence - (uint8_t *)pvBlock, RTR0MEM_EF_SIZE, RTMEM_PROT_NONE);
         if (!rc)
         {
-#ifdef RTALLOC_EFENCE_TRACE
-            rtmemBlockInsert(pBlock, pv);
-#endif
+            rtR0MemBlockInsert(pBlock, pv, hMemObj);
             if (enmType == RTMEMTYPE_RTMEMALLOCZ)
                 memset(pv, 0, cbUnaligned);
-#ifdef RTALLOC_EFENCE_FILLER
+#ifdef RTR0MEM_EF_FILLER
             else
-                memset(pv, RTALLOC_EFENCE_FILLER, cbUnaligned);
-#endif
-
-            rtmemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
+                memset(pv, RTR0MEM_EF_FILLER, cbUnaligned);
+#endif
+
+            rtR0MemLog(pszOp, "returns %p (pvBlock=%p cbBlock=%#x pvEFence=%p cbUnaligned=%#x)\n", pv, pvBlock, cbBlock, pvEFence, cbUnaligned);
             return pv;
         }
-        rtmemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
-        RTMemPageFree(pvBlock, cbBlock);
+        rtR0MemComplain(pszOp, "RTMemProtect failed, pvEFence=%p size %d, rc=%d\n", pvEFence, RTR0MEM_EF_SIZE, rc);
+        RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
     }
     else
-        rtmemComplain(pszOp, "Failed to allocated %lu (%lu) bytes.\n", (unsigned long)cbBlock, (unsigned long)cbUnaligned);
-
-#ifdef RTALLOC_EFENCE_TRACE
-    rtmemBlockFree(pBlock);
-#endif
+    {
+        rtR0MemComplain(pszOp, "Failed to allocated %zu (%zu) bytes (rc=%Rrc).\n", cbBlock, cbUnaligned, rc);
+        if (RT_SUCCESS(rc))
+            RTR0MemObjFree(hMemObj, true /*fFreeMappings*/);
+    }
+
+    rtR0MemBlockFree(pBlock);
     return NULL;
 }
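The RTR0MemObjAllocPage + RTR0MemObjProtect pair above is the ring-0 form of the classic user-mode guard-page trick. For readers who want to see the same mechanics in isolation, a POSIX analogue (a sketch under stated assumptions, not the changeset's code; mmap/mprotect are the user-mode stand-ins for the memory-object APIs):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *fencedAlloc(size_t cb)
    {
        size_t cbPage  = (size_t)sysconf(_SC_PAGESIZE);
        size_t cbData  = (cb + cbPage - 1) & ~(cbPage - 1);
        size_t cbBlock = cbData + cbPage;                   /* + one fence page */

        uint8_t *pbBlock = mmap(NULL, cbBlock, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (pbBlock == MAP_FAILED)
            return NULL;

        uint8_t *pbFence = pbBlock + cbData;
        if (mprotect(pbFence, cbPage, PROT_NONE) != 0)      /* arm the fence */
        {
            munmap(pbBlock, cbBlock);
            return NULL;
        }
        return pbFence - cb;    /* user data ends exactly at the fence */
    }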
 /**
  * Internal free.
  */
-RTDECL(void) rtR3MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
+static void rtR0MemFree(const char *pszOp, RTMEMTYPE enmType, void *pv, void *pvCaller, RT_SRC_POS_DECL)
 {
     NOREF(enmType); RT_SRC_POS_NOREF();
…
         RTAssertDoPanic();

-#ifdef RTALLOC_EFENCE_TRACE
     /*
      * Find the block.
      */
-    PRTMEMBLOCK pBlock = rtmemBlockRemove(pv);
+    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockRemove(pv);
     if (pBlock)
     {
…
             RTLogPrintf("RTMem %s: pv=%p pvCaller=%p cbUnaligned=%#x\n", pszOp, pv, pvCaller, pBlock->cbUnaligned);

-# ifdef RTALLOC_EFENCE_NOMAN_FILLER
+#ifdef RTR0MEM_EF_NOMAN_FILLER
         /*
          * Check whether the no man's land is untouched.
          */
-# ifdef RTALLOC_EFENCE_IN_FRONT
+# ifdef RTR0MEM_EF_IN_FRONT
         void *pvWrong   = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
                                        RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbUnaligned,
-                                       RTALLOC_EFENCE_NOMAN_FILLER);
-# else
+                                       RTR0MEM_EF_NOMAN_FILLER);
+# else
         /* Alignment must match allocation alignment in rtMemAlloc(). */
-        void  *pvWrong  = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
-                                       pBlock->cbAligned - pBlock->cbUnaligned,
-                                       RTALLOC_EFENCE_NOMAN_FILLER);
+        void *pvWrong   = ASMMemIsAll8((char *)pv + pBlock->cbUnaligned,
+                                       pBlock->cbAligned - pBlock->cbUnaligned,
+                                       RTR0MEM_EF_NOMAN_FILLER);
         if (pvWrong)
             RTAssertDoPanic();
         pvWrong = ASMMemIsAll8((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK),
                                RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) - pBlock->cbAligned,
-                               RTALLOC_EFENCE_NOMAN_FILLER);
-# endif
+                               RTR0MEM_EF_NOMAN_FILLER);
+# endif
         if (pvWrong)
             RTAssertDoPanic();
-# endif
-
-# ifdef RTALLOC_EFENCE_FREE_FILL
+#endif
+
+#ifdef RTR0MEM_EF_FREE_FILL
         /*
          * Fill the user part of the block.
          */
-        memset(pv, RTALLOC_EFENCE_FREE_FILL, pBlock->cbUnaligned);
-# endif
+        memset(pv, RTR0MEM_EF_FREE_FILL, pBlock->cbUnaligned);
+#endif
-# if defined(RTALLOC_EFENCE_FREE_DELAYED) && RTALLOC_EFENCE_FREE_DELAYED > 0
+#if defined(RTR0MEM_EF_FREE_DELAYED) && RTR0MEM_EF_FREE_DELAYED > 0
         /*
          * We're doing delayed freeing.
          * That means we'll expand the E-fence to cover the entire block.
          */
-        int rc = RTMemProtect(pv, pBlock->cbAligned, RTMEM_PROT_NONE);
+# ifdef RTR0MEM_EF_IN_FRONT
+        int rc = RTR0MemObjProtect(pBlock->hMemObj, RTR0MEM_EF_SIZE /*offSub*/, pBlock->cbAligned, RTMEM_PROT_NONE);
+# else
+        int rc = RTR0MemObjProtect(pBlock->hMemObj, 0 /*offSub*/,               pBlock->cbAligned, RTMEM_PROT_NONE);
+# endif
         if (RT_SUCCESS(rc))
         {
…
             /*
              * Insert it into the free list and process pending frees.
              */
-            rtmemBlockDelayInsert(pBlock);
-            while ((pBlock = rtmemBlockDelayRemove()) != NULL)
-            {
-                pv = pBlock->Core.Key;
-# ifdef RTALLOC_EFENCE_IN_FRONT
-                void  *pvBlock = (char *)pv - RTALLOC_EFENCE_SIZE;
-# else
-                void  *pvBlock = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
-# endif
-                size_t cbBlock = RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE;
-                rc = RTMemProtect(pvBlock, cbBlock, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
-                if (RT_SUCCESS(rc))
-                    RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
-                else
-                    rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvBlock, cbBlock, rc);
-                rtmemBlockFree(pBlock);
-            }
+            rtR0MemBlockDelayInsert(pBlock);
+            while ((pBlock = rtR0MemBlockDelayRemove()) != NULL)
+                rtR0MemFreeBlock(pBlock, pszOp);
         }
         else
-            rtmemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);
-
-# else /* !RTALLOC_EFENCE_FREE_DELAYED */
-
-        /*
-         * Turn of the E-fence and free it.
-         */
-# ifdef RTALLOC_EFENCE_IN_FRONT
-        void *pvBlock  = (char *)pv - RTALLOC_EFENCE_SIZE;
-        void *pvEFence = pvBlock;
-# else
-        void *pvBlock  = (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK);
-        void *pvEFence = (char *)pv + pBlock->cb;
-# endif
-        int rc = RTMemProtect(pvEFence, RTALLOC_EFENCE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
-        if (RT_SUCCESS(rc))
-            RTMemPageFree(pvBlock, RT_ALIGN_Z(pBlock->cbAligned, PAGE_SIZE) + RTALLOC_EFENCE_SIZE);
-        else
-            rtmemComplain(pszOp, "RTMemProtect(%p, %#x, RTMEM_PROT_READ | RTMEM_PROT_WRITE) -> %d\n", pvEFence, RTALLOC_EFENCE_SIZE, rc);
-        rtmemBlockFree(pBlock);
-
-# endif /* !RTALLOC_EFENCE_FREE_DELAYED */
+            rtR0MemComplain(pszOp, "Failed to expand the efence of pv=%p cb=%d, rc=%d.\n", pv, pBlock, rc);
+
+#else  /* !RTR0MEM_EF_FREE_DELAYED */
+        rtR0MemFreeBlock(pBlock, pszOp);
+#endif /* !RTR0MEM_EF_FREE_DELAYED */
     }
     else
-        rtmemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
-
-#else /* !RTALLOC_EFENCE_TRACE */
-
-    /*
-     * We have no size tracking, so we're not doing any freeing because
-     * we cannot if the E-fence is after the block.
-     * Let's just expand the E-fence to the first page of the user bit
-     * since we know that it's around.
-     */
-    int rc = RTMemProtect((void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), PAGE_SIZE, RTMEM_PROT_NONE);
-    if (RT_FAILURE(rc))
-        rtmemComplain(pszOp, "RTMemProtect(%p, PAGE_SIZE, RTMEM_PROT_NONE) -> %d\n", (void *)((uintptr_t)pv & ~(uintptr_t)PAGE_OFFSET_MASK), rc);
-#endif /* !RTALLOC_EFENCE_TRACE */
+        rtR0MemComplain(pszOp, "pv=%p not found! Incorrect free!\n", pv);
 }
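The ASMMemIsAll8 checks in the free path above scan the no man's land for the 0xaa filler. Judging by the usage (a non-NULL result triggers a panic), its contract is: return the address of the first byte that differs from the filler, or NULL when the whole range is intact. A sketch of an equivalent check, with that contract stated as an assumption:

    #include <stddef.h>
    #include <stdint.h>

    static void *sketchMemIsAll8(void *pv, size_t cb, uint8_t u8)
    {
        uint8_t *pb = (uint8_t *)pv;
        while (cb-- > 0)
        {
            if (*pb != u8)
                return pb;   /* somebody scribbled past the allocation */
            pb++;
        }
        return NULL;         /* padding untouched */
    }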
 /**
  * Internal realloc.
  */
-RTDECL(void *) rtR3MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
-                              const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
+static void *rtR0MemRealloc(const char *pszOp, RTMEMTYPE enmType, void *pvOld, size_t cbNew,
+                            const char *pszTag, void *pvCaller, RT_SRC_POS_DECL)
 {
     /*
…
      */
     if (!pvOld)
-        return rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
+        return rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
     if (!cbNew)
     {
-        rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
+        rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
         return NULL;
     }
-
-#ifdef RTALLOC_EFENCE_TRACE

     /*
      * Get the block, allocate the new, copy the data, free the old one.
      */
-    PRTMEMBLOCK pBlock = rtmemBlockGet(pvOld);
+    PRTR0MEMEFBLOCK pBlock = rtR0MemBlockGet(pvOld);
     if (pBlock)
     {
-        void *pvRet = rtR3MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
+        void *pvRet = rtR0MemAlloc(pszOp, enmType, cbNew, cbNew, pszTag, pvCaller, RT_SRC_POS_ARGS);
         if (pvRet)
         {
             memcpy(pvRet, pvOld, RT_MIN(cbNew, pBlock->cbUnaligned));
-            rtR3MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
+            rtR0MemFree(pszOp, RTMEMTYPE_RTMEMREALLOC, pvOld, pvCaller, RT_SRC_POS_ARGS);
         }
         return pvRet;
     }
-    else
-        rtmemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
+    rtR0MemComplain(pszOp, "pvOld=%p was not found!\n", pvOld);
     return NULL;
-
-#else /* !RTALLOC_EFENCE_TRACE */
-
-    rtmemComplain(pszOp, "Not supported if RTALLOC_EFENCE_TRACE isn't defined!\n");
-    return NULL;
-
-#endif /* !RTALLOC_EFENCE_TRACE */
 }
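Note that realloc here is always allocate-copy-free: the old size comes from the tracking node (cbUnaligned), the copy is bounded by the smaller of the two sizes, and both growing and shrinking end with the data flush against a brand-new fence. The shape of that, with the hypothetical helpers from the earlier sketch:

    #include <string.h>

    void *fencedAlloc(size_t cb);   /* from the sketch after the allocator above */
    void  fencedFree(void *pv);     /* hypothetical counterpart */

    static void *sketchRealloc(void *pvOld, size_t cbOld, size_t cbNew)
    {
        void *pvNew = fencedAlloc(cbNew);   /* fresh block, fence re-armed */
        if (pvNew && pvOld)
        {
            memcpy(pvNew, pvOld, cbOld < cbNew ? cbOld : cbNew);
            fencedFree(pvOld);              /* old block parked/protected */
        }
        return pvNew;
    }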
…
 RTDECL(void *) RTMemEfTmpAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }


 RTDECL(void *) RTMemEfTmpAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }

…
 {
     if (pv)
-        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
+        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }


 RTDECL(void *) RTMemEfAlloc(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }


 RTDECL(void *) RTMemEfAllocZ(size_t cb, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }
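The *Var wrappers in the hunks just below round the unaligned size up before calling the internal allocator. RT_ALIGN_Z is the usual power-of-two round-up; shown here as a plain function for clarity:

    #include <stddef.h>

    /* (cb + (align - 1)) & ~(align - 1)
     * e.g. alignZ(100, 8) == 104, alignZ(104, 8) == 104 */
    static size_t alignZ(size_t cb, size_t cbAlign)
    {
        return (cb + cbAlign - 1) & ~(cbAlign - 1);
    }

This is exactly why the comment on RTR0MEM_EF_ALIGNMENT recommends the *Var APIs for misaligned sizes: the deliberate rounding widens the no man's land instead of silently shifting the fence.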
…
     else
         cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
-    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }

…
     else
         cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
-    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }


 RTDECL(void *) RTMemEfRealloc(void *pvOld, size_t cbNew, const char *pszTag, RT_SRC_POS_DECL) RT_NO_THROW_DEF
 {
-    return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
+    return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }

…
 {
     if (pv)
-        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
+        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), RT_SRC_POS_ARGS);
 }

…
 RTDECL(void *) RTMemEfTmpAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("TmpAlloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }


 RTDECL(void *) RTMemEfTmpAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("TmpAllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }

…
 {
     if (pv)
-        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
+        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
 }


 RTDECL(void *) RTMemEfAllocNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }


 RTDECL(void *) RTMemEfAllocZNP(size_t cb, const char *pszTag) RT_NO_THROW_DEF
 {
-    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cb, cb, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }

…
     else
         cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
-    return rtR3MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("Alloc", RTMEMTYPE_RTMEMALLOC, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }

…
     else
         cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
-    return rtR3MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemAlloc("AllocZ", RTMEMTYPE_RTMEMALLOCZ, cbUnaligned, cbAligned, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }
…
 RTDECL(void *) RTMemEfReallocNP(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
 {
-    return rtR3MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
+    return rtR0MemRealloc("Realloc", RTMEMTYPE_RTMEMREALLOC, pvOld, cbNew, pszTag, ASMReturnAddress(), NULL, 0, NULL);
 }


 RTDECL(void) RTMemEfFreeNP(void *pv) RT_NO_THROW_DEF
 {
     if (pv)
-        rtR3MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
+        rtR0MemFree("Free", RTMEMTYPE_RTMEMFREE, pv, ASMReturnAddress(), NULL, 0, NULL);
 }
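For a sense of what the public API buys a ring-0 consumer, a hypothetical usage sketch (not from the changeset; assumes the default configuration above, i.e. fence behind the block and RTR0MEM_EF_ALIGNMENT of 1, so the requested size ends exactly at the fence page):

    #include <iprt/mem.h>   /* declares the RTMemEf* APIs */

    static void sketchOverrunDemo(void)
    {
        char *pb = (char *)RTMemEfAllocNP(16, NULL);
        if (!pb)
            return;
        pb[15] = 1;          /* fine: last byte of the allocation */
        /* pb[16] = 1; */    /* would hit the fence page -> immediate fault */
        RTMemEfFreeNP(pb);
        /* pb[0] = 1; */     /* with delayed free enabled: also faults */
    }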
trunk/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
(r57358 → r58269)

…
 #include "internal/initterm.h"
+#include "internal/mem.h"
 #include "internal/thread.h"
…
     if (RT_SUCCESS(rc))
     {
+#ifdef RTR0MEM_WITH_EF_APIS
+        rtR0MemEfInit();
+#endif
         rc = rtThreadInit();
         if (RT_SUCCESS(rc))
…
             rtThreadTerm();
         }
+#ifdef RTR0MEM_WITH_EF_APIS
+        rtR0MemEfTerm();
+#endif
         rtR0TermNative();
     }
…
     rtR0PowerNotificationTerm();
     rtR0MpNotificationTerm();
 #endif
+#ifdef RTR0MEM_WITH_EF_APIS
+    rtR0MemEfTerm();
+#endif
     rtR0TermNative();
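The second file just wires the new allocator into the ring-0 lifecycle: rtR0MemEfInit() runs right after native initialisation succeeds, and rtR0MemEfTerm() runs on every termination path before rtR0TermNative(), so leaks are reported while the module is still alive. The same pattern, reduced to a sketch (all names here are illustrative stand-ins, not IPRT symbols):

    /* Optional subsystem behind a single #ifdef: builds without the
     * define compile the hooks to nothing. */
    #ifdef SKETCH_WITH_DEBUG_HEAP
    void debugHeapInit(void);
    void debugHeapTerm(void);
    #else
    # define debugHeapInit() do { } while (0)
    # define debugHeapTerm() do { } while (0)
    #endif

    int  nativeInit(void);   /* stand-ins for rtR0InitNative/rtThreadInit */
    int  threadInit(void);
    void nativeTerm(void);

    int moduleInit(void)
    {
        int rc = nativeInit();
        if (rc == 0)
        {
            debugHeapInit();
            rc = threadInit();
            if (rc != 0)
            {
                debugHeapTerm();   /* unwind in reverse order */
                nativeTerm();
            }
        }
        return rc;
    }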