VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/memsafer-r3.cpp@73720

Last change on this file since 73720 was 73703, checked in by vboxsync, 6 years ago

IPRT/r3/memsafer: Switched to individual scrambling words for each allocation.

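The revision note above ("individual scrambling words for each allocation") corresponds to the per-node uScramblerXor field in the listing below: every allocation gets its own lazily generated XOR word instead of sharing one global value, so disclosing one scrambled buffer reveals nothing about the others. A minimal sketch of the idea (simplified from RTMemSaferScramble further down, not literal code from the file):

    /* Each node carries its own scrambler word, generated on first use. */
    if (!pNode->uScramblerXor)
        pNode->uScramblerXor = (uintptr_t)RTRandU64();
    uintptr_t *pu = (uintptr_t *)pv;                    /* the user memory */
    for (size_t cbLeft = cbAligned; cbLeft > 0; cbLeft -= sizeof(*pu), pu++)
        *pu ^= pNode->uScramblerXor;                    /* XOR in place */
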
/* $Id: memsafer-r3.cpp 73703 2018-08-16 09:27:22Z vboxsync $ */
/** @file
 * IPRT - Memory Allocator for Sensitive Data, generic heap-based implementation.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "internal/iprt.h"
#include <iprt/memsafer.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/rand.h>
#include <iprt/param.h>
#include <iprt/string.h>
#ifdef IN_SUP_R3
# include <VBox/sup.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Allocation size alignment (power of two). */
#define RTMEMSAFER_ALIGN        16


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Allocators.
 */
typedef enum RTMEMSAFERALLOCATOR
{
    /** Invalid method. */
    RTMEMSAFERALLOCATOR_INVALID = 0,
    /** RTMemPageAlloc. */
    RTMEMSAFERALLOCATOR_RTMEMPAGE,
    /** SUPR3PageAllocEx. */
    RTMEMSAFERALLOCATOR_SUPR3
} RTMEMSAFERALLOCATOR;

/**
 * Tracking node (lives on normal heap).
 */
typedef struct RTMEMSAFERNODE
{
    /** Node core.
     * The core key is a scrambled pointer to the user memory. */
    AVLPVNODECORE       Core;
    /** The allocation flags. */
    uint32_t            fFlags;
    /** The offset into the allocation of the user memory. */
    uint32_t            offUser;
    /** The requested allocation size. */
    size_t              cbUser;
    /** The allocation size in pages, this includes the two guard pages. */
    uint32_t            cPages;
    /** The allocator used for this node. */
    RTMEMSAFERALLOCATOR enmAllocator;
    /** XOR scrambler value for the memory. */
    uintptr_t           uScramblerXor;
} RTMEMSAFERNODE;
/** Pointer to an allocation tracking node. */
typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Init once structure for this module. */
static RTONCE       g_MemSaferOnce = RTONCE_INITIALIZER;
/** Critical section protecting the allocation tree. */
static RTCRITSECTRW g_MemSaferCritSect;
/** Tree of allocation nodes. */
static AVLPVTREE    g_pMemSaferTree;
/** XOR scrambler value for pointers. */
static uintptr_t    g_uMemSaferPtrScramblerXor;
/** Pointer rotate shift count. */
static uintptr_t    g_cMemSaferPtrScramblerRotate;


/**
 * @callback_method_impl{FNRTONCE, Inits globals.}
 */
static DECLCALLBACK(int32_t) rtMemSaferOnceInit(void *pvUserIgnore)
{
    RT_NOREF_PV(pvUserIgnore);

    g_uMemSaferPtrScramblerXor = (uintptr_t)RTRandU64();
    g_cMemSaferPtrScramblerRotate = RTRandU32Ex(0, ARCH_BITS - 1);
    return RTCritSectRwInit(&g_MemSaferCritSect);
}


/**
 * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
 */
static DECLCALLBACK(void) rtMemSaferOnceTerm(void *pvUser, bool fLazyCleanUpOk)
{
    RT_NOREF_PV(pvUser);

    if (!fLazyCleanUpOk)
    {
        RTCritSectRwDelete(&g_MemSaferCritSect);
        Assert(!g_pMemSaferTree);
    }
}


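/**
 * Scrambles a pointer for use as an AVL tree key.
 *
 * The transform (XOR with a process-global random value, then a rotate by a
 * random count) is fixed for the life of the process, so insertion and lookup
 * simply scramble the user pointer the same way; no inverse is needed.
 *
 * @returns The scrambled pointer (tree key).
 * @param   pvUser      The user pointer to scramble.
 */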
DECLINLINE(void *) rtMemSaferScramblePointer(void *pvUser)
{
    uintptr_t uPtr = (uintptr_t)pvUser;
    uPtr ^= g_uMemSaferPtrScramblerXor;
#if ARCH_BITS == 64
    uPtr = ASMRotateRightU64(uPtr, g_cMemSaferPtrScramblerRotate);
#elif ARCH_BITS == 32
    uPtr = ASMRotateRightU32(uPtr, g_cMemSaferPtrScramblerRotate);
#else
# error "Unsupported/missing ARCH_BITS."
#endif
    return (void *)uPtr;
}


/**
 * Inserts a tracking node into the tree.
 *
 * @param   pThis       The allocation tracking node to insert.
 */
static void rtMemSaferNodeInsert(PRTMEMSAFERNODE pThis)
{
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    pThis->Core.Key = rtMemSaferScramblePointer(pThis->Core.Key);
    bool fRc = RTAvlPVInsert(&g_pMemSaferTree, &pThis->Core);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    Assert(fRc); NOREF(fRc);
}


/**
 * Finds a tracking node in the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser      The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeLookup(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterShared(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveShared(&g_MemSaferCritSect);
    return pThis;
}


/**
 * Removes a tracking node from the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser      The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeRemove(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVRemove(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    return pThis;
}


RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* The first time around we generate a new xor value. */
    if (!pThis->uScramblerXor)
        pThis->uScramblerXor = (uintptr_t)RTRandU64();

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferScramble);


RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferUnscramble);
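

/*
 * Usage sketch (hypothetical caller, not part of this file): a secret is
 * typically parked scrambled and only unscrambled while it is actually used.
 *
 *      void *pvKey = NULL;
 *      int   rc = RTMemSaferAllocZExTag(&pvKey, 64, 0, "example");
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvKey, abSecret, 64);        // store the secret material
 *          RTMemSaferScramble(pvKey, 64);      // park it scrambled
 *          ...
 *          RTMemSaferUnscramble(pvKey, 64);    // make it usable again
 *          RTMemSaferFree(pvKey, 64);          // wipes before freeing
 *      }
 */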


/**
 * Initializes the pages.
 *
 * Fills the memory with random bytes in order to make it less obvious where
 * the secret data starts and ends.  We also zero the user memory in case the
 * allocator does not do this.
 *
 * @param   pThis       The allocation tracking node.  The Core.Key member
 *                      will be set.
 * @param   pvPages     The pages to initialize.
 */
static void rtMemSaferInitializePages(PRTMEMSAFERNODE pThis, void *pvPages)
{
    RTRandBytes(pvPages, PAGE_SIZE + pThis->offUser);

    uint8_t *pbUser = (uint8_t *)pvPages + PAGE_SIZE + pThis->offUser;
    pThis->Core.Key = pbUser;
    RT_BZERO(pbUser, pThis->cbUser); /* paranoia */

    RTRandBytes(pbUser + pThis->cbUser, (size_t)pThis->cPages * PAGE_SIZE - PAGE_SIZE - pThis->offUser - pThis->cbUser);
}
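

/*
 * Layout of a finished allocation (the guard pages get RTMEM_PROT_NONE in the
 * allocator functions below):
 *
 *      +------------+--------------+-----------+--------------+------------+
 *      | guard page |  random pad  | user data |  random pad  | guard page |
 *      |            |  (offUser)   |  (cbUser) | to page end  |            |
 *      +------------+--------------+-----------+--------------+------------+
 */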


/**
 * Allocates pages from the support driver and initializes them.
 *
 * @returns VBox status code.
 * @param   pThis       The allocator node.  Core.Key will be set on successful
 *                      return (unscrambled).
 */
static int rtMemSaferSupR3AllocPages(PRTMEMSAFERNODE pThis)
{
#ifdef IN_SUP_R3
    /*
     * Try allocate the memory.
     */
    void *pvPages;
    int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * On darwin we cannot allocate pages without an R0 mapping and
         * SUPR3PageAllocEx falls back to another method which is incompatible
         * with the way SUPR3PageProtect works.  So, skip changing the
         * protection of the guard pages there.
         */
#ifdef RT_OS_DARWIN
        return VINF_SUCCESS;
#else
        /*
         * Configure the guard pages.
         * SUPR3PageProtect isn't supported on all hosts; we ignore that.
         */
        rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - 1) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }
        else if (rc == VERR_NOT_SUPPORTED)
            return VINF_SUCCESS;

        /* failed. */
        int rc2 = SUPR3PageFreeEx(pvPages, pThis->cPages); AssertRC(rc2);
#endif
    }
    return rc;

#else  /* !IN_SUP_R3 */
    RT_NOREF_PV(pThis);
    return VERR_NOT_SUPPORTED;
#endif /* !IN_SUP_R3 */
}


/**
 * Allocates and initializes pages using the IPRT page allocator API.
 *
 * @returns VBox status code.
 * @param   pThis       The allocator node.  Core.Key will be set on successful
 *                      return (unscrambled).
 */
static int rtMemSaferMemAllocPages(PRTMEMSAFERNODE pThis)
{
    /*
     * Try allocate the memory.
     */
    int rc = VINF_SUCCESS;
    void *pvPages = RTMemPageAlloc((size_t)pThis->cPages * PAGE_SIZE);
    if (pvPages)
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * Configure the guard pages.
         */
        rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }

        /* failed. */
        RTMemPageFree(pvPages, (size_t)pThis->cPages * PAGE_SIZE);
    }
    else
        rc = VERR_NO_PAGE_MEMORY;

    return rc;
}


RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    RT_NOREF_PV(pszTag);

    /*
     * Validate input.
     */
    AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
    *ppvNew = NULL;
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= 32U*_1M - PAGE_SIZE * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
    AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Initialize globals.
     */
    int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate a tracker node first.
         */
        PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTMemAllocZ(sizeof(RTMEMSAFERNODE));
        if (pThis)
        {
            /*
             * Prepare the allocation.
             */
            pThis->cbUser  = cb;
            pThis->offUser = (RTRandU32Ex(0, 128) * RTMEMSAFER_ALIGN) & PAGE_OFFSET_MASK; /* 0..2048 bytes in 16 byte steps */

            size_t cbNeeded = pThis->offUser + pThis->cbUser;
            cbNeeded = RT_ALIGN_Z(cbNeeded, PAGE_SIZE);

            pThis->cPages = (uint32_t)(cbNeeded / PAGE_SIZE) + 2; /* +2 for guard pages */

            /*
             * Try allocate the memory, using the best allocator by default and
             * falling back on the less safe one.
             */
            rc = rtMemSaferSupR3AllocPages(pThis);
            if (RT_SUCCESS(rc))
                pThis->enmAllocator = RTMEMSAFERALLOCATOR_SUPR3;
            else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
            {
                rc = rtMemSaferMemAllocPages(pThis);
                if (RT_SUCCESS(rc))
                    pThis->enmAllocator = RTMEMSAFERALLOCATOR_RTMEMPAGE;
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Insert the node.
                 */
                *ppvNew = pThis->Core.Key;
                rtMemSaferNodeInsert(pThis); /* (Scrambles Core.Key.) */
                return VINF_SUCCESS;
            }

            RTMemFree(pThis);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);
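

/*
 * Usage sketch (hypothetical caller, not part of this file): allocate a
 * 256 byte buffer that must not be pageable, use it, then free it again
 * (RTMemSaferFree wipes the buffer before returning the pages).
 *
 *      void *pv = NULL;
 *      int rc = RTMemSaferAllocZExTag(&pv, 256, RTMEMSAFER_F_REQUIRE_NOT_PAGABLE, "key");
 *      if (RT_SUCCESS(rc))
 *      {
 *          ...
 *          RTMemSaferFree(pv, 256);
 *      }
 */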


RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeRemove(pv);
        AssertReturnVoid(pThis);
        AssertMsg(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser));

        /*
         * Wipe the user memory first.
         */
        RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);

        /*
         * Free the pages.
         */
        uint8_t *pbPages = (uint8_t *)pv - pThis->offUser - PAGE_SIZE;
        size_t   cbPages = (size_t)pThis->cPages * PAGE_SIZE;
        switch (pThis->enmAllocator)
        {
#ifdef IN_SUP_R3
            case RTMEMSAFERALLOCATOR_SUPR3:
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - PAGE_SIZE), PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageFreeEx(pbPages, pThis->cPages);
                break;
#endif
            case RTMEMSAFERALLOCATOR_RTMEMPAGE:
                RTMemProtect(pbPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemProtect(pbPages + cbPages - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemPageFree(pbPages, cbPages);
                break;

            default:
                AssertFailed();
        }

        /*
         * Free the tracking node.
         */
        pThis->Core.Key = NULL;
        pThis->offUser  = 0;
        pThis->cbUser   = 0;
        RTMemFree(pThis);
    }
    else
        Assert(cb == 0);
}
RT_EXPORT_SYMBOL(RTMemSaferFree);


/**
 * The simplest reallocation method: allocate a new block, copy over the data,
 * and free the old block.
 */
static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
{
    void *pvNew;
    int rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = pvNew;
    }
    return rc;
}


RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    int rc;
    /* Real realloc. */
    if (cbNew && cbOld)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pvOld);
        AssertReturn(pThis, VERR_INVALID_POINTER);
        AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);

        if (pThis->fFlags == fFlags)
        {
            if (cbNew > cbOld)
            {
                /*
                 * Is there enough room for us to grow?
                 */
                size_t cbMax = (size_t)(pThis->cPages - 2) * PAGE_SIZE;
                if (cbNew <= cbMax)
                {
                    size_t const cbAdded = (cbNew - cbOld);
                    size_t const cbAfter = cbMax - pThis->offUser - cbOld;
                    if (cbAfter >= cbAdded)
                    {
                        /*
                         * Sufficient space after the current allocation.
                         */
                        uint8_t *pbNewSpace = (uint8_t *)pvOld + cbOld;
                        RT_BZERO(pbNewSpace, cbAdded);
                        *ppvNew = pvOld;
                    }
                    else
                    {
                        /*
                         * Have to move the allocation to make enough room at the
                         * end.  In order to make it a little less predictable and
                         * maybe avoid a relocation or two in the next call, halve
                         * the page offset until it fits.
                         */
                        AssertReturn(rtMemSaferNodeRemove(pvOld) == pThis, VERR_INTERNAL_ERROR_3);
                        uint32_t offNewUser = pThis->offUser;
                        do
                            offNewUser = offNewUser / 2;
                        while ((pThis->offUser - offNewUser) + cbAfter < cbAdded);
                        offNewUser &= ~(RTMEMSAFER_ALIGN - 1U);

                        uint32_t const cbMove = pThis->offUser - offNewUser;
                        uint8_t *pbNew = (uint8_t *)pvOld - cbMove;
                        memmove(pbNew, pvOld, cbOld);

                        RT_BZERO(pbNew + cbOld, cbAdded);
                        if (cbMove > cbAdded)
                            RTMemWipeThoroughly(pbNew + cbNew, cbMove - cbAdded, 3);

                        pThis->offUser  = offNewUser;
                        pThis->Core.Key = pbNew;
                        *ppvNew = pbNew;

                        rtMemSaferNodeInsert(pThis);
                    }
                    Assert(((uintptr_t)*ppvNew & PAGE_OFFSET_MASK) == pThis->offUser);
                    pThis->cbUser = cbNew;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /*
                     * Not enough space, allocate a new block and copy over the data.
                     */
                    rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
                }
            }
            else
            {
                /*
                 * Shrinking the allocation, just wipe the memory that is no longer
                 * being used.
                 */
                if (cbNew != cbOld)
                {
                    uint8_t *pbAbandoned = (uint8_t *)pvOld + cbNew;
                    RTMemWipeThoroughly(pbAbandoned, cbOld - cbNew, 3);
                }
                pThis->cbUser = cbNew;
                *ppvNew = pvOld;
                rc = VINF_SUCCESS;
            }
        }
        else if (!pThis->fFlags)
        {
            /*
             * New flags added.  Allocate a new block and copy over the old one.
             */
            rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
        }
        else
        {
            /* Incompatible flags. */
            AssertMsgFailed(("fFlags=%#x old=%#x\n", fFlags, pThis->fFlags));
            rc = VERR_INVALID_FLAGS;
        }
    }
    /*
     * First allocation. Pass it on.
     */
    else if (!cbOld)
    {
        Assert(pvOld == NULL);
        rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
    }
    /*
     * Free operation. Pass it on.
     */
    else
    {
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = NULL;
        rc = VINF_SUCCESS;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZExTag);
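

/*
 * Usage sketch (hypothetical caller, not part of this file): the realloc API
 * doubles as alloc and free, mirroring realloc(3) semantics.
 *
 *      void *pv = NULL;
 *      int rc = RTMemSaferReallocZExTag(0, NULL, 64, &pv, 0, "tag");    // alloc
 *      if (RT_SUCCESS(rc))
 *          rc = RTMemSaferReallocZExTag(64, pv, 128, &pv, 0, "tag");    // grow
 *      if (RT_SUCCESS(rc))
 *          RTMemSaferReallocZExTag(128, pv, 0, &pv, 0, "tag");          // free
 */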


RTDECL(void *) RTMemSaferAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZTag);


RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZTag);