VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/memsafer-r3.cpp@98456

Last change on this file since 98456 was 98103, checked in by vboxsync, 2 years ago: Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 22.2 KB
/* $Id: memsafer-r3.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * IPRT - Memory Allocate for Sensitive Data, generic heap-based implementation.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "internal/iprt.h"
#include <iprt/memsafer.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/avl.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/once.h>
#include <iprt/rand.h>
#include <iprt/param.h>
#include <iprt/string.h>
#ifdef IN_SUP_R3
# include <VBox/sup.h>
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Allocation size alignment (power of two). */
#define RTMEMSAFER_ALIGN        16


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Allocators.
 */
typedef enum RTMEMSAFERALLOCATOR
{
    /** Invalid method. */
    RTMEMSAFERALLOCATOR_INVALID = 0,
    /** RTMemPageAlloc. */
    RTMEMSAFERALLOCATOR_RTMEMPAGE,
    /** SUPR3PageAllocEx. */
    RTMEMSAFERALLOCATOR_SUPR3
} RTMEMSAFERALLOCATOR;

/**
 * Tracking node (lives on normal heap).
 */
typedef struct RTMEMSAFERNODE
{
    /** Node core.
     * The core key is a scrambled pointer to the user memory. */
    AVLPVNODECORE       Core;
    /** The allocation flags. */
    uint32_t            fFlags;
    /** The offset into the allocation of the user memory. */
    uint32_t            offUser;
    /** The requested allocation size. */
    size_t              cbUser;
    /** The allocation size in pages; this includes the two guard pages. */
    uint32_t            cPages;
    /** The allocator used for this node. */
    RTMEMSAFERALLOCATOR enmAllocator;
    /** XOR scrambler value for memory. */
    uintptr_t           uScramblerXor;
} RTMEMSAFERNODE;
/** Pointer to an allocation tracking node. */
typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Init once structure for this module. */
static RTONCE g_MemSaferOnce = RTONCE_INITIALIZER;
/** Critical section protecting the allocation tree. */
static RTCRITSECTRW g_MemSaferCritSect;
/** Tree of allocation nodes. */
static AVLPVTREE g_pMemSaferTree;
/** XOR scrambler value for pointers. */
static uintptr_t g_uMemSaferPtrScramblerXor;
/** Pointer rotate shift count. */
static uintptr_t g_cMemSaferPtrScramblerRotate;


/**
 * @callback_method_impl{FNRTONCE, Inits globals.}
 */
static DECLCALLBACK(int32_t) rtMemSaferOnceInit(void *pvUserIgnore)
{
    RT_NOREF_PV(pvUserIgnore);

    g_uMemSaferPtrScramblerXor = (uintptr_t)RTRandU64();
    g_cMemSaferPtrScramblerRotate = RTRandU32Ex(0, ARCH_BITS - 1);
    return RTCritSectRwInit(&g_MemSaferCritSect);
}


/**
 * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
 */
static DECLCALLBACK(void) rtMemSaferOnceTerm(void *pvUser, bool fLazyCleanUpOk)
{
    RT_NOREF_PV(pvUser);

    if (!fLazyCleanUpOk)
    {
        RTCritSectRwDelete(&g_MemSaferCritSect);
        Assert(!g_pMemSaferTree);
    }
}



DECLINLINE(void *) rtMemSaferScramblePointer(void *pvUser)
{
    uintptr_t uPtr = (uintptr_t)pvUser;
    uPtr ^= g_uMemSaferPtrScramblerXor;
#if ARCH_BITS == 64
    uPtr = ASMRotateRightU64(uPtr, g_cMemSaferPtrScramblerRotate);
#elif ARCH_BITS == 32
    uPtr = ASMRotateRightU32(uPtr, g_cMemSaferPtrScramblerRotate);
#else
# error "Unsupported/missing ARCH_BITS."
#endif
    return (void *)uPtr;
}
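
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the
 * transform above never needs an explicit inverse here, because the same
 * scrambling is applied both when a key is inserted and when it is looked up.
 * If one did want to invert it, the steps simply run backwards: rotate left,
 * then XOR.  Assumes the ASMRotateLeftU64/U32 helpers from iprt/asm.h.
 */
#if 0
DECLINLINE(void *) rtMemSaferUnscramblePointerSketch(void *pvScrambled)
{
    uintptr_t uPtr = (uintptr_t)pvScrambled;
# if ARCH_BITS == 64
    uPtr = ASMRotateLeftU64(uPtr, g_cMemSaferPtrScramblerRotate); /* undo the right rotate */
# else
    uPtr = ASMRotateLeftU32(uPtr, g_cMemSaferPtrScramblerRotate); /* undo the right rotate */
# endif
    uPtr ^= g_uMemSaferPtrScramblerXor;                           /* undo the XOR */
    return (void *)uPtr;
}
#endif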


/**
 * Inserts a tracking node into the tree.
 *
 * @param   pThis   The allocation tracking node to insert.
 */
static void rtMemSaferNodeInsert(PRTMEMSAFERNODE pThis)
{
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    pThis->Core.Key = rtMemSaferScramblePointer(pThis->Core.Key);
    bool fRc = RTAvlPVInsert(&g_pMemSaferTree, &pThis->Core);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    Assert(fRc); NOREF(fRc);
}


/**
 * Finds a tracking node in the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser  The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeLookup(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterShared(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveShared(&g_MemSaferCritSect);
    return pThis;
}


/**
 * Removes a tracking node from the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser  The user pointer to the allocation.
 */
static PRTMEMSAFERNODE rtMemSaferNodeRemove(void *pvUser)
{
    void *pvKey = rtMemSaferScramblePointer(pvUser);
    RTCritSectRwEnterExcl(&g_MemSaferCritSect);
    PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVRemove(&g_pMemSaferTree, pvKey);
    RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
    return pThis;
}


RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* First time we get a new xor value. */
    if (!pThis->uScramblerXor)
        pThis->uScramblerXor = (uintptr_t)RTRandU64();

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferScramble);


RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
{
    PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
    AssertReturn(pThis, VERR_INVALID_POINTER);
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);

    /* Note! This isn't supposed to be safe, just less obvious. */
    uintptr_t *pu = (uintptr_t *)pv;
    cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
    while (cb > 0)
    {
        *pu ^= pThis->uScramblerXor;
        pu++;
        cb -= sizeof(*pu);
    }

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMemSaferUnscramble);
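
/*
 * Usage sketch: a caller can XOR-obfuscate a secret while it is not actively
 * in use and restore it on demand.  pvKeyMaterial/cbKeyMaterial are
 * placeholders for a block previously obtained from RTMemSaferAllocZ.
 */
#if 0
    int rc = RTMemSaferScramble(pvKeyMaterial, cbKeyMaterial);   /* hide the secret */
    AssertRC(rc);
    /* ... later, when the secret is needed again ... */
    rc = RTMemSaferUnscramble(pvKeyMaterial, cbKeyMaterial);     /* restore it */
    AssertRC(rc);
#endif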


/**
 * Initializes the pages.
 *
 * Fills the memory with random bytes in order to make it less obvious where the
 * secret data starts and ends.  We also zero the user memory in case the
 * allocator does not do this.
 *
 * @param   pThis       The allocation tracker node.  The Core.Key member
 *                      will be set.
 * @param   pvPages     The pages to initialize.
 */
static void rtMemSaferInitializePages(PRTMEMSAFERNODE pThis, void *pvPages)
{
    RTRandBytes(pvPages, PAGE_SIZE + pThis->offUser);

    uint8_t *pbUser = (uint8_t *)pvPages + PAGE_SIZE + pThis->offUser;
    pThis->Core.Key = pbUser;
    RT_BZERO(pbUser, pThis->cbUser); /* paranoia */

    RTRandBytes(pbUser + pThis->cbUser, (size_t)pThis->cPages * PAGE_SIZE - PAGE_SIZE - pThis->offUser - pThis->cbUser);
}
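
/*
 * Resulting layout sketch (offUser is a random multiple of RTMEMSAFER_ALIGN,
 * the guard pages are protected by the callers of this function):
 *
 *  +------------+--------------+----------------------+--------------+------------+
 *  | guard page | random bytes | user memory (zeroed) | random bytes | guard page |
 *  +------------+--------------+----------------------+--------------+------------+
 *  ^ pvPages    ^ +PAGE_SIZE   ^ +PAGE_SIZE+offUser      ... cPages pages in total
 */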


/**
 * Allocates pages from the support driver and initializes them.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node.  Core.Key will be set on successful
 *                  return (unscrambled).
 */
static int rtMemSaferSupR3AllocPages(PRTMEMSAFERNODE pThis)
{
#ifdef IN_SUP_R3
    /*
     * Try to allocate the memory.
     */
    void *pvPages;
    int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * On darwin we cannot allocate pages without an R0 mapping, and
         * SUPR3PageAllocEx falls back to another method which is incompatible
         * with the way SUPR3PageProtect works, so we skip changing the
         * protection of the guard pages there.
         */
#ifdef RT_OS_DARWIN
        return VINF_SUCCESS;
#else
        /*
         * Configure the guard pages.
         * SUPR3PageProtect isn't supported on all hosts; we ignore that.
         */
        rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - 1) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }
        else if (rc == VERR_NOT_SUPPORTED)
            return VINF_SUCCESS;

        /* failed. */
        int rc2 = SUPR3PageFreeEx(pvPages, pThis->cPages); AssertRC(rc2);
#endif
    }
    return rc;

#else  /* !IN_SUP_R3 */
    RT_NOREF_PV(pThis);
    return VERR_NOT_SUPPORTED;
#endif /* !IN_SUP_R3 */
}


/**
 * Allocates and initializes pages using the IPRT page allocator API.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node.  Core.Key will be set on successful
 *                  return (unscrambled).
 */
static int rtMemSaferMemAllocPages(PRTMEMSAFERNODE pThis)
{
    /*
     * Try to allocate the memory.
     */
    int rc = VINF_SUCCESS;
    void *pvPages = RTMemPageAllocEx((size_t)pThis->cPages * PAGE_SIZE,
                                     RTMEMPAGEALLOC_F_ADVISE_LOCKED | RTMEMPAGEALLOC_F_ADVISE_NO_DUMP | RTMEMPAGEALLOC_F_ZERO);
    if (pvPages)
    {
        rtMemSaferInitializePages(pThis, pvPages);

        /*
         * Configure the guard pages.
         */
        rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_NONE);
        if (RT_SUCCESS(rc))
        {
            rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
            rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        }

        /* failed. */
        RTMemPageFree(pvPages, (size_t)pThis->cPages * PAGE_SIZE);
    }
    else
        rc = VERR_NO_PAGE_MEMORY;

    return rc;
}


RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    RT_NOREF_PV(pszTag);

    /*
     * Validate input.
     */
    AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
    *ppvNew = NULL;
    AssertReturn(cb, VERR_INVALID_PARAMETER);
    AssertReturn(cb <= 32U*_1M - PAGE_SIZE * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
    AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Initialize globals.
     */
    int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate a tracker node first.
         */
        PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTMemAllocZ(sizeof(RTMEMSAFERNODE));
        if (pThis)
        {
            /*
             * Prepare the allocation.
             */
            pThis->cbUser  = cb;
            pThis->offUser = (RTRandU32Ex(0, 128) * RTMEMSAFER_ALIGN) & PAGE_OFFSET_MASK;

            size_t cbNeeded = pThis->offUser + pThis->cbUser;
            cbNeeded = RT_ALIGN_Z(cbNeeded, PAGE_SIZE);

            pThis->cPages = (uint32_t)(cbNeeded / PAGE_SIZE) + 2; /* +2 for guard pages */

            /*
             * Try to allocate the memory, using the best allocator by default and
             * falling back on the less safe one.
             */
            rc = rtMemSaferSupR3AllocPages(pThis);
            if (RT_SUCCESS(rc))
                pThis->enmAllocator = RTMEMSAFERALLOCATOR_SUPR3;
            else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
            {
                rc = rtMemSaferMemAllocPages(pThis);
                if (RT_SUCCESS(rc))
                    pThis->enmAllocator = RTMEMSAFERALLOCATOR_RTMEMPAGE;
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Insert the node.
                 */
                *ppvNew = pThis->Core.Key;
                rtMemSaferNodeInsert(pThis); /* (Scrambles Core.Key) */
                return VINF_SUCCESS;
            }

            RTMemFree(pThis);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);
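
/*
 * Usage sketch: allocating, using and freeing a secret buffer via the
 * convenience wrappers declared in iprt/memsafer.h.  The 64-byte size is
 * illustrative.
 */
#if 0
    uint8_t *pbKey = (uint8_t *)RTMemSaferAllocZ(64); /* zeroed, guarded, randomly offset */
    if (pbKey)
    {
        /* ... fill pbKey with key material and use it ... */
        RTMemSaferFree(pbKey, 64);                    /* wipes thoroughly before freeing */
    }
#endif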


RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW_DEF
{
    if (pv)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeRemove(pv);
        AssertReturnVoid(pThis);
        if (cb == 0) /* for openssl use */
            cb = pThis->cbUser;
        else
            AssertMsg(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser));

        /*
         * Wipe the user memory first.
         */
        RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);

        /*
         * Free the pages.
         */
        uint8_t *pbPages = (uint8_t *)pv - pThis->offUser - PAGE_SIZE;
        size_t   cbPages = (size_t)pThis->cPages * PAGE_SIZE;
        switch (pThis->enmAllocator)
        {
#ifdef IN_SUP_R3
            case RTMEMSAFERALLOCATOR_SUPR3:
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - PAGE_SIZE), PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                SUPR3PageFreeEx(pbPages, pThis->cPages);
                break;
#endif
            case RTMEMSAFERALLOCATOR_RTMEMPAGE:
                RTMemProtect(pbPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemProtect(pbPages + cbPages - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                RTMemPageFree(pbPages, cbPages);
                break;

            default:
                AssertFailed();
        }

        /*
         * Free the tracking node.
         */
        pThis->Core.Key = NULL;
        pThis->offUser  = 0;
        pThis->cbUser   = 0;
        RTMemFree(pThis);
    }
    else
        Assert(cb == 0);
}
RT_EXPORT_SYMBOL(RTMemSaferFree);


RTDECL(size_t) RTMemSaferGetSize(void *pv) RT_NO_THROW_DEF
{
    size_t cbRet = 0;
    if (pv)
    {
        void *pvKey = rtMemSaferScramblePointer(pv);
        RTCritSectRwEnterShared(&g_MemSaferCritSect);
        PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
        if (pThis)
            cbRet = pThis->cbUser;
        RTCritSectRwLeaveShared(&g_MemSaferCritSect);
    }
    return cbRet;
}
RT_EXPORT_SYMBOL(RTMemSaferGetSize);
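
/*
 * Usage sketch: querying the user size of a tracked block when only the
 * pointer is passed around.  pvSecret is a placeholder; the call returns 0
 * for pointers this allocator does not know about.
 */
#if 0
    size_t cbSecret = RTMemSaferGetSize(pvSecret);
#endif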


/**
 * The simplest reallocation method: allocate a new block, copy over the data,
 * free the old block.
 */
static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
{
    void *pvNew;
    int rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
    if (RT_SUCCESS(rc))
    {
        memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = pvNew;
    }
    return rc;
}


RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
{
    int rc;
    /* Real realloc. */
    if (cbNew && cbOld)
    {
        PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pvOld);
        AssertReturn(pThis, VERR_INVALID_POINTER);
        AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);

        if (pThis->fFlags == fFlags)
        {
            if (cbNew > cbOld)
            {
                /*
                 * Is there enough room for us to grow?
                 */
                size_t cbMax = (size_t)(pThis->cPages - 2) * PAGE_SIZE;
                if (cbNew <= cbMax)
                {
                    size_t const cbAdded = (cbNew - cbOld);
                    size_t const cbAfter = cbMax - pThis->offUser - cbOld;
                    if (cbAfter >= cbAdded)
                    {
                        /*
                         * Sufficient space after the current allocation.
                         */
                        uint8_t *pbNewSpace = (uint8_t *)pvOld + cbOld;
                        RT_BZERO(pbNewSpace, cbAdded);
                        *ppvNew = pvOld;
                    }
                    else
                    {
                        /*
                         * Have to move the allocation to make enough room at the
                         * end.  In order to make it a little less predictable and
                         * maybe avoid a relocation or two in the next call, halve
                         * the page offset until it fits.
                         */
                        AssertReturn(rtMemSaferNodeRemove(pvOld) == pThis, VERR_INTERNAL_ERROR_3);
                        uint32_t offNewUser = pThis->offUser;
                        do
                            offNewUser = offNewUser / 2;
                        while ((pThis->offUser - offNewUser) + cbAfter < cbAdded);
                        offNewUser &= ~(RTMEMSAFER_ALIGN - 1U);

                        uint32_t const cbMove = pThis->offUser - offNewUser;
                        uint8_t *pbNew = (uint8_t *)pvOld - cbMove;
                        memmove(pbNew, pvOld, cbOld);

                        RT_BZERO(pbNew + cbOld, cbAdded);
                        if (cbMove > cbAdded)
                            RTMemWipeThoroughly(pbNew + cbNew, cbMove - cbAdded, 3);

                        pThis->offUser  = offNewUser;
                        pThis->Core.Key = pbNew;
                        *ppvNew = pbNew;

                        rtMemSaferNodeInsert(pThis);
                    }
                    Assert(((uintptr_t)*ppvNew & PAGE_OFFSET_MASK) == pThis->offUser);
                    pThis->cbUser = cbNew;
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /*
                     * Not enough space, allocate a new block and copy over the data.
                     */
                    rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
                }
            }
            else
            {
                /*
                 * Shrinking the allocation; just wipe the memory that is no longer
                 * being used.
                 */
                if (cbNew != cbOld)
                {
                    uint8_t *pbAbandoned = (uint8_t *)pvOld + cbNew;
                    RTMemWipeThoroughly(pbAbandoned, cbOld - cbNew, 3);
                }
                pThis->cbUser = cbNew;
                *ppvNew = pvOld;
                rc = VINF_SUCCESS;
            }
        }
        else if (!pThis->fFlags)
        {
            /*
             * New flags added.  Allocate a new block and copy over the old one.
             */
            rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
        }
        else
        {
            /* Incompatible flags. */
            AssertMsgFailed(("fFlags=%#x old=%#x\n", fFlags, pThis->fFlags));
            rc = VERR_INVALID_FLAGS;
        }
    }
    /*
     * First allocation. Pass it on.
     */
    else if (!cbOld)
    {
        Assert(pvOld == NULL);
        rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
    }
    /*
     * Free operation. Pass it on.
     */
    else
    {
        RTMemSaferFree(pvOld, cbOld);
        *ppvNew = NULL;
        rc = VINF_SUCCESS;
    }
    return rc;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZExTag);
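
/*
 * Usage sketch: growing a secret buffer.  The logic above grows in place when
 * the data pages have room, otherwise it falls back on allocate-copy-free.
 * RTMemSaferReallocZ is the tag-less wrapper declared in iprt/memsafer.h;
 * the sizes are illustrative.
 */
#if 0
    void *pvSecret = RTMemSaferAllocZ(32);
    if (pvSecret)
    {
        void *pvGrown = RTMemSaferReallocZ(32, pvSecret, 128);
        if (pvGrown)
        {
            pvSecret = pvGrown;               /* may differ from the old pointer */
            RTMemSaferFree(pvSecret, 128);
        }
        else
            RTMemSaferFree(pvSecret, 32);     /* realloc failed, old block still valid */
    }
#endif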


RTDECL(void *) RTMemSaferAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferAllocZTag);


RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
{
    void *pvNew = NULL;
    int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemSaferReallocZTag);