VirtualBox

source: vbox/trunk/src/VBox/Runtime/generic/memsafer-generic.cpp@ 78198

Last change on this file since 78198 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 7.7 KB
Line 
1/* $Id: memsafer-generic.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
2/** @file
3 * IPRT - Memory Allocate for Sensitive Data, generic heap-based implementation.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "internal/iprt.h"
32#include <iprt/memsafer.h>
33
34#include <iprt/assert.h>
35#include <iprt/err.h>
36#include <iprt/string.h>
37
38
39/*********************************************************************************************************************************
40* Defined Constants And Macros *
41*********************************************************************************************************************************/
/** Allocation size alignment. */
#define RTMEMSAFER_ALIGN 16
/** Padding before the block to catch small underruns. */
#define RTMEMSAFER_PAD_BEFORE 96
/** Padding after the block to catch small overruns. */
#define RTMEMSAFER_PAD_AFTER 32
48
49
50/*********************************************************************************************************************************
51* Global Variables *
52*********************************************************************************************************************************/
/** XOR scrambler value.
 * @todo determine this at runtime */
55#if ARCH_BITS == 32
56static uintptr_t g_uScramblerXor = UINT32_C(0x867af88d);
57#elif ARCH_BITS == 64
58static uintptr_t g_uScramblerXor = UINT64_C(0xed95ecc99416d312);
59#else
60# error "Bad ARCH_BITS value"
61#endif
62
63
64
65RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
66{
67
68 AssertMsg(*(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE) == cb,
69 ("*pvStart=%#zx cb=%#zx\n", *(size_t *)((char *)pv- RTMEMSAFER_PAD_BEFORE), cb));
70
71 /* Note! This isn't supposed to be safe, just less obvious. */
72 uintptr_t *pu = (uintptr_t *)pv;
73 cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
74 while (cb > 0)
75 {
76 *pu ^= g_uScramblerXor;
77 pu++;
78 cb -= sizeof(*pu);
79 }
80
81 return VINF_SUCCESS;
82}
83RT_EXPORT_SYMBOL(RTMemSaferScramble);
84
85
86RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
87{
88 AssertMsg(*(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE) == cb,
89 ("*pvStart=%#zx cb=%#zx\n", *(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE), cb));
90
91 /* Note! This isn't supposed to be safe, just less obvious. */
92 uintptr_t *pu = (uintptr_t *)pv;
93 cb = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
94 while (cb > 0)
95 {
96 *pu ^= g_uScramblerXor;
97 pu++;
98 cb -= sizeof(*pu);
99 }
100
101 return VINF_SUCCESS;
102}
103RT_EXPORT_SYMBOL(RTMemSaferUnscramble);
104
105
106RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
107{
108 AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
109 *ppvNew = NULL;
110 AssertReturn(cb, VERR_INVALID_PARAMETER);
111 RT_NOREF_PV(pszTag);
112
113 /*
114 * We support none of the hard requirements passed thru flags.
115 */
116 if (fFlags == 0)
117 {
118 /*
119 * Don't request zeroed memory. We want random heap garbage in the
120 * padding zones, nothing that makes our allocations easier to find.
121 */
122 size_t cbUser = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
123 void *pvNew = RTMemAlloc(cbUser + RTMEMSAFER_PAD_BEFORE + RTMEMSAFER_PAD_AFTER);
124 if (pvNew)
125 {
126#ifdef RT_STRICT /* For checking input in string builds. */
127 memset(pvNew, 0xad, RTMEMSAFER_PAD_BEFORE);
128 memset((char *)pvNew + RTMEMSAFER_PAD_BEFORE + cb, 0xda, RTMEMSAFER_PAD_AFTER + (cbUser - cb));
129 *(size_t *)pvNew = cb;
130#endif
131
132 void *pvUser = (char *)pvNew + RTMEMSAFER_PAD_BEFORE;
133 *ppvNew = pvUser;
134
135 /* You don't use this API for performance, so we always clean memory. */
136 RT_BZERO(pvUser, cb);
137
138 return VINF_SUCCESS;
139 }
140 return VERR_NO_MEMORY;
141 }
142 AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);
143 return VWRN_UNABLE_TO_SATISFY_REQUIREMENTS;
144}
145RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);
146
147
148RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW_DEF
149{
150 if (pv)
151 {
152 Assert(cb);
153 void *pvStart = (char *)pv - RTMEMSAFER_PAD_BEFORE;
154 AssertMsg(*(size_t *)pvStart == cb, ("*pvStart=%#zx cb=%#zx\n", *(size_t *)pvStart, cb));
155 RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);
156 RTMemFree(pvStart);
157 }
158 else
159 Assert(cb == 0);
160}
161RT_EXPORT_SYMBOL(RTMemSaferFree);
162
163
164RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW_DEF
165{
166 /*
167 * We cannot let the heap move us around because we will be failing in our
168 * duty to clean things up. So, allocate a new block, copy over the old
169 * content, and free the old one.
170 */
171 int rc;
172 /* Real realloc. */
173 if (cbNew && cbOld)
174 {
175 AssertPtr(pvOld);
176 AssertMsg(*(size_t *)((char *)pvOld - RTMEMSAFER_PAD_BEFORE) == cbOld,
177 ("*pvStart=%#zx cbOld=%#zx\n", *(size_t *)((char *)pvOld - RTMEMSAFER_PAD_BEFORE), cbOld));
178
179 /*
180 * We support none of the hard requirements passed thru flags.
181 */
182 void *pvNew;
183 rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
184 if (RT_SUCCESS(rc))
185 {
186 memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
187 RTMemSaferFree(pvOld, cbOld);
188 *ppvNew = pvNew;
189 }
190 }
191 /* First allocation. */
192 else if (!cbOld)
193 {
194 Assert(pvOld == NULL);
195 rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
196 }
197 /* Free operation*/
198 else
199 {
200 RTMemSaferFree(pvOld, cbOld);
201 rc = VINF_SUCCESS;
202 }
203 return rc;
204}
205RT_EXPORT_SYMBOL(RTMemSaferReallocZExTag);
206
207
208RTDECL(void *) RTMemSaferAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW_DEF
209{
210 void *pvNew = NULL;
211 int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
212 if (RT_SUCCESS(rc))
213 return pvNew;
214 return NULL;
215}
216RT_EXPORT_SYMBOL(RTMemSaferAllocZTag);
217
218
219RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW_DEF
220{
221 void *pvNew = NULL;
222 int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
223 if (RT_SUCCESS(rc))
224 return pvNew;
225 return NULL;
226}
227RT_EXPORT_SYMBOL(RTMemSaferReallocZTag);
228
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette