VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/alloc-r0drv.cpp@48462

Last change on this file since 48462 was 46567, checked in by vboxsync, 12 years ago

RTMemAllocEx for ring-3.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 12.7 KB
/* $Id: alloc-r0drv.cpp 46567 2013-06-14 16:12:24Z vboxsync $ */
/** @file
 * IPRT - Memory Allocation, Ring-0 Driver.
 */

/*
 * Copyright (C) 2006-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define RTMEM_NO_WRAP_TO_EF_APIS
#include <iprt/mem.h>
#include "internal/iprt.h"

#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/assert.h>
#ifdef RT_MORE_STRICT
# include <iprt/mp.h>
#endif
#include <iprt/param.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include "r0drv/alloc-r0drv.h"


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#ifdef RT_STRICT
# define RTR0MEM_STRICT
#endif

#ifdef RTR0MEM_STRICT
# define RTR0MEM_FENCE_EXTRA    16
#else
# define RTR0MEM_FENCE_EXTRA    0
#endif


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef RTR0MEM_STRICT
/** Fence data. */
static uint8_t const g_abFence[RTR0MEM_FENCE_EXTRA] =
{
    0x77, 0x88, 0x66, 0x99, 0x55, 0xaa, 0x44, 0xbb,
    0x33, 0xcc, 0x22, 0xdd, 0x11, 0xee, 0x00, 0xff
};
#endif


/**
 * Wrapper around rtR0MemAllocEx.
 *
 * @returns Pointer to the allocated memory block header.
 * @param   cb      The number of bytes to allocate (sans header).
 * @param   fFlags  The allocation flags.
 */
DECLINLINE(PRTMEMHDR) rtR0MemAlloc(size_t cb, uint32_t fFlags)
{
    PRTMEMHDR pHdr;
    int rc = rtR0MemAllocEx(cb, fFlags, &pHdr);
    if (RT_FAILURE(rc))
        return NULL;
    return pHdr;
}


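/**
 * Allocates temporary memory with custom tag.
 *
 * In ring-0 there is no separate temporary heap, so this simply forwards to
 * RTMemAllocTag().
 *
 * @returns Pointer to the allocated memory on success, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  Allocation tag used for statistics and such.
 */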
RTDECL(void *) RTMemTmpAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return RTMemAllocTag(cb, pszTag);
}
RT_EXPORT_SYMBOL(RTMemTmpAllocTag);


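/**
 * Allocates zero-initialized temporary memory with custom tag.
 *
 * Forwards to RTMemAllocZTag() since ring-0 has no separate temporary heap.
 *
 * @returns Pointer to the zeroed memory on success, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  Allocation tag used for statistics and such.
 */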
RTDECL(void *) RTMemTmpAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    return RTMemAllocZTag(cb, pszTag);
}
RT_EXPORT_SYMBOL(RTMemTmpAllocZTag);


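/**
 * Frees memory allocated by RTMemTmpAllocTag() or RTMemTmpAllocZTag().
 *
 * Simply forwards to RTMemFree().
 *
 * @param   pv      The memory to free, NULL is ignored.
 */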
RTDECL(void) RTMemTmpFree(void *pv) RT_NO_THROW
{
    return RTMemFree(pv);
}
RT_EXPORT_SYMBOL(RTMemTmpFree);




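/**
 * Allocates memory with custom tag.
 *
 * The block is obtained via rtR0MemAlloc() with room for the header; in
 * strict builds a fence is appended after the requested size and verified
 * when the block is freed.
 *
 * @returns Pointer to the allocated memory on success, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  Allocation tag used for statistics and such.
 */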
RTDECL(void *) RTMemAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();

    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, 0);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
        return pHdr + 1;
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemAllocTag);


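/**
 * Allocates zero-initialized memory with custom tag.
 *
 * Same as RTMemAllocTag() except that the returned block is zeroed.
 *
 * @returns Pointer to the zeroed memory on success, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  Allocation tag used for statistics and such.
 */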
RTDECL(void *) RTMemAllocZTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();

    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_ZEROED);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
        return memset(pHdr + 1, 0, cb);
#else
        return memset(pHdr + 1, 0, pHdr->cb);
#endif
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemAllocZTag);


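/**
 * Allocates memory for a variable-size object, with custom tag.
 *
 * Rounds the requested size up (to a multiple of 16 when cbUnaligned is 16 or
 * more, otherwise to the pointer size) and forwards to RTMemAllocTag().
 *
 * @returns Pointer to the allocated memory on success, NULL on failure.
 * @param   cbUnaligned The unaligned number of bytes to allocate.
 * @param   pszTag      Allocation tag used for statistics and such.
 */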
RTDECL(void *) RTMemAllocVarTag(size_t cbUnaligned, const char *pszTag)
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return RTMemAllocTag(cbAligned, pszTag);
}
RT_EXPORT_SYMBOL(RTMemAllocVarTag);


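/**
 * Allocates zero-initialized memory for a variable-size object, with custom tag.
 *
 * Same size rounding as RTMemAllocVarTag(), but forwards to RTMemAllocZTag().
 *
 * @returns Pointer to the zeroed memory on success, NULL on failure.
 * @param   cbUnaligned The unaligned number of bytes to allocate.
 * @param   pszTag      Allocation tag used for statistics and such.
 */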
RTDECL(void *) RTMemAllocZVarTag(size_t cbUnaligned, const char *pszTag)
{
    size_t cbAligned;
    if (cbUnaligned >= 16)
        cbAligned = RT_ALIGN_Z(cbUnaligned, 16);
    else
        cbAligned = RT_ALIGN_Z(cbUnaligned, sizeof(void *));
    return RTMemAllocZTag(cbAligned, pszTag);
}
RT_EXPORT_SYMBOL(RTMemAllocZVarTag);


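/**
 * Reallocates memory with custom tag.
 *
 * A zero cbNew with a non-NULL pvOld frees the block; a NULL pvOld behaves
 * like RTMemAllocTag().  Otherwise the old block is reused if it is large
 * enough and doesn't waste more than 128 bytes, else a new block is
 * allocated, the contents copied over, and the old block freed.
 *
 * @returns Pointer to the (possibly moved) memory on success, NULL on failure.
 * @param   pvOld   Pointer to the block to reallocate, NULL allowed.
 * @param   cbNew   The requested new block size.
 * @param   pszTag  Allocation tag used for statistics and such.
 */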
RTDECL(void *) RTMemReallocTag(void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW
{
    PRTMEMHDR pHdrOld;

    /* Free. */
    if (!cbNew && pvOld)
    {
        RTMemFree(pvOld);
        return NULL;
    }

    /* Alloc. */
    if (!pvOld)
        return RTMemAllocTag(cbNew, pszTag);

    /*
     * Realloc.
     */
    pHdrOld = (PRTMEMHDR)pvOld - 1;
    RT_ASSERT_PREEMPTIBLE();

    if (pHdrOld->u32Magic == RTMEMHDR_MAGIC)
    {
        PRTMEMHDR pHdrNew;

        /* If there is sufficient space in the old block and we don't cause
           substantial internal fragmentation, reuse the old block. */
        if (   pHdrOld->cb >= cbNew + RTR0MEM_FENCE_EXTRA
            && pHdrOld->cb - (cbNew + RTR0MEM_FENCE_EXTRA) <= 128)
        {
            pHdrOld->cbReq = (uint32_t)cbNew; Assert(pHdrOld->cbReq == cbNew);
#ifdef RTR0MEM_STRICT
            memcpy((uint8_t *)(pHdrOld + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
            return pvOld;
        }

        /* Allocate a new block and copy over the content. */
        pHdrNew = rtR0MemAlloc(cbNew + RTR0MEM_FENCE_EXTRA, 0);
        if (pHdrNew)
        {
            size_t cbCopy = RT_MIN(pHdrOld->cb, pHdrNew->cb);
            memcpy(pHdrNew + 1, pvOld, cbCopy);
#ifdef RTR0MEM_STRICT
            pHdrNew->cbReq = (uint32_t)cbNew; Assert(pHdrNew->cbReq == cbNew);
            memcpy((uint8_t *)(pHdrNew + 1) + cbNew, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
            AssertReleaseMsg(!memcmp((uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                             ("pHdr=%p pvOld=%p cbReq=%u cb=%u cbNew=%zu fFlags=%#x\n"
                              "fence:    %.*Rhxs\n"
                              "expected: %.*Rhxs\n",
                              pHdrOld, pvOld, pHdrOld->cbReq, pHdrOld->cb, cbNew, pHdrOld->fFlags,
                              RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdrOld + 1) + pHdrOld->cbReq,
                              RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
            rtR0MemFree(pHdrOld);
            return pHdrNew + 1;
        }
    }
    else
        AssertMsgFailed(("pHdrOld->u32Magic=%RX32 pvOld=%p cbNew=%#zx\n", pHdrOld->u32Magic, pvOld, cbNew));

    return NULL;
}
RT_EXPORT_SYMBOL(RTMemReallocTag);


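/**
 * Frees memory allocated by RTMemAllocTag() and friends.
 *
 * Validates the block header magic and, in strict builds, checks that the
 * fence after the block is intact before passing the header to rtR0MemFree().
 *
 * @param   pv      The memory to free, NULL is ignored.
 */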
RTDECL(void) RTMemFree(void *pv) RT_NO_THROW
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();

    if (!pv)
        return;
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_EXEC));
#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemFree);




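/**
 * Allocates memory which may contain executable code, with custom tag.
 *
 * @returns Pointer to the allocated memory on success, NULL on failure.
 * @param   cb      The number of bytes to allocate.
 * @param   pszTag  Allocation tag used for statistics and such.
 */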
RTDECL(void *) RTMemExecAllocTag(size_t cb, const char *pszTag) RT_NO_THROW
{
    PRTMEMHDR pHdr;
#ifdef RT_OS_SOLARIS /** @todo figure out why */
    RT_ASSERT_INTS_ON();
#else
    RT_ASSERT_PREEMPTIBLE();
#endif

    pHdr = rtR0MemAlloc(cb + RTR0MEM_FENCE_EXTRA, RTMEMHDR_FLAG_EXEC);
    if (pHdr)
    {
#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb; Assert(pHdr->cbReq == cb);
        memcpy((uint8_t *)(pHdr + 1) + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
        return pHdr + 1;
    }
    return NULL;
}
RT_EXPORT_SYMBOL(RTMemExecAllocTag);


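/**
 * Frees memory allocated by RTMemExecAllocTag().
 *
 * @param   pv      The memory to free, NULL is ignored.
 * @param   cb      The size of the allocation.
 */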
RTDECL(void) RTMemExecFree(void *pv, size_t cb) RT_NO_THROW
{
    PRTMEMHDR pHdr;
    RT_ASSERT_INTS_ON();

    if (!pv)
        return;
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        Assert(!(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX));
#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemExecFree);




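/**
 * Extended memory allocation with custom tag.
 *
 * Supports limited alignment (at most sizeof(void *)), zeroing, executable
 * memory and any-context allocation/free via the RTMEMALLOCEX_FLAGS_XXX
 * flags, which are translated to RTMEMHDR_FLAG_XXX for rtR0MemAllocEx().
 *
 * @returns IPRT status code.
 * @param   cb          The number of bytes to allocate.
 * @param   cbAlignment The desired alignment, at most sizeof(void *).
 * @param   fFlags      Combination of RTMEMALLOCEX_FLAGS_XXX.
 * @param   pszTag      Allocation tag used for statistics and such.
 * @param   ppv         Where to return the pointer to the allocated memory.
 */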
RTDECL(int) RTMemAllocExTag(size_t cb, size_t cbAlignment, uint32_t fFlags, const char *pszTag, void **ppv) RT_NO_THROW
{
    uint32_t    fHdrFlags = RTMEMHDR_FLAG_ALLOC_EX;
    PRTMEMHDR   pHdr;
    int         rc;

    RT_ASSERT_PREEMPT_CPUID_VAR();
    if (!(fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC))
        RT_ASSERT_INTS_ON();

    /*
     * Fake up some alignment support.
     */
    AssertMsgReturn(cbAlignment <= sizeof(void *), ("%zu (%#x)\n", cbAlignment, cbAlignment), VERR_UNSUPPORTED_ALIGNMENT);
    if (cb < cbAlignment)
        cb = cbAlignment;

    /*
     * Validate and convert flags.
     */
    AssertMsgReturn(!(fFlags & ~RTMEMALLOCEX_FLAGS_VALID_MASK_R0), ("%#x\n", fFlags), VERR_INVALID_PARAMETER);
    if (fFlags & RTMEMALLOCEX_FLAGS_ZEROED)
        fHdrFlags |= RTMEMHDR_FLAG_ZEROED;
    if (fFlags & RTMEMALLOCEX_FLAGS_EXEC)
        fHdrFlags |= RTMEMHDR_FLAG_EXEC;
    if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_ALLOC)
        fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_ALLOC;
    if (fFlags & RTMEMALLOCEX_FLAGS_ANY_CTX_FREE)
        fHdrFlags |= RTMEMHDR_FLAG_ANY_CTX_FREE;

    /*
     * Do the allocation.
     */
    rc = rtR0MemAllocEx(cb + RTR0MEM_FENCE_EXTRA, fHdrFlags, &pHdr);
    if (RT_SUCCESS(rc))
    {
        void *pv;

        Assert(pHdr->cbReq == cb + RTR0MEM_FENCE_EXTRA);
        Assert((pHdr->fFlags & fFlags) == fFlags);

        /*
         * Calc user pointer, initialize the memory if requested, and if
         * memory strictness is enabled set up the fence.
         */
        pv = pHdr + 1;
        *ppv = pv;
        if (fFlags & RTMEMHDR_FLAG_ZEROED)
            memset(pv, 0, pHdr->cb);

#ifdef RTR0MEM_STRICT
        pHdr->cbReq = (uint32_t)cb;
        memcpy((uint8_t *)pv + cb, &g_abFence[0], RTR0MEM_FENCE_EXTRA);
#endif
    }
    else if (rc == VERR_NO_MEMORY && (fFlags & RTMEMALLOCEX_FLAGS_EXEC))
        rc = VERR_NO_EXEC_MEMORY;

    RT_ASSERT_PREEMPT_CPUID();
    return rc;
}
RT_EXPORT_SYMBOL(RTMemAllocExTag);


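/**
 * Frees memory allocated by RTMemAllocExTag().
 *
 * @param   pv      The memory to free, NULL is ignored.
 * @param   cb      The size that was passed to RTMemAllocExTag().
 */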
RTDECL(void) RTMemFreeEx(void *pv, size_t cb) RT_NO_THROW
{
    PRTMEMHDR pHdr;

    if (!pv)
        return;

    AssertPtr(pv);
    pHdr = (PRTMEMHDR)pv - 1;
    if (pHdr->u32Magic == RTMEMHDR_MAGIC)
    {
        RT_ASSERT_PREEMPT_CPUID_VAR();

        Assert(pHdr->fFlags & RTMEMHDR_FLAG_ALLOC_EX);
        if (!(pHdr->fFlags & RTMEMHDR_FLAG_ANY_CTX_FREE))
            RT_ASSERT_INTS_ON();
        AssertMsg(pHdr->cbReq == cb, ("cbReq=%u cb=%zu\n", pHdr->cbReq, cb));

#ifdef RTR0MEM_STRICT
        AssertReleaseMsg(!memcmp((uint8_t *)(pHdr + 1) + pHdr->cbReq, &g_abFence[0], RTR0MEM_FENCE_EXTRA),
                         ("pHdr=%p pv=%p cbReq=%u cb=%u fFlags=%#x\n"
                          "fence:    %.*Rhxs\n"
                          "expected: %.*Rhxs\n",
                          pHdr, pv, pHdr->cbReq, pHdr->cb, pHdr->fFlags,
                          RTR0MEM_FENCE_EXTRA, (uint8_t *)(pHdr + 1) + pHdr->cbReq,
                          RTR0MEM_FENCE_EXTRA, &g_abFence[0]));
#endif
        rtR0MemFree(pHdr);
        RT_ASSERT_PREEMPT_CPUID();
    }
    else
        AssertMsgFailed(("pHdr->u32Magic=%RX32 pv=%p\n", pHdr->u32Magic, pv));
}
RT_EXPORT_SYMBOL(RTMemFreeEx);