VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/alloc-posix.cpp@ 533

Last change on this file since 533 was 533, checked in by vboxsync, 18 years ago

Fixed the RTMemExecAlloc issue with selinux enabled systems (and the assertion on amd64 in RTMemExecFree() during shutdown.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 7.3 KB
Line 
1/* $Id: alloc-posix.cpp 533 2007-02-02 02:02:14Z vboxsync $ */
2/** @file
3 * InnoTek Portable Runtime - Memory Allocation, POSIX.
4 */
5
6/*
7 * Copyright (C) 2006 InnoTek Systemberatung GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * If you received this file as part of a commercial VirtualBox
18 * distribution, then only the terms of your commercial VirtualBox
19 * license agreement apply instead of the previous paragraph.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#include <iprt/alloc.h>
27#include <iprt/assert.h>
28#include <iprt/param.h>
29#include <iprt/err.h>
30#include <iprt/string.h>
31
32#include <stdlib.h>
33#include <malloc.h>
34#include <errno.h>
35#include <sys/mman.h>
36
37#if !defined(RT_USE_MMAP) && (defined(__LINUX__))
38# define RT_USE_MMAP
39#endif
40
41/*******************************************************************************
42* Structures and Typedefs *
43*******************************************************************************/
#ifdef RT_USE_MMAP
/**
 * RTMemExecAlloc() header used when using mmap for allocating the memory.
 *
 * It lives at the very start of the mapping, immediately before the pointer
 * handed back to the caller, so that RTMemExecFree() can recover the mapping
 * address and size (see pHdr + 1 / pHdr - 1 in those functions).
 */
typedef struct RTMEMEXECHDR
{
    /** Magic number (RTMEMEXECHDR_MAGIC). */
    size_t      uMagic;
    /** The size we requested from mmap. */
    size_t      cb;
# if ARCH_BITS == 32
    /** Padding so the header is 16 bytes on 32-bit hosts as well (two 4-byte
     * size_t members + 8 bytes), keeping the user block 16-byte aligned. */
    uint32_t    Alignment[2];
# endif
} RTMEMEXECHDR, *PRTMEMEXECHDR;

/** Magic for RTMEMEXECHDR. */
#define RTMEMEXECHDR_MAGIC (~(size_t)0xfeedbabe)

#endif /* RT_USE_MMAP */
63
64
65
66#ifdef IN_RING3
67
68/**
69 * Allocates memory which may contain code.
70 *
71 * @returns Pointer to the allocated memory.
72 * @returns NULL on failure.
73 * @param cb Size in bytes of the memory block to allocate.
74 */
75RTDECL(void *) RTMemExecAlloc(size_t cb)
76{
77 AssertMsg(cb, ("Allocating ZERO bytes is really not a good idea! Good luck with the next assertion!\n"));
78
79#ifdef RT_USE_MMAP
80 /*
81 * Use mmap to get low memory.
82 */
83 void *pv = mmap(NULL, RT_ALIGN_Z(cb, PAGE_SIZE), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS
84#if defined(__AMD64__) && defined(MAP_32BIT)
85 | MAP_32BIT
86#endif
87 , -1, 0);
88 AssertMsgReturn(pv != MAP_FAILED, ("errno=%d cb=%#zx\n", errno, cb), NULL);
89 PRTMEMEXECHDR pHdr = (PRTMEMEXECHDR)pv;
90 pHdr->uMagic = RTMEMEXECHDR_MAGIC;
91 pHdr->cb = RT_ALIGN_Z(cb, PAGE_SIZE);
92 pv = pHdr + 1;
93
94#else
95 /*
96 * Allocate first.
97 */
98 cb = RT_ALIGN_Z(cb, 32);
99 void *pv = NULL;
100 int rc = posix_memalign(&pv, 32, cb);
101 AssertMsg(!rc && pv, ("posix_memalign(%zd) failed!!! rc=%d\n", cb, rc));
102 if (pv && !rc)
103 {
104 /*
105 * Add PROT_EXEC flag to the page.
106 *
107 * This is in violation of the SuS where I think it saith that mprotect() shall
108 * only be used with mmap()'ed memory. Works on linux and OS/2 LIBC v0.6.
109 */
110 memset(pv, 0xcc, cb);
111 void *pvProt = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
112 size_t cbProt = ((uintptr_t)pv & PAGE_OFFSET_MASK) + cb;
113 cbProt = RT_ALIGN_Z(cbProt, PAGE_SIZE);
114 rc = mprotect(pvProt, cbProt, PROT_READ | PROT_WRITE | PROT_EXEC);
115 if (rc)
116 {
117 AssertMsgFailed(("mprotect(%p, %#zx,,) -> rc=%d, errno=%d\n", pvProt, cbProt, rc, errno));
118 free(pv);
119 pv = NULL;
120 }
121 }
122#endif
123 return pv;
124}
125
126
127/**
128 * Free executable/read/write memory allocated by RTMemExecAlloc().
129 *
130 * @param pv Pointer to memory block.
131 */
132RTDECL(void) RTMemExecFree(void *pv)
133{
134 if (pv)
135 {
136#ifdef RT_USE_MMAP
137 PRTMEMEXECHDR pHdr = (PRTMEMEXECHDR)pv - 1;
138 AssertMsgReturnVoid(RT_ALIGN_P(pHdr, PAGE_SIZE) == pHdr, ("pHdr=%p pv=%p\n", pHdr, pv));
139 AssertMsgReturnVoid(pHdr->uMagic == RTMEMEXECHDR_MAGIC, ("pHdr=%p(uMagic=%#zx) pv=%p\n", pHdr, pHdr->uMagic, pv));
140 int rc = munmap(pHdr, pHdr->cb);
141 AssertMsg(!rc, ("munmap -> %d errno=%d\n", rc, errno));
142#else
143 free(pv);
144#endif
145 }
146}
147
148
149/**
150 * Allocate page aligned memory.
151 *
152 * @returns Pointer to the allocated memory.
153 * @returns NULL if we're out of memory.
154 * @param cb Size of the memory block. Will be rounded up to page size.
155 */
156RTDECL(void *) RTMemPageAlloc(size_t cb)
157{
158#if 0 /** @todo huh? we're using posix_memalign in the next function... */
159 void *pv;
160 int rc = posix_memalign(&pv, PAGE_SIZE, RT_ALIGN_Z(cb, PAGE_SIZE));
161 if (!rc)
162 return pv;
163 return NULL;
164#else
165 return memalign(PAGE_SIZE, cb);
166#endif
167}
168
169
170/**
171 * Allocate zero'ed page aligned memory.
172 *
173 * @returns Pointer to the allocated memory.
174 * @returns NULL if we're out of memory.
175 * @param cb Size of the memory block. Will be rounded up to page size.
176 */
177RTDECL(void *) RTMemPageAllocZ(size_t cb)
178{
179 void *pv;
180 int rc = posix_memalign(&pv, PAGE_SIZE, RT_ALIGN_Z(cb, PAGE_SIZE));
181 if (!rc)
182 {
183 bzero(pv, RT_ALIGN_Z(cb, PAGE_SIZE));
184 return pv;
185 }
186 return NULL;
187}
188
189
190/**
191 * Free a memory block allocated with RTMemPageAlloc() or RTMemPageAllocZ().
192 *
193 * @param pv Pointer to the block as it was returned by the allocation function.
194 * NULL will be ignored.
195 */
196RTDECL(void) RTMemPageFree(void *pv)
197{
198 if (pv)
199 free(pv);
200}
201
202
203/**
204 * Change the page level protection of a memory region.
205 *
206 * @returns iprt status code.
207 * @param pv Start of the region. Will be rounded down to nearest page boundary.
208 * @param cb Size of the region. Will be rounded up to the nearest page boundary.
209 * @param fProtect The new protection, a combination of the RTMEM_PROT_* defines.
210 */
211RTDECL(int) RTMemProtect(void *pv, size_t cb, unsigned fProtect)
212{
213 /*
214 * Validate input.
215 */
216 if (cb == 0)
217 {
218 AssertMsgFailed(("!cb\n"));
219 return VERR_INVALID_PARAMETER;
220 }
221 if (fProtect & ~(RTMEM_PROT_NONE | RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC))
222 {
223 AssertMsgFailed(("fProtect=%#x\n", fProtect));
224 return VERR_INVALID_PARAMETER;
225 }
226
227 /*
228 * Convert the flags.
229 */
230 int fProt;
231#if RTMEM_PROT_NONE == PROT_NONE \
232 && RTMEM_PROT_READ == PROT_READ \
233 && RTMEM_PROT_WRITE == PROT_WRITE \
234 && RTMEM_PROT_EXEC == PROT_EXEC
235 fProt = fProtect;
236#else
237 Assert(!RTMEM_PROT_NONE);
238 if (!fProtect)
239 fProt = PROT_NONE;
240 else
241 {
242 fProt = 0;
243 if (fProtect & RTMEM_PROT_READ)
244 fProt |= PROT_READ;
245 if (fProtect & RTMEM_PROT_WRITE)
246 fProt |= PROT_WRITE;
247 if (fProtect & RTMEM_PROT_EXEC)
248 fProt |= PROT_EXEC;
249 }
250#endif
251
252 /*
253 * Align the request.
254 */
255 cb += (uintptr_t)pv & PAGE_OFFSET_MASK;
256 pv = (void *)((uintptr_t)pv & ~PAGE_OFFSET_MASK);
257
258 /*
259 * Change the page attributes.
260 */
261 int rc = mprotect(pv, cb, fProt);
262 if (!rc)
263 return rc;
264 return RTErrConvertFromErrno(errno);
265}
266
267#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette