VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c@290

Last change on this file since 290 was 290, checked in by vboxsync, 18 years ago

compile fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 9.0 KB
/* $Id: alloc-r0drv-linux.c 290 2007-01-25 05:43:58Z vboxsync $ */
/** @file
 * InnoTek Portable Runtime - Memory Allocation, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2006 InnoTek Systemberatung GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License as published by the Free Software Foundation,
 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
 * distribution. VirtualBox OSE is distributed in the hope that it will
 * be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * If you received this file as part of a commercial VirtualBox
 * distribution, then only the terms of your commercial VirtualBox
 * license agreement apply instead of the previous paragraph.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"
#include <iprt/alloc.h>
#include <iprt/assert.h>
#include "r0drv/alloc-r0drv.h"

#if defined(__AMD64__) || defined(__DOXYGEN__)
/**
 * We need memory in the module range (~2GB to ~0); this can only be obtained
 * through APIs that are not exported (see module_alloc()).
 *
 * So, we'll have to create a quick and dirty heap here using BSS memory.
 * Very annoying and it's going to restrict us!
 */
# define RTMEMALLOC_EXEC_HEAP
#endif
#ifdef RTMEMALLOC_EXEC_HEAP
# include <iprt/heap.h>
# include <iprt/spinlock.h>
# include <iprt/err.h>
#endif


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef RTMEMALLOC_EXEC_HEAP
/** The heap. */
static RTHEAPSIMPLE g_HeapExec = NIL_RTHEAPSIMPLE;
/** Spinlock protecting the heap. */
static RTSPINLOCK   g_HeapExecSpinlock = NIL_RTSPINLOCK;


/**
 * API for cleaning up the heap spinlock on IPRT termination.
 * This is, like RTMemExecDonate, specific to AMD64 and Linux/GNU.
 */
RTDECL(void) RTMemExecCleanup(void)
{
    RTSpinlockDestroy(g_HeapExecSpinlock);
    g_HeapExecSpinlock = NIL_RTSPINLOCK;
}


/**
 * Donate read+write+execute memory to the exec heap.
 *
 * This API is specific to AMD64 and Linux/GNU. A kernel module that desires to
 * use RTMemExecAlloc on AMD64 Linux/GNU will have to donate some statically
 * allocated memory in the module if it wishes for GCC generated code to work.
 * GCC can only generate modules that work in the address range ~2GB to ~0
 * currently.
 *
 * The API only accepts a single donation.
 *
 * @returns IPRT status code.
 * @param   pvMemory    Pointer to the memory block.
 * @param   cb          The size of the memory block.
 */
RTDECL(int) RTMemExecDonate(void *pvMemory, size_t cb)
{
    AssertReturn(g_HeapExec == NIL_RTHEAPSIMPLE, VERR_WRONG_ORDER);

    int rc = RTSpinlockCreate(&g_HeapExecSpinlock);
    if (RT_SUCCESS(rc))
    {
        rc = RTHeapSimpleInit(&g_HeapExec, pvMemory, cb);
        if (RT_FAILURE(rc))
            RTMemExecCleanup();
    }
    return rc;
}
#endif /* RTMEMALLOC_EXEC_HEAP */
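
/* Illustrative usage sketch (hypothetical, not part of the original file):
 * a kernel module donates a static BSS buffer at load time so that later
 * RTMemExecAlloc() calls can be served from the module address range. The
 * buffer name and size are assumptions for illustration only. */
#if 0 /* example only */
static uint8_t g_abExecMemory[4 * 4096]; /* ends up in the module's BSS */

static int ExampleModuleInit(void)
{
    /* Only one donation is accepted; a second call fails with VERR_WRONG_ORDER. */
    return RTMemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
}
#endif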



/**
 * OS specific allocation function.
 */
PRTMEMHDR rtMemAlloc(size_t cb, uint32_t fFlags)
{
    /*
     * Allocate.
     */
    PRTMEMHDR pHdr;
    Assert(cb != sizeof(void *)); /* 99% of pointer sized allocations are wrong. */
    if (fFlags & RTMEMHDR_FLAG_EXEC)
    {
#if defined(__AMD64__)
# ifdef RTMEMALLOC_EXEC_HEAP
        if (g_HeapExec != NIL_RTHEAPSIMPLE)
        {
            RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
            RTSpinlockAcquireNoInts(g_HeapExecSpinlock, &SpinlockTmp);
            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
            RTSpinlockReleaseNoInts(g_HeapExecSpinlock, &SpinlockTmp);
        }
        else
# endif
            pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);

#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM,
                                    __pgprot(cpu_has_pge ? _PAGE_KERNEL_EXEC | _PAGE_GLOBAL : _PAGE_KERNEL_EXEC));
#else
        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
#endif
    }
    else
    {
        if (cb <= PAGE_SIZE)
        {
            fFlags |= RTMEMHDR_FLAG_KMALLOC;
            pHdr = kmalloc(cb + sizeof(*pHdr), GFP_KERNEL);
        }
        else
            pHdr = vmalloc(cb + sizeof(*pHdr));
    }

    /*
     * Initialize.
     */
    if (pHdr)
    {
        pHdr->u32Magic   = RTMEMHDR_MAGIC;
        pHdr->fFlags     = fFlags;
        pHdr->cb         = cb;
        pHdr->u32Padding = 0;
    }
    return pHdr;
}
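
/* Minimal sketch (an assumption, not part of the original file) of how a
 * caller-facing allocator could use rtMemAlloc(): the header is placed in
 * front of the allocation, so the user block starts at pHdr + 1.
 * ExampleAlloc is a hypothetical name. */
#if 0 /* example only */
static void *ExampleAlloc(size_t cb)
{
    PRTMEMHDR pHdr = rtMemAlloc(cb, 0 /* fFlags */);
    return pHdr ? pHdr + 1 : NULL;
}
#endif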


/**
 * OS specific free function.
 */
void rtMemFree(PRTMEMHDR pHdr)
{
    pHdr->u32Magic += 1;
    if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
        kfree(pHdr);
#ifdef RTMEMALLOC_EXEC_HEAP
    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_HEAP)
    {
        RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(g_HeapExecSpinlock, &SpinlockTmp);
        RTHeapSimpleFree(g_HeapExec, pHdr);
        RTSpinlockReleaseNoInts(g_HeapExecSpinlock, &SpinlockTmp);
    }
#endif
    else
        vfree(pHdr);
}
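
/* The matching free sketch (again hypothetical): step back over the header
 * that ExampleAlloc() above put in front of the user block and pass it to
 * rtMemFree(). */
#if 0 /* example only */
static void ExampleFree(void *pv)
{
    if (pv)
        rtMemFree((PRTMEMHDR)pv - 1);
}
#endif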


/**
 * Compute order. Some functions allocate 2^order pages.
 *
 * @returns order.
 * @param   cPages      Number of pages.
 */
static int CalcPowerOf2Order(unsigned long cPages)
{
    int             iOrder;
    unsigned long   cTmp;

    for (iOrder = 0, cTmp = cPages; cTmp >>= 1; ++iOrder)
        ;
    if (cPages & ~(1 << iOrder))
        ++iOrder;

    return iOrder;
}
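
/* Worked examples (editor's sketch, not part of the original file) of the
 * mapping computed above: alloc_pages() hands out 2^order pages, so page
 * counts that are not a power of two get rounded up. */
#if 0 /* example only */
static void ExampleOrders(void)
{
    Assert(CalcPowerOf2Order(1) == 0); /* 1 page  -> 2^0 = 1 page  */
    Assert(CalcPowerOf2Order(2) == 1); /* 2 pages -> 2^1 = 2 pages */
    Assert(CalcPowerOf2Order(3) == 2); /* 3 pages -> 2^2 = 4 pages */
    Assert(CalcPowerOf2Order(4) == 2); /* 4 pages -> 2^2 = 4 pages */
    Assert(CalcPowerOf2Order(5) == 3); /* 5 pages -> 2^3 = 8 pages */
}
#endif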


/**
 * Allocates physically contiguous memory (below 4GB).
 * The allocation is page aligned and the content is undefined.
 *
 * @returns Pointer to the memory block. This is page aligned.
 * @param   pPhys   Where to store the physical address.
 * @param   cb      The allocation size in bytes. This is always
 *                  rounded up to PAGE_SIZE.
 */
RTR0DECL(void *) RTMemContAlloc(PRTCCPHYS pPhys, size_t cb)
{
    int             cOrder;
    unsigned        cPages;
    struct page    *paPages;

    /*
     * validate input.
     */
    Assert(VALID_PTR(pPhys));
    Assert(cb > 0);

    /*
     * Allocate page pointer array.
     */
    cb = RT_ALIGN_Z(cb, PAGE_SIZE);
    cPages = cb >> PAGE_SHIFT;
    cOrder = CalcPowerOf2Order(cPages);
#ifdef __AMD64__ /** @todo check out if there is a correct way of getting memory below 4GB (physically). */
    paPages = alloc_pages(GFP_DMA, cOrder);
#else
    paPages = alloc_pages(GFP_USER, cOrder);
#endif
    if (paPages)
    {
        /*
         * Reserve the pages and mark them executable.
         */
        unsigned iPage;
        for (iPage = 0; iPage < cPages; iPage++)
        {
            Assert(!PageHighMem(&paPages[iPage]));
            if (iPage + 1 < cPages)
            {
                AssertMsg(   (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage])) + PAGE_SIZE
                          == (uintptr_t)phys_to_virt(page_to_phys(&paPages[iPage + 1]))
                          &&    page_to_phys(&paPages[iPage]) + PAGE_SIZE
                             == page_to_phys(&paPages[iPage + 1]),
                          ("iPage=%i cPages=%u [0]=%#llx,%p [1]=%#llx,%p\n", iPage, cPages,
                           (long long)page_to_phys(&paPages[iPage]), phys_to_virt(page_to_phys(&paPages[iPage])),
                           (long long)page_to_phys(&paPages[iPage + 1]), phys_to_virt(page_to_phys(&paPages[iPage + 1])) ));
            }

            SetPageReserved(&paPages[iPage]);
            if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
                MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, MY_PAGE_KERNEL_EXEC);
        }
        *pPhys = page_to_phys(paPages);
        return phys_to_virt(page_to_phys(paPages));
    }

    return NULL;
}


/**
 * Frees memory allocated using RTMemContAlloc().
 *
 * @param   pv      Pointer returned by RTMemContAlloc().
 * @param   cb      The cb parameter passed to RTMemContAlloc().
 */
RTR0DECL(void) RTMemContFree(void *pv, size_t cb)
{
    if (pv)
    {
        int             cOrder;
        unsigned        cPages;
        unsigned        iPage;
        struct page    *paPages;

        /* validate */
        AssertMsg(!((uintptr_t)pv & PAGE_OFFSET_MASK), ("pv=%p\n", pv));
        Assert(cb > 0);

        /* calc order and get pages */
        cb = RT_ALIGN_Z(cb, PAGE_SIZE);
        cPages = cb >> PAGE_SHIFT;
        cOrder = CalcPowerOf2Order(cPages);
        paPages = virt_to_page(pv);

        /*
         * Restore page attributes before freeing the pages.
         */
        for (iPage = 0; iPage < cPages; iPage++)
        {
            ClearPageReserved(&paPages[iPage]);
            if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL))
                MY_CHANGE_PAGE_ATTR(&paPages[iPage], 1, PAGE_KERNEL);
        }
        __free_pages(paPages, cOrder);
    }
}
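
/* Hypothetical usage sketch (not part of the original file): allocate a
 * physically contiguous scratch buffer for a device, then free it with the
 * same byte count. Names and the size are illustrative assumptions. */
#if 0 /* example only */
static void ExampleContiguous(void)
{
    RTCCPHYS Phys;
    void *pv = RTMemContAlloc(&Phys, 16 * PAGE_SIZE);
    if (pv)
    {
        /* ... hand Phys to the device, use pv from ring-0 code ... */
        RTMemContFree(pv, 16 * PAGE_SIZE);
    }
}
#endif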