Changeset 40894 in vbox for trunk/src/VBox/Runtime/r0drv/linux
Timestamp: Apr 12, 2012 2:40:45 PM
Location:  trunk/src/VBox/Runtime/r0drv/linux
Files:     2 edited
Legend: unmodified lines are unprefixed; added lines are marked "+", removed lines "-".
trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c
(diff against r40806)

  …
  #include "r0drv/alloc-r0drv.h"


+ #if defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)
+ # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+ /**
+  * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
+  * memory in the moduel range.  This is preferrable to the exec heap below.
+  */
+ #  define RTMEMALLOC_EXEC_VM_AREA
+ # else
  /**
   * We need memory in the module range (~2GB to ~0) this can only be obtained
  …
   * Very annoying and it's going to restrict us!
   */
- # define RTMEMALLOC_EXEC_HEAP
- #endif
+ #  define RTMEMALLOC_EXEC_HEAP
+ # endif
+ #endif
+
  #ifdef RTMEMALLOC_EXEC_HEAP
  # include <iprt/heap.h>
  …

  /*******************************************************************************
+ *   Structures and Typedefs                                                    *
+ *******************************************************************************/
+ #ifdef RTMEMALLOC_EXEC_VM_AREA
+ /**
+  * Extended header used for headers marked with RTMEMHDR_FLAG_EXEC_VM_AREA.
+  *
+  * This is used with allocating executable memory, for things like generated
+  * code and loaded modules.
+  */
+ typedef struct RTMEMLNXHDREX
+ {
+     /** The VM area for this allocation. */
+     struct vm_struct   *pVmArea;
+     void               *pvDummy;
+     /** The header we present to the generic API. */
+     RTMEMHDR            Hdr;
+ } RTMEMLNXHDREX;
+ AssertCompileSize(RTMEMLNXHDREX, 32);
+ /** Pointer to an extended memory header. */
+ typedef RTMEMLNXHDREX *PRTMEMLNXHDREX;
+ #endif
+
+
+ /*******************************************************************************
  *   Global Variables                                                           *
  *******************************************************************************/
  #ifdef RTMEMALLOC_EXEC_HEAP
-
- # ifdef CONFIG_DEBUG_SET_MODULE_RONX
- #  define RTMEMALLOC_EXEC_HEAP_VM_AREA 1
- # endif
  /** The heap. */
  static RTHEAPSIMPLE g_HeapExec = NIL_RTHEAPSIMPLE;
  /** Spinlock protecting the heap. */
  static RTSPINLOCK   g_HeapExecSpinlock = NIL_RTSPINLOCK;
- # ifdef RTMEMALLOC_EXEC_HEAP_VM_AREA
- static struct page **g_apPages;
- static void         *g_pvHeap;
- static size_t        g_cPages;
- # endif

  …
  DECLHIDDEN(void) rtR0MemExecCleanup(void)
  {
- # ifdef RTMEMALLOC_EXEC_HEAP_VM_AREA
-     unsigned i;
-
-     /* according to linux/drivers/lguest/core.c this function undoes
-      * map_vm_area() as well as __get_vm_area(). */
-     if (g_pvHeap)
-         vunmap(g_pvHeap);
-     for (i = 0; i < g_cPages; i++)
-         __free_page(g_apPages[i]);
-     kfree(g_apPages);
- # endif
-
      RTSpinlockDestroy(g_HeapExecSpinlock);
      g_HeapExecSpinlock = NIL_RTSPINLOCK;
  …

- # ifndef RTMEMALLOC_EXEC_HEAP_VM_AREA
  /**
   * Donate read+write+execute memory to the exec heap.
  …
  RT_EXPORT_SYMBOL(RTR0MemExecDonate);

- # else /* !RTMEMALLOC_EXEC_HEAP_VM_AREA */
-
- /**
-  * RTR0MemExecDonate() does not work if CONFIG_DEBUG_SET_MODULE_RONX is enabled.
-  * In that case, allocate a VM area in the modules range and back it with kernel
-  * memory. Unfortunately __vmalloc_area() is not exported so we have to emulate
-  * it.
-  */
- RTR0DECL(int) RTR0MemExecInit(size_t cb)
- {
-     int rc;
-     struct vm_struct *area;
-     size_t cPages;
-     size_t cbPages;
-     unsigned i;
-     struct page **ppPages;
-
-     AssertReturn(g_HeapExec == NIL_RTHEAPSIMPLE, VERR_WRONG_ORDER);
-
-     rc = RTSpinlockCreate(&g_HeapExecSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0MemExecInit");
-     if (RT_SUCCESS(rc))
-     {
-         cb = RT_ALIGN(cb, PAGE_SIZE);
-         area = __get_vm_area(cb, VM_ALLOC, MODULES_VADDR, MODULES_END);
-         if (!area)
-         {
-             rtR0MemExecCleanup();
-             return VERR_NO_MEMORY;
-         }
-         g_pvHeap = area->addr;
-         cPages = cb >> PAGE_SHIFT;
-         area->nr_pages = 0;
-         cbPages = cPages * sizeof(struct page *);
-         g_apPages = kmalloc(cbPages, GFP_KERNEL);
-         area->pages = g_apPages;
-         if (!g_apPages)
-         {
-             rtR0MemExecCleanup();
-             return VERR_NO_MEMORY;
-         }
-         memset(area->pages, 0, cbPages);
-         for (i = 0; i < cPages; i++)
-         {
-             g_apPages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
-             if (!g_apPages[i])
-             {
-                 area->nr_pages = i;
-                 g_cPages = i;
-                 rtR0MemExecCleanup();
-                 return VERR_NO_MEMORY;
-             }
-         }
-         area->nr_pages = cPages;
-         g_cPages = i;
-         ppPages = g_apPages;
-         if (map_vm_area(area, PAGE_KERNEL_EXEC, &ppPages))
-         {
-             rtR0MemExecCleanup();
-             return VERR_NO_MEMORY;
-         }
-
-         rc = RTHeapSimpleInit(&g_HeapExec, g_pvHeap, cb);
-         if (RT_FAILURE(rc))
-             rtR0MemExecCleanup();
-     }
-     return rc;
- }
- RT_EXPORT_SYMBOL(RTR0MemExecInit);
- # endif /* RTMEMALLOC_EXEC_HEAP_VM_AREA */
- #endif /* RTMEMALLOC_EXEC_HEAP */
+ #endif /* RTMEMALLOC_EXEC_HEAP */
+
+
+ #ifdef RTMEMALLOC_EXEC_VM_AREA
+ /**
+  * Allocate executable kernel memory in the module range.
+  *
+  * @returns Pointer to a allocation header success. NULL on failure.
+  *
+  * @param   cb          The size the user requested.
+  */
+ static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
+ {
+     size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
+     size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
+     struct page       **papPages;
+     struct vm_struct   *pVmArea;
+     size_t              iPage;
+
+     pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
+     if (!pVmArea)
+         return NULL;
+     pVmArea->nr_pages = 0;    /* paranoia? */
+     pVmArea->pages    = NULL; /* paranoia? */
+
+     papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL);
+     if (!papPages)
+     {
+         vunmap(pVmArea->addr);
+         return NULL;
+     }
+
+     for (iPage = 0; iPage < cPages; iPage++)
+     {
+         papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+         if (!papPages[iPage])
+             break;
+     }
+     if (iPage == cPages)
+     {
+         /*
+          * Map the pages.  The API requires an iterator argument, which can be
+          * used, in case of failure, to figure out how much was actually
+          * mapped.  Not sure how useful this really is, but whatever.
+          *
+          * Not entirely sure we really need to set nr_pages and pages here, but
+          * they provide a very convenient place for storing something we need
+          * in the free function, if nothing else...
+          */
+         struct page **papPagesIterator = papPages;
+         pVmArea->nr_pages = cPages;
+         pVmArea->pages    = papPages;
+         if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC, &papPagesIterator))
+         {
+             PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
+             pHdrEx->pVmArea = pVmArea;
+             pHdrEx->pvDummy = NULL;
+             return &pHdrEx->Hdr;
+         }
+
+         /* bail out */
+         pVmArea->nr_pages = papPagesIterator - papPages;
+     }
+
+     vunmap(pVmArea->addr);
+
+     while (iPage-- > 0)
+         __free_page(papPages[iPage]);
+     kfree(papPages);
+
+     return NULL;
+ }
+ #endif /* RTMEMALLOC_EXEC_VM_AREA */


  …
      else
          pHdr = NULL;
+
+ # elif defined(RTMEMALLOC_EXEC_VM_AREA)
+     pHdr = rtR0MemAllocExecVmArea(cb);
+     fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;
+
  # else /* !RTMEMALLOC_EXEC_HEAP */
+ #  error "you don not want to go here..."
      pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM, MY_PAGE_KERNEL_EXEC);
  # endif /* !RTMEMALLOC_EXEC_HEAP */
  …
      }
  #endif
+ #ifdef RTMEMALLOC_EXEC_VM_AREA
+     else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
+     {
+         PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
+         size_t         iPage     = pHdrEx->pVmArea->nr_pages;
+         struct page  **papPages  = pHdrEx->pVmArea->pages;
+         void          *pvMapping = pHdrEx->pVmArea->addr;
+
+         vunmap(pvMapping);
+
+         while (iPage-- > 0)
+             __free_page(papPages[iPage]);
+         kfree(papPages);
+     }
+ #endif
      else
          vfree(pHdr);
  }
+

  …

  /**
-  * Frees memory allocated ysing RTMemContAlloc().
+  * Frees memory allocated using RTMemContAlloc().
   *
   * @param   pv      Pointer to return from RTMemContAlloc().
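The core of the new rtR0MemAllocExecVmArea() path is the kernel's __get_vm_area()/map_vm_area() pairing: reserve virtual address space in the module range, back it with individually allocated pages, and map them with PAGE_KERNEL_EXEC. The sketch below isolates that idiom from the IPRT header bookkeeping. It is a minimal illustration only: it assumes an AMD64 kernel of the 2.6.23-era vintage this changeset targets, where __get_vm_area() and map_vm_area() are still exported with the signatures used in the diff, and the demo_alloc_exec() name and its ppVmArea out-parameter are made up for the example rather than taken from IPRT or the kernel.

/*
 * Minimal sketch of the __get_vm_area()/map_vm_area() idiom used above.
 * Assumes a 2.6.23+ x86-64 kernel; demo_alloc_exec() is illustrative only.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/pgtable.h>    /* MODULES_VADDR, MODULES_END, PAGE_KERNEL_EXEC */

static void *demo_alloc_exec(size_t cb, struct vm_struct **ppVmArea)
{
    size_t const       cbAligned = PAGE_ALIGN(cb);
    size_t const       cPages    = cbAligned >> PAGE_SHIFT;
    struct vm_struct  *pVmArea;
    struct page      **papPages;
    struct page      **papIter;
    size_t             iPage;

    /* 1. Reserve virtual address space inside the module range (~-2GB..0),
     *    which is what keeps the result within rel32 reach of the kernel. */
    pVmArea = __get_vm_area(cbAligned, VM_ALLOC, MODULES_VADDR, MODULES_END);
    if (!pVmArea)
        return NULL;

    /* 2. Back the range with individually allocated pages. */
    papPages = kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL);
    if (!papPages)
    {
        vunmap(pVmArea->addr);
        return NULL;
    }
    for (iPage = 0; iPage < cPages; iPage++)
    {
        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
        if (!papPages[iPage])
            goto bail;
    }

    /* 3. Map the pages executable.  map_vm_area() advances the iterator, so
     *    pass a copy; on success the caller keeps the vm_struct for freeing. */
    papIter           = papPages;
    pVmArea->nr_pages = cPages;
    pVmArea->pages    = papPages;
    if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC, &papIter))
    {
        *ppVmArea = pVmArea;
        return pVmArea->addr;
    }

bail:
    /* Undo everything; vunmap() releases the vm_struct as well. */
    vunmap(pVmArea->addr);
    while (iPage-- > 0)
        __free_page(papPages[iPage]);
    kfree(papPages);
    return NULL;
}

Freeing mirrors the new RTMEMHDR_FLAG_EXEC_VM_AREA branch in rtR0MemFree(): vunmap() the area's address (which, per the comment removed above, also undoes __get_vm_area()), __free_page() every entry of the saved page array, then kfree() the array.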
trunk/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c
(diff against r39013)

  …
  *   Internal Functions                                                         *
  *******************************************************************************/
- #ifdef RT_ARCH_AMD64
+ #if defined(RT_ARCH_AMD64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
  /* in alloc-r0drv0-linux.c */
  DECLHIDDEN(void) rtR0MemExecCleanup(void);
  …
  #endif

- #ifdef RT_ARCH_AMD64
+ #if defined(RT_ARCH_AMD64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
      rtR0MemExecCleanup();
  #endif
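Read together with the alloc-r0drv-linux.c hunks, this change simply tracks which configurations still have global exec-heap state to tear down at module unload. A condensed restatement of the gating follows; it is illustrative only, not the literal IPRT source (RT_ARCH_AMD64 is IPRT's architecture define):

#include <linux/version.h>

#if defined(RT_ARCH_AMD64)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
   /* New path: each executable allocation gets its own VM area in the module
      range, so there is no global state and initterm-r0drv-linux.c no longer
      declares or calls rtR0MemExecCleanup(). */
#  define RTMEMALLOC_EXEC_VM_AREA
# else
   /* Legacy path: a single exec heap plus spinlock, which is why the
      termination code still calls rtR0MemExecCleanup() on these kernels. */
#  define RTMEMALLOC_EXEC_HEAP
# endif
#endif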