Changeset 97910 in vbox for trunk/src/VBox/Runtime/r0drv/linux
- Timestamp: Dec 29, 2022 7:15:56 PM (2 years ago)
- Location: trunk/src/VBox/Runtime/r0drv/linux
- Files: 2 edited
Legend:
- ' ' Unmodified
- '+' Added
- '-' Removed
trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c (r97905 → r97910)

 #include "r0drv/alloc-r0drv.h"
 
-
-#if (defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)) && !defined(RTMEMALLOC_EXEC_HEAP)
-# if RTLNX_VER_MIN(2,6,23) && RTLNX_VER_MAX(5,8,0) && !RTLNX_RHEL_MAJ_PREREQ(8,5)
-/**
- * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
- * memory in the moduel range.  This is preferrable to the exec heap below.
- */
-#  define RTMEMALLOC_EXEC_VM_AREA
-# else
-/**
- * We need memory in the module range (~2GB to ~0) this can only be obtained
- * thru APIs that are not exported (see module_alloc()).
- *
- * So, we'll have to create a quick and dirty heap here using BSS memory.
- * Very annoying and it's going to restrict us!
- */
-#  define RTMEMALLOC_EXEC_HEAP
-# endif
-#endif
-
-#ifdef RTMEMALLOC_EXEC_HEAP
-# include <iprt/heap.h>
-# include <iprt/spinlock.h>
-# include <iprt/errcore.h>
-#endif
-
 #include "internal/initterm.h"
 
-
-/*******************************************************************************
-*   Structures and Typedefs                                                    *
-*******************************************************************************/
-#ifdef RTMEMALLOC_EXEC_VM_AREA
-/**
- * Extended header used for headers marked with RTMEMHDR_FLAG_EXEC_VM_AREA.
- *
- * This is used with allocating executable memory, for things like generated
- * code and loaded modules.
- */
-typedef struct RTMEMLNXHDREX
-{
-    /** The VM area for this allocation. */
-    struct vm_struct   *pVmArea;
-    void               *pvDummy;
-    /** The header we present to the generic API. */
-    RTMEMHDR            Hdr;
-} RTMEMLNXHDREX;
-AssertCompileSize(RTMEMLNXHDREX, 32);
-/** Pointer to an extended memory header. */
-typedef RTMEMLNXHDREX *PRTMEMLNXHDREX;
-#endif
-
-
-/*******************************************************************************
-*   Global Variables                                                           *
-*******************************************************************************/
-#ifdef RTMEMALLOC_EXEC_HEAP
-/** The heap. */
-static RTHEAPSIMPLE g_HeapExec = NIL_RTHEAPSIMPLE;
-/** Spinlock protecting the heap. */
-static RTSPINLOCK   g_HeapExecSpinlock = NIL_RTSPINLOCK;
-#endif
-
-
-/**
- * API for cleaning up the heap spinlock on IPRT termination.
- * This is as RTMemExecDonate specific to AMD64 Linux/GNU.
- */
-DECLHIDDEN(void) rtR0MemExecCleanup(void)
-{
-#ifdef RTMEMALLOC_EXEC_HEAP
-    RTSpinlockDestroy(g_HeapExecSpinlock);
-    g_HeapExecSpinlock = NIL_RTSPINLOCK;
-#endif
-}
-
-
-#ifdef RTMEMALLOC_EXEC_VM_AREA
-/**
- * Allocate executable kernel memory in the module range.
- *
- * @returns Pointer to a allocation header success. NULL on failure.
- *
- * @param   cb          The size the user requested.
- */
-static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
-{
-    size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
-    size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
-    struct page       **papPages;
-    struct vm_struct   *pVmArea;
-    size_t              iPage;
-
-    pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
-    if (!pVmArea)
-        return NULL;
-    pVmArea->nr_pages = 0;    /* paranoia? */
-    pVmArea->pages    = NULL; /* paranoia? */
-
-    papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL | __GFP_NOWARN);
-    if (!papPages)
-    {
-        vunmap(pVmArea->addr);
-        return NULL;
-    }
-
-    for (iPage = 0; iPage < cPages; iPage++)
-    {
-        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN);
-        if (!papPages[iPage])
-            break;
-    }
-    if (iPage == cPages)
-    {
-        /*
-         * Map the pages.
-         *
-         * Not entirely sure we really need to set nr_pages and pages here, but
-         * they provide a very convenient place for storing something we need
-         * in the free function, if nothing else...
-         */
-# if RTLNX_VER_MAX(3,17,0)
-        struct page **papPagesIterator = papPages;
-# endif
-        pVmArea->nr_pages = cPages;
-        pVmArea->pages    = papPages;
-        if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC,
-# if RTLNX_VER_MAX(3,17,0)
-                         &papPagesIterator
-# else
-                         papPages
-# endif
-                         ))
-        {
-            PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
-            pHdrEx->pVmArea = pVmArea;
-            pHdrEx->pvDummy = NULL;
-            return &pHdrEx->Hdr;
-        }
-        /* bail out */
-# if RTLNX_VER_MAX(3,17,0)
-        pVmArea->nr_pages = papPagesIterator - papPages;
-# endif
-    }
-
-    vunmap(pVmArea->addr);
-
-    while (iPage-- > 0)
-        __free_page(papPages[iPage]);
-    kfree(papPages);
-
-    return NULL;
-}
-#endif /* RTMEMALLOC_EXEC_VM_AREA */
 
…

      * Allocate.
      */
-    if (fFlags & RTMEMHDR_FLAG_EXEC)
-    {
-        if (fFlags & RTMEMHDR_FLAG_ANY_CTX)
-            return VERR_NOT_SUPPORTED;
-
-#if defined(RT_ARCH_AMD64)
-# ifdef RTMEMALLOC_EXEC_HEAP
-        if (g_HeapExec != NIL_RTHEAPSIMPLE)
-        {
-            RTSpinlockAcquire(g_HeapExecSpinlock);
-            pHdr = (PRTMEMHDR)RTHeapSimpleAlloc(g_HeapExec, cb + sizeof(*pHdr), 0);
-            RTSpinlockRelease(g_HeapExecSpinlock);
-            fFlags |= RTMEMHDR_FLAG_EXEC_HEAP;
-        }
-        else
-            pHdr = NULL;
-
-# elif defined(RTMEMALLOC_EXEC_VM_AREA)
-        pHdr = rtR0MemAllocExecVmArea(cb);
-        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;
-
-# else  /* !RTMEMALLOC_EXEC_HEAP && !RTMEMALLOC_EXEC_VM_AREA */
-#  error "you do not want to go here..."
-        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
-# endif /* !RTMEMALLOC_EXEC_HEAP && !RTMEMALLOC_EXEC_VM_AREA */
-
-#elif defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
-# if RTLNX_VER_MIN(5,8,0)
-        AssertMsgFailed(("This point should not be reached, please file a bug\n"));
-        pHdr = NULL;
-# else
-        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, MY_PAGE_KERNEL_EXEC);
-# endif
-#else
-        pHdr = (PRTMEMHDR)vmalloc(cb + sizeof(*pHdr));
-#endif
-    }
-    else
-    {
-        if (
-#if 1 /* vmalloc has serious performance issues, avoid it. */
-               cb <= PAGE_SIZE*16 - sizeof(*pHdr)
-#else
-               cb <= PAGE_SIZE
-#endif
-            || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
-           )
-        {
-            fFlags |= RTMEMHDR_FLAG_KMALLOC;
-            pHdr = kmalloc(cb + sizeof(*pHdr),
-                           fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? GFP_ATOMIC | __GFP_NOWARN : GFP_KERNEL | __GFP_NOWARN);
-            if (RT_UNLIKELY(   !pHdr
-                            && cb > PAGE_SIZE
-                            && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
-            {
-                fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
-                pHdr = vmalloc(cb + sizeof(*pHdr));
-            }
-        }
-        else
-            pHdr = vmalloc(cb + sizeof(*pHdr));
-    }
-    if (RT_UNLIKELY(!pHdr))
-    {
-        IPRT_LINUX_RESTORE_EFL_AC();
-        return VERR_NO_MEMORY;
-    }
-
-    /*
-     * Initialize.
-     */
-    pHdr->u32Magic   = RTMEMHDR_MAGIC;
-    pHdr->fFlags     = fFlags;
-    pHdr->cb         = cb;
-    pHdr->cbReq      = cb;
-
-    *ppHdr = pHdr;
-    IPRT_LINUX_RESTORE_EFL_AC();
-    return VINF_SUCCESS;
+    if (
+#if 1 /* vmalloc has serious performance issues, avoid it. */
+           cb <= PAGE_SIZE*16 - sizeof(*pHdr)
+#else
+           cb <= PAGE_SIZE
+#endif
+        || (fFlags & RTMEMHDR_FLAG_ANY_CTX)
+       )
+    {
+        fFlags |= RTMEMHDR_FLAG_KMALLOC;
+        pHdr = kmalloc(cb + sizeof(*pHdr),
+                       fFlags & RTMEMHDR_FLAG_ANY_CTX_ALLOC ? GFP_ATOMIC | __GFP_NOWARN : GFP_KERNEL | __GFP_NOWARN);
+        if (RT_UNLIKELY(   !pHdr
+                        && cb > PAGE_SIZE
+                        && !(fFlags & RTMEMHDR_FLAG_ANY_CTX) ))
+        {
+            fFlags &= ~RTMEMHDR_FLAG_KMALLOC;
+            pHdr = vmalloc(cb + sizeof(*pHdr));
+        }
+    }
+    else
+        pHdr = vmalloc(cb + sizeof(*pHdr));
+    if (RT_LIKELY(pHdr))
+    {
+        /*
+         * Initialize.
+         */
+        pHdr->u32Magic   = RTMEMHDR_MAGIC;
+        pHdr->fFlags     = fFlags;
+        pHdr->cb         = cb;
+        pHdr->cbReq      = cb;
+
+        *ppHdr = pHdr;
+        IPRT_LINUX_RESTORE_EFL_AC();
+        return VINF_SUCCESS;
+    }
+
+    IPRT_LINUX_RESTORE_EFL_AC();
+    return VERR_NO_MEMORY;
 }
 
…

     if (pHdr->fFlags & RTMEMHDR_FLAG_KMALLOC)
         kfree(pHdr);
-#ifdef RTMEMALLOC_EXEC_HEAP
-    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_HEAP)
-    {
-        RTSpinlockAcquire(g_HeapExecSpinlock);
-        RTHeapSimpleFree(g_HeapExec, pHdr);
-        RTSpinlockRelease(g_HeapExecSpinlock);
-    }
-#endif
-#ifdef RTMEMALLOC_EXEC_VM_AREA
-    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
-    {
-        PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
-        size_t         iPage     = pHdrEx->pVmArea->nr_pages;
-        struct page  **papPages  = pHdrEx->pVmArea->pages;
-        void          *pvMapping = pHdrEx->pVmArea->addr;
-
-        vunmap(pvMapping);
-
-        while (iPage-- > 0)
-            __free_page(papPages[iPage]);
-        kfree(papPages);
-    }
-#endif
     else
         vfree(pHdr);
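For orientation, what remains after this change is a kmalloc-first strategy with a vmalloc fallback for larger, process-context requests. The sketch below is a minimal, hypothetical illustration of that pattern for a plain Linux kernel module; it is not IPRT code, the names (demo_hdr, demo_alloc, demo_free), the magic value, and the 16-page threshold are assumptions lifted from the hunk above, and the fallback condition is simplified.

    /* Minimal sketch of the kmalloc-first / vmalloc-fallback pattern used above.
     * Hypothetical helper names; not part of IPRT. */
    #include <linux/types.h>     /* bool, size_t */
    #include <linux/slab.h>      /* kmalloc, kfree, GFP_* */
    #include <linux/vmalloc.h>   /* vmalloc, vfree */
    #include <linux/mm.h>        /* PAGE_SIZE */

    struct demo_hdr {
        unsigned int magic;      /* sanity marker */
        unsigned int kmalloced;  /* 1 = free with kfree(), 0 = vfree() */
        size_t       cb;         /* user-visible size */
    };

    static void *demo_alloc(size_t cb, bool any_ctx)
    {
        struct demo_hdr *hdr;

        /* Small blocks (and any-context callers) use kmalloc; vmalloc is slow
         * and cannot be used from atomic context. */
        if (cb <= PAGE_SIZE * 16 - sizeof(*hdr) || any_ctx) {
            hdr = kmalloc(cb + sizeof(*hdr),
                          any_ctx ? GFP_ATOMIC | __GFP_NOWARN : GFP_KERNEL | __GFP_NOWARN);
            if (hdr) {
                hdr->kmalloced = 1;
                goto done;
            }
            if (any_ctx)
                return NULL;         /* no fallback possible in atomic context */
        }

        /* Large (or failed) process-context requests fall back to vmalloc. */
        hdr = vmalloc(cb + sizeof(*hdr));
        if (!hdr)
            return NULL;
        hdr->kmalloced = 0;
    done:
        hdr->magic = 0x4d454d30;     /* arbitrary demo magic */
        hdr->cb    = cb;
        return hdr + 1;              /* user data follows the header */
    }

    static void demo_free(void *pv)
    {
        struct demo_hdr *hdr = (struct demo_hdr *)pv - 1;
        if (hdr->kmalloced)
            kfree(hdr);
        else
            vfree(hdr);
    }

The header stored in front of the user block plays the same role as RTMEMHDR in the real code: the free path needs to know which allocator produced the block so it can pick kfree() or vfree().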
trunk/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c (r96407 → r97910)

 #endif
 
-    rtR0MemExecCleanup();
-
     IPRT_LINUX_RESTORE_EFL_AC();
 }
Note: See TracChangeset for help on using the changeset viewer.