VirtualBox

Ignore:
Timestamp:
Apr 12, 2012 2:40:45 PM (13 years ago)
Author:
vboxsync
Message:

IPRT/SUPDrv: Don't create a fixed sized heap if we don't have to, use get_vm_area and friends for each request instead.

Location:
trunk/src/VBox/Runtime/r0drv/linux
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Runtime/r0drv/linux/alloc-r0drv-linux.c

    r40806 r40894  
    3737#include "r0drv/alloc-r0drv.h"
    3838
     39
    3940#if defined(RT_ARCH_AMD64) || defined(DOXYGEN_RUNNING)
     41# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
     42/**
     43 * Starting with 2.6.23 we can use __get_vm_area and map_vm_area to allocate
     44 * memory in the module range.  This is preferable to the exec heap below.
     45 */
     46#  define RTMEMALLOC_EXEC_VM_AREA
     47# else
    4048/**
    4149 * We need memory in the module range (~2GB to ~0) this can only be obtained
     
    4553 * Very annoying and it's going to restrict us!
    4654 */
    47 # define RTMEMALLOC_EXEC_HEAP
    48 #endif
     55#  define RTMEMALLOC_EXEC_HEAP
     56# endif
     57#endif
     58
    4959#ifdef RTMEMALLOC_EXEC_HEAP
    5060# include <iprt/heap.h>
     
    5565
    5666/*******************************************************************************
     67*   Structures and Typedefs                                                    *
     68*******************************************************************************/
     69#ifdef RTMEMALLOC_EXEC_VM_AREA
     70/**
     71 * Extended header used for headers marked with RTMEMHDR_FLAG_EXEC_VM_AREA.
     72 *
     73 * This is used with allocating executable memory, for things like generated
     74 * code and loaded modules.
     75 */
     76typedef struct RTMEMLNXHDREX
     77{
     78    /** The VM area for this allocation. */
     79    struct vm_struct   *pVmArea;
     80    void               *pvDummy;
     81    /** The header we present to the generic API. */
     82    RTMEMHDR            Hdr;
     83} RTMEMLNXHDREX;
     84AssertCompileSize(RTMEMLNXHDREX, 32);
     85/** Pointer to an extended memory header. */
     86typedef RTMEMLNXHDREX *PRTMEMLNXHDREX;
     87#endif
     88
     89
     90/*******************************************************************************
    5791*   Global Variables                                                           *
    5892*******************************************************************************/
    5993#ifdef RTMEMALLOC_EXEC_HEAP
    60 
    61 # ifdef CONFIG_DEBUG_SET_MODULE_RONX
    62 #  define RTMEMALLOC_EXEC_HEAP_VM_AREA  1
    63 # endif
    6494/** The heap. */
    6595static RTHEAPSIMPLE g_HeapExec = NIL_RTHEAPSIMPLE;
    6696/** Spinlock protecting the heap. */
    6797static RTSPINLOCK   g_HeapExecSpinlock = NIL_RTSPINLOCK;
    68 # ifdef RTMEMALLOC_EXEC_HEAP_VM_AREA
    69 static struct page **g_apPages;
    70 static void *g_pvHeap;
    71 static size_t g_cPages;
    72 # endif
    7398
    7499
     
    79104DECLHIDDEN(void) rtR0MemExecCleanup(void)
    80105{
    81 # ifdef RTMEMALLOC_EXEC_HEAP_VM_AREA
    82     unsigned i;
    83 
    84     /* according to linux/drivers/lguest/core.c this function undoes
    85      * map_vm_area() as well as __get_vm_area(). */
    86     if (g_pvHeap)
    87         vunmap(g_pvHeap);
    88     for (i = 0; i < g_cPages; i++)
    89         __free_page(g_apPages[i]);
    90     kfree(g_apPages);
    91 # endif
    92 
    93106    RTSpinlockDestroy(g_HeapExecSpinlock);
    94107    g_HeapExecSpinlock = NIL_RTSPINLOCK;
     
    96109
    97110
    98 # ifndef RTMEMALLOC_EXEC_HEAP_VM_AREA
    99111/**
    100112 * Donate read+write+execute memory to the exec heap.
     
    128140RT_EXPORT_SYMBOL(RTR0MemExecDonate);
    129141
    130 # else /* !RTMEMALLOC_EXEC_HEAP_VM_AREA */
    131 
    132 /**
    133  * RTR0MemExecDonate() does not work if CONFIG_DEBUG_SET_MODULE_RONX is enabled.
    134  * In that case, allocate a VM area in the modules range and back it with kernel
    135  * memory. Unfortunately __vmalloc_area() is not exported so we have to emulate
    136  * it.
    137  */
    138 RTR0DECL(int) RTR0MemExecInit(size_t cb)
    139 {
    140     int rc;
    141     struct vm_struct *area;
    142     size_t cPages;
    143     size_t cbPages;
    144     unsigned i;
    145     struct page **ppPages;
    146 
    147     AssertReturn(g_HeapExec == NIL_RTHEAPSIMPLE, VERR_WRONG_ORDER);
    148 
    149     rc = RTSpinlockCreate(&g_HeapExecSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTR0MemExecInit");
    150     if (RT_SUCCESS(rc))
    151     {
    152         cb = RT_ALIGN(cb, PAGE_SIZE);
    153         area = __get_vm_area(cb, VM_ALLOC, MODULES_VADDR, MODULES_END);
    154         if (!area)
     142#endif /* RTMEMALLOC_EXEC_HEAP */
     143
     144
     145#ifdef RTMEMALLOC_EXEC_VM_AREA
     146/**
     147 * Allocate executable kernel memory in the module range.
     148 *
     149 * @returns Pointer to an allocation header on success.  NULL on failure.
     150 *
     151 * @param   cb          The size the user requested.
     152 */
     153static PRTMEMHDR rtR0MemAllocExecVmArea(size_t cb)
     154{
     155    size_t const        cbAlloc = RT_ALIGN_Z(sizeof(RTMEMLNXHDREX) + cb, PAGE_SIZE);
     156    size_t const        cPages  = cbAlloc >> PAGE_SHIFT;
     157    struct page       **papPages;
     158    struct vm_struct   *pVmArea;
     159    size_t              iPage;
     160
     161    pVmArea = __get_vm_area(cbAlloc, VM_ALLOC, MODULES_VADDR, MODULES_END);
     162    if (!pVmArea)
     163        return NULL;
     164    pVmArea->nr_pages = 0;    /* paranoia? */
     165    pVmArea->pages    = NULL; /* paranoia? */
     166
     167    papPages = (struct page **)kmalloc(cPages * sizeof(papPages[0]), GFP_KERNEL);
     168    if (!papPages)
     169    {
     170        vunmap(pVmArea->addr);
     171        return NULL;
     172    }
     173
     174    for (iPage = 0; iPage < cPages; iPage++)
     175    {
     176        papPages[iPage] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
     177        if (!papPages[iPage])
     178            break;
     179    }
     180    if (iPage == cPages)
     181    {
     182        /*
     183         * Map the pages.  The API requires an iterator argument, which can be
     184         * used, in case of failure, to figure out how much was actually
     185         * mapped.  Not sure how useful this really is, but whatever.
     186         *
     187         * Not entirely sure we really need to set nr_pages and pages here, but
     188         * they provide a very convenient place for storing something we need
     189         * in the free function, if nothing else...
     190         */
     191        struct page **papPagesIterator = papPages;
     192        pVmArea->nr_pages = cPages;
     193        pVmArea->pages    = papPages;
     194        if (!map_vm_area(pVmArea, PAGE_KERNEL_EXEC, &papPagesIterator))
    155195        {
    156             rtR0MemExecCleanup();
    157             return VERR_NO_MEMORY;
     196            PRTMEMLNXHDREX pHdrEx = (PRTMEMLNXHDREX)pVmArea->addr;
     197            pHdrEx->pVmArea     = pVmArea;
     198            pHdrEx->pvDummy     = NULL;
     199            return &pHdrEx->Hdr;
    158200        }
    159         g_pvHeap = area->addr;
    160         cPages = cb >> PAGE_SHIFT;
    161         area->nr_pages = 0;
    162         cbPages = cPages * sizeof(struct page *);
    163         g_apPages = kmalloc(cbPages, GFP_KERNEL);
    164         area->pages = g_apPages;
    165         if (!g_apPages)
    166         {
    167             rtR0MemExecCleanup();
    168             return VERR_NO_MEMORY;
    169         }
    170         memset(area->pages, 0, cbPages);
    171         for (i = 0; i < cPages; i++)
    172         {
    173             g_apPages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
    174             if (!g_apPages[i])
    175             {
    176                 area->nr_pages = i;
    177                 g_cPages = i;
    178                 rtR0MemExecCleanup();
    179                 return VERR_NO_MEMORY;
    180             }
    181         }
    182         area->nr_pages = cPages;
    183         g_cPages = i;
    184         ppPages = g_apPages;
    185         if (map_vm_area(area, PAGE_KERNEL_EXEC, &ppPages))
    186         {
    187             rtR0MemExecCleanup();
    188             return VERR_NO_MEMORY;
    189         }
    190 
    191         rc = RTHeapSimpleInit(&g_HeapExec, g_pvHeap, cb);
    192         if (RT_FAILURE(rc))
    193             rtR0MemExecCleanup();
    194     }
    195     return rc;
    196 }
    197 RT_EXPORT_SYMBOL(RTR0MemExecInit);
    198 # endif /* RTMEMALLOC_EXEC_HEAP_VM_AREA */
    199 #endif /* RTMEMALLOC_EXEC_HEAP */
    200 
     201
     202        /* bail out */
     203        pVmArea->nr_pages = papPagesIterator - papPages;
     204    }
     205
     206    vunmap(pVmArea->addr);
     207
     208    while (iPage-- > 0)
     209        __free_page(papPages[iPage]);
     210    kfree(papPages);
     211
     212    return NULL;
     213}
     214#endif /* RTMEMALLOC_EXEC_VM_AREA */
    201215
    202216
     
    227241        else
    228242            pHdr = NULL;
     243
     244# elif defined(RTMEMALLOC_EXEC_VM_AREA)
     245        pHdr = rtR0MemAllocExecVmArea(cb);
     246        fFlags |= RTMEMHDR_FLAG_EXEC_VM_AREA;
     247
    229248# else  /* !RTMEMALLOC_EXEC_HEAP */
     249# error "you don not want to go here..."
    230250        pHdr = (PRTMEMHDR)__vmalloc(cb + sizeof(*pHdr), GFP_KERNEL | __GFP_HIGHMEM, MY_PAGE_KERNEL_EXEC);
    231251# endif /* !RTMEMALLOC_EXEC_HEAP */
     
    294314    }
    295315#endif
     316#ifdef RTMEMALLOC_EXEC_VM_AREA
     317    else if (pHdr->fFlags & RTMEMHDR_FLAG_EXEC_VM_AREA)
     318    {
     319        PRTMEMLNXHDREX pHdrEx    = RT_FROM_MEMBER(pHdr, RTMEMLNXHDREX, Hdr);
     320        size_t         iPage     = pHdrEx->pVmArea->nr_pages;
     321        struct page  **papPages  = pHdrEx->pVmArea->pages;
     322        void          *pvMapping = pHdrEx->pVmArea->addr;
     323
     324        vunmap(pvMapping);
     325
     326        while (iPage-- > 0)
     327            __free_page(papPages[iPage]);
     328        kfree(papPages);
     329    }
     330#endif
    296331    else
    297332        vfree(pHdr);
    298333}
     334
    299335
    300336
     
    393429
    394430/**
    395  * Frees memory allocated ysing RTMemContAlloc().
     431 * Frees memory allocated using RTMemContAlloc().
    396432 *
    397433 * @param   pv      Pointer to return from RTMemContAlloc().
  • trunk/src/VBox/Runtime/r0drv/linux/initterm-r0drv-linux.c

    r39013 r40894  
    5050*   Internal Functions                                                         *
    5151*******************************************************************************/
    52 #ifdef RT_ARCH_AMD64
     52#if defined(RT_ARCH_AMD64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
    5353/* in alloc-r0drv0-linux.c */
    5454DECLHIDDEN(void) rtR0MemExecCleanup(void);
     
    115115#endif
    116116
    117 #ifdef RT_ARCH_AMD64
     117#if defined(RT_ARCH_AMD64) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
    118118    rtR0MemExecCleanup();
    119119#endif
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette