VirtualBox

Changeset 39697 in vbox for trunk/src/VBox/Runtime


Timestamp: Jan 3, 2012 10:21:26 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 75542
Message:

Runtime/r0drv: get the physical address of Linux kernel kmap mappings too.
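
On 32-bit kernels with CONFIG_HIGHMEM, memory mapped with kmap() lives in a dedicated window outside the kernel's linear mapping, so virt_to_page()/virt_to_phys() cannot be used on such addresses; this change resolves them by walking the page tables instead. Below is a minimal usage sketch, not part of the changeset; the IPRT calls are quoted from memory, so check iprt/memobj.h for the exact signatures.

    #include <iprt/memobj.h>
    #include <iprt/mem.h>
    #include <iprt/err.h>

    /* Hypothetical caller: pvKmapped is a kernel virtual address obtained via
     * kmap() on a highmem page.  With this change, locking such a buffer and
     * querying the physical addresses of its pages is expected to work too. */
    static int exampleQueryPhysOfKmapBuffer(void *pvKmapped, size_t cb)
    {
        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjLockKernel(&hMemObj, pvKmapped, cb,
                                      RTMEM_PROT_READ | RTMEM_PROT_WRITE);
        if (RT_SUCCESS(rc))
        {
            RTHCPHYS PhysFirstPage = RTR0MemObjGetPagePhysAddr(hMemObj, 0 /* iPage */);
            /* ... hand PhysFirstPage to DMA setup or similar ... */
            RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
        }
        return rc;
    }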

File: 1 edited

Legend: lines prefixed with '+' were added, lines prefixed with '-' were removed; all other lines are unchanged context.
  • trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c

--- trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (revision 36555)
+++ trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (revision 39697)
     }
     return rc;
+}
+
+
+/**
+ * Translates a kernel virtual address to a linux page structure by walking the
+ * page tables.
+ *
+ * @note    We do assume that the page tables will not change as we are walking
+ *          them.  This assumption is rather forced by the fact that I could not
+ *          immediately see any way of preventing this from happening.  So, we
+ *          take some extra care when accessing them.
+ *
+ *          Because of this, we don't want to use this function on memory where
+ *          attribute changes to nearby pages are likely to cause large pages to
+ *          be used or split up. So, don't use this for the linear mapping of
+ *          physical memory.
+ *
+ * @returns Pointer to the page structure or NULL if it could not be found.
+ * @param   pv      The kernel virtual address.
+ */
+static struct page *rtR0MemObjLinuxVirtToPage(void *pv)
+{
+    unsigned long   ulAddr = (unsigned long)pv;
+    unsigned long   pfn;
+    struct page    *pPage;
+    pte_t          *pEntry;
+    union
+    {
+        pgd_t       Global;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+        pud_t       Upper;
+#endif
+        pmd_t       Middle;
+        pte_t       Entry;
+    } u;
+
+    /* Can this happen in a situation where this code is called?  And if
+     * so, can it change under our feet?  See also
+     * "Documentation/vm/active_mm.txt" in the kernel sources. */
+    if (RT_UNLIKELY(!current->active_mm))
+        return NULL;
+    u.Global = *pgd_offset(current->active_mm, ulAddr);
+    if (RT_UNLIKELY(pgd_none(u.Global)))
+        return NULL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
+    u.Upper = *pud_offset(&u.Global, ulAddr);
+    if (RT_UNLIKELY(pud_none(u.Upper)))
+        return NULL;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+    if (pud_large(u.Upper))
+    {
+        pPage = pud_page(u.Upper);
+        AssertReturn(pPage, NULL);
+        pfn  = page_to_pfn(pPage);      /* doing it the safe way... */
+        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PUD_SHIFT - PAGE_SHIFT)) - 1);
+        return pfn_to_page(pfn);
+    }
+# endif
+
+    u.Middle = *pmd_offset(&u.Upper, ulAddr);
+#else  /* < 2.6.11 */
+    u.Middle = *pmd_offset(&u.Global, ulAddr);
+#endif /* < 2.6.11 */
+    if (RT_UNLIKELY(pmd_none(u.Middle)))
+        return NULL;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+    if (pmd_large(u.Middle))
+    {
+        pPage = pmd_page(u.Middle);
+        AssertReturn(pPage, NULL);
+        pfn  = page_to_pfn(pPage);      /* doing it the safe way... */
+        pfn += (ulAddr >> PAGE_SHIFT) & ((UINT32_C(1) << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+        return pfn_to_page(pfn);
+    }
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5)
+    pEntry = pte_offset_map(&u.Middle, ulAddr);
+#else
+    pEntry = pte_offset(&u.Middle, ulAddr);
+#endif
+    if (RT_UNLIKELY(!pEntry))
+        return NULL;
+    u.Entry = *pEntry;
+    pte_unmap(pEntry);
+
+    if (RT_UNLIKELY(!pte_present(u.Entry)))
+        return NULL;
+    return pte_page(u.Entry);
 }
 
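
Not part of the changeset: once the page-table walk above has produced a struct page, the physical address the commit message refers to follows from the page frame plus the offset within the page. A hedged sketch; the helper name is made up, and page_to_phys()/PAGE_MASK are the standard Linux kernel macros.

    /* Hypothetical helper illustrating the final step.  rtR0MemObjLinuxVirtToPage()
     * is the function added above; RTHCPHYS is IPRT's host physical address type. */
    static RTHCPHYS exampleVirtToPhys(void *pv)
    {
        struct page *pPage = rtR0MemObjLinuxVirtToPage(pv);
        if (!pPage)
            return NIL_RTHCPHYS; /* the mapping could not be resolved */
        return (RTHCPHYS)page_to_phys(pPage) + ((unsigned long)pv & ~PAGE_MASK);
    }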
     
     NOREF(fAccess);
 
-    /*
-     * Classify the memory and check that we can deal with it.
-     */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-    fLinearMapping = virt_addr_valid(pvLast) && virt_addr_valid(pv);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0)
-    fLinearMapping = VALID_PAGE(virt_to_page(pvLast)) && VALID_PAGE(virt_to_page(pv));
+    if (   !RTR0MemKernelIsValidAddr(pv)
+        || !RTR0MemKernelIsValidAddr(pv + cb))
+        return VERR_INVALID_PARAMETER;
+
+    /*
+     * The lower part of the kernel memory has a linear mapping between
+     * physical and virtual addresses. So we take a short cut here.  This is
+     * assumed to be the cleanest way to handle those addresses (and the code
+     * is well tested, though the test for determining it is not very nice).
+     * If we ever decide it isn't, we can still remove it.
+     */
+#if 0
+    fLinearMapping = (unsigned long)pvLast < VMALLOC_START;
 #else
-# error "not supported"
-#endif
-    /*
-     * kmap()'ed memory. Only relevant for 32-bit Linux kernels with HIGHMEM
-     * enabled. Unfortunately there is no easy way to retrieve the page object
-     * for such temporarily mapped memory; virt_to_page() does not work here.
-     * There is not even a function to check whether a virtual address is
-     * inside the kmap() area :-(  kmap_atomic_to_page() looks promising, but
-     * the test 'if (vaddr < FIXADDR_START)' is wrong -- the kmap() area is
-     * located below the fixmap area. vmalloc_to_page() would work but is only
-     * allowed for vmalloc'ed memory.
-     */
-#ifdef CONFIG_HIGHMEM
-    if (pv < PKMAP_BASE + LAST_PKMAP*PAGE_SIZE && pvLast >= PKMAP_BASE)
-        return VERR_INVALID_PARAMETER;
-#endif
-    if (!fLinearMapping)
-    {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
-        if (   !RTR0MemKernelIsValidAddr(pv)
-            || !RTR0MemKernelIsValidAddr(pv + cb))
-#endif
-            return VERR_INVALID_PARAMETER;
-    }
+    fLinearMapping = (unsigned long)pv     >= (unsigned long)__va(0)
+                  && (unsigned long)pvLast <  (unsigned long)high_memory;
+#endif
 
     /*
     
     /*
      * Gather the pages.
-     * We ASSUME all kernel pages are non-swappable.
+     * We ASSUME all kernel pages are non-swappable and non-movable.
      */
     rc     = VINF_SUCCESS;
     pbPage = (uint8_t *)pvLast;
     iPage  = cPages;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 19)
     if (!fLinearMapping)
     {
         while (iPage-- > 0)
         {
-            struct page *pPage = vmalloc_to_page(pbPage);
+            struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
             if (RT_UNLIKELY(!pPage))
             {
 
     }
     else
-#endif
     {
         while (iPage-- > 0)
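
For orientation, here is roughly how a kernel virtual address breaks down for the purposes of this code; a minimal sketch assuming a 32-bit x86 kernel with CONFIG_HIGHMEM, using only standard kernel symbols (__va, high_memory, PKMAP_BASE, LAST_PKMAP). The helper itself is hypothetical and not part of the changeset.

    #include <linux/mm.h>
    #include <linux/highmem.h>

    enum ExampleAddrClass { EXAMPLE_ADDR_LINEAR, EXAMPLE_ADDR_KMAP, EXAMPLE_ADDR_OTHER };

    static enum ExampleAddrClass exampleClassifyKernelAddr(void *pv)
    {
        unsigned long uAddr = (unsigned long)pv;

        /* Linear ("lowmem") mapping: virt_to_page()/__pa() are valid here,
         * which is what the new fLinearMapping test checks for. */
        if (uAddr >= (unsigned long)__va(0) && uAddr < (unsigned long)high_memory)
            return EXAMPLE_ADDR_LINEAR;

    #ifdef CONFIG_HIGHMEM
        /* kmap() window: temporary mappings of highmem pages.  virt_to_page()
         * does not work here, so the page tables are walked instead. */
        if (uAddr >= PKMAP_BASE && uAddr < PKMAP_BASE + (unsigned long)LAST_PKMAP * PAGE_SIZE)
            return EXAMPLE_ADDR_KMAP;
    #endif

        /* Everything else: vmalloc, ioremap, module space, fixmap, ... */
        return EXAMPLE_ADDR_OTHER;
    }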