Changeset 92418 in vbox for trunk/src/VBox/Runtime/r0drv


Timestamp: Nov 15, 2021 8:38:55 AM (3 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 148273
Message:

IPRT/memobj-r0drv-linux: Implemented rtR0MemObjNativeAllocLarge using GFP_TRANSHUGE and made it fail with VERR_NOT_SUPPORTED if GFP_TRANSHUGE is not available (< 2.6.38). bugref:10093 bugref:5324
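
In short, the new code grabs each large page as a single zeroed, high-order GFP_TRANSHUGE allocation and refuses to run on kernels that lack that flag. The gist of the pattern, reduced to a stand-alone sketch using only stock kernel APIs (the helper names and the error handling here are illustrative, not the IPRT implementation in the diff below):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative sketch: one zeroed, non-movable, high-order GFP_TRANSHUGE
 * allocation per large page.  Not the actual IPRT code. */
static struct page *sketchAllocOneLargePage(size_t cbLargePage)
{
#ifdef GFP_TRANSHUGE
    unsigned const uOrder = get_order(cbLargePage); /* e.g. 9 for 2 MiB pages on a 4 KiB PAGE_SIZE kernel */
    gfp_t          fGfp   = GFP_TRANSHUGE | __GFP_ZERO;
# ifdef __GFP_MOVABLE
    fGfp &= ~__GFP_MOVABLE;           /* the change strips __GFP_MOVABLE so the pages stay put */
# endif
    return alloc_pages(fGfp, uOrder); /* NULL on failure */
#else
    return NULL; /* pre-2.6.38: no GFP_TRANSHUGE, so the caller reports VERR_NOT_SUPPORTED */
#endif
}

static void sketchFreeOneLargePage(struct page *pPages, size_t cbLargePage)
{
    if (pPages)
        __free_pages(pPages, get_order(cbLargePage));
}

Note that the #else branch of the real change deliberately does not fall back to rtR0MemObjFallbackAllocLarge: per the comment in the diff, pressuring the page allocator that hard on old kernels risks waking the swap daemon, so the function simply returns VERR_NOT_SUPPORTED.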

File: 1 edited

  • trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c

--- trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c  (r92246)
+++ trunk/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c  (r92418)
@@ -123,4 +123,6 @@
     /** Set if we've vmap'ed the memory into ring-0. */
     bool                fMappedToRing0;
+    /** This is non-zero if large page allocation. */
+    uint8_t             cLargePageOrder;
 #ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
     /** Return from alloc_vm_area() that we now need to use for executable
     
@@ -356,6 +358,6 @@
     pMemLnx->cPages = cPages;
 
-     if (cPages > 255)
-     {
+    if (cPages > 255)
+    {
 # ifdef __GFP_REPEAT
         /* Try hard to allocate the memory, but the allocation attempt might fail. */
     
@@ -366,5 +368,5 @@
         fFlagsLnx |= __GFP_NOMEMALLOC;
 # endif
-     }
+    }
 
     /*
     
@@ -721,6 +723,6 @@
     switch (pMemLnx->Core.enmType)
     {
+        case RTR0MEMOBJTYPE_PAGE:
         case RTR0MEMOBJTYPE_LOW:
-        case RTR0MEMOBJTYPE_PAGE:
         case RTR0MEMOBJTYPE_CONT:
         case RTR0MEMOBJTYPE_PHYS:
     
@@ -729,4 +731,19 @@
             rtR0MemObjLinuxFreePages(pMemLnx);
             break;
+
+        case RTR0MEMOBJTYPE_LARGE_PAGE:
+        {
+            uint32_t const cLargePages = pMemLnx->Core.cb >> (pMemLnx->cLargePageOrder + PAGE_SHIFT);
+            uint32_t       iLargePage;
+            for (iLargePage = 0; iLargePage < cLargePages; iLargePage++)
+                __free_pages(pMemLnx->apPages[iLargePage << pMemLnx->cLargePageOrder], pMemLnx->cLargePageOrder);
+            pMemLnx->cPages = 0;
+
+#ifdef IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
+            Assert(!pMemLnx->pArea);
+            Assert(!pMemLnx->papPtesForArea);
+#endif
+            break;
+        }
 
         case RTR0MEMOBJTYPE_LOCK:
     
@@ -836,5 +853,67 @@
                                            const char *pszTag)
 {
-    return rtR0MemObjFallbackAllocLarge(ppMem, cb, cbLargePage, fFlags, pszTag);
+#ifdef GFP_TRANSHUGE
+    /*
+     * Allocate a memory object structure that's large enough to contain
+     * the page pointer array.
+     */
+# ifdef __GFP_MOVABLE
+    unsigned const  fGfp            = (GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE;
+# else
+    unsigned const  fGfp            = (GFP_TRANSHUGE | __GFP_ZERO);
+# endif
+    size_t const    cPagesPerLarge  = cbLargePage >> PAGE_SHIFT;
+    unsigned const  cLargePageOrder = rtR0MemObjLinuxOrder(cPagesPerLarge);
+    size_t const    cLargePages     = cb >> (cLargePageOrder + PAGE_SHIFT);
+    size_t const    cPages          = cb >> PAGE_SHIFT;
+    PRTR0MEMOBJLNX  pMemLnx;
+
+    Assert(RT_BIT_64(cLargePageOrder + PAGE_SHIFT) == cbLargePage);
+    pMemLnx = (PRTR0MEMOBJLNX)rtR0MemObjNew(RT_UOFFSETOF_DYN(RTR0MEMOBJLNX, apPages[cPages]),
+                                            RTR0MEMOBJTYPE_LARGE_PAGE, NULL, cb, pszTag);
+    if (pMemLnx)
+    {
+        size_t iLargePage;
+
+        pMemLnx->Core.fFlags    |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
+        pMemLnx->cLargePageOrder = cLargePageOrder;
+        pMemLnx->cPages          = cPages;
+
+        /*
+         * Allocate the requested number of large pages.
+         */
+        for (iLargePage = 0; iLargePage < cLargePages; iLargePage++)
+        {
+            struct page *paPages = alloc_pages(fGfp, cLargePageOrder);
+            if (paPages)
+            {
+                size_t const iPageBase = iLargePage << cLargePageOrder;
+                size_t       iPage     = cPagesPerLarge;
+                while (iPage-- > 0)
+                    pMemLnx->apPages[iPageBase + iPage] = &paPages[iPage];
+            }
+            else
+            {
+                /*Log(("rtR0MemObjNativeAllocLarge: cb=%#zx cPages=%#zx cLargePages=%#zx cLargePageOrder=%u cPagesPerLarge=%#zx iLargePage=%#zx -> failed!\n",
+                     cb, cPages, cLargePages, cLargePageOrder, cPagesPerLarge, iLargePage, paPages));*/
+                while (iLargePage-- > 0)
+                    __free_pages(pMemLnx->apPages[iLargePage << cLargePageOrder], cLargePageOrder);
+                rtR0MemObjDelete(&pMemLnx->Core);
+                return VERR_NO_MEMORY;
+            }
+        }
+        *ppMem = &pMemLnx->Core;
+        return VINF_SUCCESS;
+    }
+    return VERR_NO_MEMORY;
+
+#else
+    /*
+     * We don't call rtR0MemObjFallbackAllocLarge here as it can be a really
+     * bad idea to trigger the swap daemon and whatnot.  So, just fail.
+     */
+    RT_NOREF(ppMem, cb, cbLargePage, fFlags, pszTag);
+    return VERR_NOT_SUPPORTED;
+#endif
 }
 
     
@@ -1976,4 +2055,5 @@
         case RTR0MEMOBJTYPE_PHYS_NC:
         case RTR0MEMOBJTYPE_PAGE:
+        case RTR0MEMOBJTYPE_LARGE_PAGE:
         default:
             AssertMsgFailed(("%d\n", pMemLnx->Core.enmType));
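
For a feel of the size/order arithmetic the new rtR0MemObjNativeAllocLarge performs, here is a small user-space sketch that reproduces it for one hypothetical request, 8 MiB backed by 2 MiB large pages on a system with 4 KiB base pages (the numbers are illustrative, not taken from the changeset):

#include <stdio.h>

int main(void)
{
    /* Assumptions for this example only: 4 KiB base pages, 2 MiB large pages, 8 MiB request. */
    unsigned const uPageShift     = 12;
    size_t const   cb             = 8u << 20;
    size_t const   cbLargePage    = 2u << 20;
    size_t const   cPagesPerLarge = cbLargePage >> uPageShift;           /* 512 base pages per large page */

    /* Intended to mirror rtR0MemObjLinuxOrder(): smallest order with (1 << order) >= cPagesPerLarge. */
    unsigned cLargePageOrder = 0;
    while (((size_t)1 << cLargePageOrder) < cPagesPerLarge)
        cLargePageOrder++;                                               /* 9 */

    size_t const cLargePages = cb >> (cLargePageOrder + uPageShift);     /* 4 alloc_pages() calls */
    size_t const cPages      = cb >> uPageShift;                         /* 2048 apPages[] entries */

    printf("order=%u cLargePages=%zu cPages=%zu\n", cLargePageOrder, cLargePages, cPages);
    return 0;
}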