VirtualBox

Changeset 37193 in vbox for trunk/src/VBox


Timestamp: May 24, 2011 1:09:05 PM
Author: vboxsync
Message: GMMR0: Use the GMMCHUNK::fFlags field to indicate large pages - saves 32 bits per chunk :-) - and reduces the time we hold the lock when adding more chunks.
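
For context: the changeset removes the 32-bit GMMCHUNKTYPE enum from GMMCHUNK and instead marks 2 MB large-page chunks with a bit in the 16-bit fFlags word, which is where the 32 bits per chunk are saved. A minimal sketch of the before/after idea, in C (GMMCHUNKDEMO is a hypothetical cut-down layout; the other names are from the diff below):

    #include <stdint.h>

    /* Before (r37192): a full 32-bit enum to tell two chunk kinds apart. */
    typedef enum GMMCHUNKTYPE
    {
        GMMCHUNKTYPE_NON_CONTINUOUS = 1,    /* 4 KB pages */
        GMMCHUNKTYPE_CONTINUOUS     = 2     /* one 2 MB continuous physical range */
    } GMMCHUNKTYPE;

    /* After (r37193): one bit in a 16-bit flag word does the same job. */
    #define GMM_CHUNK_FLAGS_LARGE_PAGE  UINT16_C(0x0001)

    typedef struct GMMCHUNKDEMO         /* hypothetical cut-down GMMCHUNK */
    {
        uint16_t cShared;               /* existing 16-bit field */
        uint16_t fFlags;                /* GMM_CHUNK_FLAGS_XXX */
    } GMMCHUNKDEMO;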

File: 1 edited

Legend:

Unmodified lines have no prefix; added lines are prefixed with '+', removed lines with '-'.
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (revision 37192)
+++ trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (revision 37193)

@@ -370,12 +370,4 @@
 /** Pointer to a GMM allocation chunk mapping. */
 typedef struct GMMCHUNKMAP *PGMMCHUNKMAP;
-
-typedef enum GMMCHUNKTYPE
-{
-    GMMCHUNKTYPE_INVALID        = 0,
-    GMMCHUNKTYPE_NON_CONTINUOUS = 1,      /* 4 kb pages */
-    GMMCHUNKTYPE_CONTINUOUS     = 2,      /* one 2 MB continuous physical range. */
-    GMMCHUNKTYPE_32BIT_HACK     = 0x7fffffff
-} GMMCHUNKTYPE;
 
 

@@ -429,9 +421,16 @@
     /** The number of shared pages.  (Giant mtx.) */
     uint16_t            cShared;
-    /** Chunk type.  (Giant mtx.) */
-    GMMCHUNKTYPE        enmType;
     /** The pages.  (Giant mtx.) */
     GMMPAGE             aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
 } GMMCHUNK;
+
+/** Indicates that the NUMA properties of the memory is unknown. */
+#define GMM_CHUNK_NUMA_ID_UNKNOWN   UINT16_C(0xfffe)
+
+/** @name GMM_CHUNK_FLAGS_XXX - chunk flags.
+ * @{ */
+/** Indicates that the chunk is a large page (2MB). */
+#define GMM_CHUNK_FLAGS_LARGE_PAGE  UINT16_C(0x0001)
+/** @}  */
 
 
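
With the enum gone, code that needs to know whether a chunk backs a large page tests the flag bit instead of comparing enmType. A hedged illustration; this helper is not part of the changeset:

    /* Hypothetical helper, shown only to illustrate the flag test. */
    DECLINLINE(bool) gmmR0IsLargePageChunk(PGMMCHUNK pChunk)
    {
        /* Old style: pChunk->enmType == GMMCHUNKTYPE_CONTINUOUS */
        return (pChunk->fFlags & GMM_CHUNK_FLAGS_LARGE_PAGE) != 0;
    }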
     
@@ -474,6 +473,4 @@
 #define GMM_CHUNK_FREE_SET_LISTS    (GMM_CHUNK_NUM_PAGES >> GMM_CHUNK_FREE_SET_SHIFT)
 
-/** Indicates that the NUMA properties of the memory is unknown. */
-#define GMM_CHUNK_NUMA_ID_UNKNOWN   UINT16_C(0xfffe)
 
 /**
     
@@ -766,11 +763,11 @@
                 /* Don't reuse possibly partial chunks because of the virtual
                    address space limitation. */
-                pGMM->fBoundMemoryMode = true;
+                pGMM->fBoundMemoryMode      = true;
 #  else
-                pGMM->fBoundMemoryMode = false;
+                pGMM->fBoundMemoryMode      = false;
 #  endif
 # else
                 pGMM->fLegacyAllocationMode = true;
-                pGMM->fBoundMemoryMode = true;
+                pGMM->fBoundMemoryMode      = true;
 # endif
 #endif
     
@@ -1802,5 +1799,5 @@
      */
     int32_t idChunk = ++pGMM->idChunkPrev;
-#if 0 /* test the fallback first */
+#if 0 /** @todo enable this code */
     if (    idChunk <= GMM_CHUNKID_LAST
         &&  idChunk > NIL_GMM_CHUNKID
     
@@ -1838,8 +1835,8 @@
  * Registers a new chunk of memory.
  *
- * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk. The caller
- * must own the global lock.
- *
- * @returns VBox status code.
+ * This is called by both gmmR0AllocateOneChunk and GMMR0SeedChunk.
+ *
+ * @returns VBox status code.  On success, the giant GMM lock will be held, the
+ *          caller must release it (ugly).
  * @param   pGMM            Pointer to the GMM instance.
  * @param   pSet            Pointer to the set.
     
@@ -1847,10 +1844,17 @@
  * @param   hGVM            The affinity of the chunk. NIL_GVM_HANDLE for no
  *                          affinity.
- * @param   enmChunkType    Chunk type (continuous or non-continuous)
- * @param   ppChunk         Chunk address (out)
- */
-static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
-{
+ * @param   fChunkFlags     The chunk flags, GMM_CHUNK_FLAGS_XXX.
+ * @param   ppChunk         Chunk address (out).  Optional.
+ *
+ * @remarks The caller must not own the giant GMM mutex.
+ *          The giant GMM mutex will be acquired and returned acquired in
+ *          the success path.   On failure, no locks will be held.
+ */
+static int gmmR0RegisterChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, RTR0MEMOBJ MemObj, uint16_t hGVM, uint16_t fChunkFlags,
+                              PGMMCHUNK *ppChunk)
+{
+    Assert(pGMM->hMtxOwner != RTThreadNativeSelf());
     Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);
+    Assert(fChunkFlags == 0 || fChunkFlags == GMM_CHUNK_FLAGS_LARGE_PAGE);
 
     int rc;
     
@@ -1866,5 +1870,5 @@
         /*pChunk->iFreeHead = 0;*/
         pChunk->idNumaNode  = GMM_CHUNK_NUMA_ID_UNKNOWN;
-        pChunk->enmType     = enmChunkType;
+        pChunk->fFlags      = fChunkFlags;
         for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
         {
     
@@ -1879,28 +1883,32 @@
          * This has to be done behind the mutex of course.
          */
-        if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
-        {
-            pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
-            if (    pChunk->Core.Key != NIL_GMM_CHUNKID
-                &&  pChunk->Core.Key <= GMM_CHUNKID_LAST
-                &&  RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
+        rc = gmmR0MutexAcquire(pGMM);
+        if (RT_SUCCESS(rc))
+        {
+            if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
             {
-                pGMM->cChunks++;
-                RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
-                gmmR0LinkChunk(pChunk, pSet);
-                LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
-
-                if (ppChunk)
-                    *ppChunk = pChunk;
-
-                GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
-                return VINF_SUCCESS;
+                pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
+                if (    pChunk->Core.Key != NIL_GMM_CHUNKID
+                    &&  pChunk->Core.Key <= GMM_CHUNKID_LAST
+                    &&  RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
+                {
+                    pGMM->cChunks++;
+                    RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
+                    gmmR0LinkChunk(pChunk, pSet);
+                    LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
+
+                    if (ppChunk)
+                        *ppChunk = pChunk;
+                    GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
+                    return VINF_SUCCESS;
+                }
+
+                /* bail out */
+                rc = VERR_INTERNAL_ERROR;
             }
-
-            /* bail out */
-            rc = VERR_INTERNAL_ERROR;
-        }
-        else
-            rc = VERR_INTERNAL_ERROR_5;
+            else
+                rc = VERR_INTERNAL_ERROR_5;
+            gmmR0MutexRelease(pGMM);
+        }
 
         RTMemFree(pChunk);
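
The rewritten gmmR0RegisterChunk is a lock handoff: the caller must enter without the giant mutex, the function takes it around the chunk-ID allocation and AVL insertion, and on success it deliberately returns with the mutex still held for the caller to release. A generic sketch of that contract with a plain pthread mutex (illustration only, not VBox code):

    #include <pthread.h>

    /* Enter unlocked.  Returns 0 with *pMtx held (caller must release it);
       returns non-zero with no lock held on any failure. */
    static int registerWithLockHandoff(pthread_mutex_t *pMtx,
                                       int (*pfnInsert)(void *), void *pvItem)
    {
        int rc = pthread_mutex_lock(pMtx);
        if (rc != 0)
            return rc;                  /* could not lock: nothing held */

        rc = pfnInsert(pvItem);         /* e.g. the RTAvlU32Insert step */
        if (rc == 0)
            return 0;                   /* success: lock intentionally kept */

        pthread_mutex_unlock(pMtx);     /* failure: drop the lock first */
        return rc;
    }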
     
@@ -1919,38 +1927,32 @@
  * @param   pSet            Pointer to the set.
  * @param   hGVM            The affinity of the new chunk.
- * @param   enmChunkType    Chunk type (continuous or non-continuous)
- * @param   ppChunk         Chunk address (out)
- *
- * @remarks Called without owning the mutex.
- */
-static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM, GMMCHUNKTYPE enmChunkType, PGMMCHUNK *ppChunk = NULL)
+ *
+ * @remarks The giant mutex will be temporarily abandoned during the allocation.
+ */
+static int gmmR0AllocateOneChunk(PGMM pGMM, PGMMCHUNKFREESET pSet, uint16_t hGVM)
 {
     /*
      * Allocate the memory.
-     */
-    RTR0MEMOBJ MemObj;
-    int        rc;
-
-    AssertCompile(GMM_CHUNK_SIZE == _2M);
-    AssertReturn(enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS || enmChunkType == GMMCHUNKTYPE_CONTINUOUS, VERR_INVALID_PARAMETER);
-
-    /* Leave the lock temporarily as the allocation might take long. */
+     *
+     * Note! We leave the giant GMM lock temporarily as the allocation might
+     *       take a long time. gmmR0RegisterChunk reacquires it (ugly).
+     */
     gmmR0MutexRelease(pGMM);
-    if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
-        rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
-    else
-        rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
-
-    int rc2 = gmmR0MutexAcquire(pGMM);
-    AssertRCReturn(rc2, rc2);
-
-    if (RT_SUCCESS(rc))
-    {
-        rc = gmmR0RegisterChunk(pGMM, pSet, MemObj, hGVM, enmChunkType, ppChunk);
-        if (RT_FAILURE(rc))
-            RTR0MemObjFree(MemObj, false /* fFreeMappings */);
-    }
+
+    RTR0MEMOBJ hMemObj;
+    int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
     /** @todo Check that RTR0MemObjAllocPhysNC always returns VERR_NO_MEMORY on
      *        allocation failure. */
+    if (RT_SUCCESS(rc))
+    {
+        rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, hGVM, 0 /*fChunkFlags*/, NULL);
+        if (RT_SUCCESS(rc))
+            return rc;
+
+        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
+    }
+
+    int rc2 = gmmR0MutexAcquire(pGMM);
+    AssertRCReturn(rc2, RT_FAILURE(rc) ? rc : rc2);
     return rc;
 }
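
gmmR0AllocateOneChunk is the other half of the dance: it is entered with the giant mutex held, drops it so the potentially slow physical allocation runs unlocked, and guarantees the mutex is held again on exit (in the real code gmmR0RegisterChunk does the reacquisition on the success path). A simplified sketch of the shape, using pthread/malloc stand-ins rather than the RTR0 APIs:

    #include <pthread.h>
    #include <stdlib.h>

    /* Entered with *pMtx held; always returns with it held again. */
    static int allocateOneChunkSketch(pthread_mutex_t *pMtx, void **ppvChunk)
    {
        pthread_mutex_unlock(pMtx);                 /* allocation may take long */

        void *pvChunk = malloc(2 * 1024 * 1024);    /* stand-in for RTR0MemObjAllocPhysNC */

        pthread_mutex_lock(pMtx);                   /* retake before touching shared state */
        if (!pvChunk)
            return -1;
        *ppvChunk = pvChunk;                        /* register it under the lock */
        return 0;
    }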
     
@@ -2001,5 +2003,5 @@
         while (pSet->cFreePages < cPages)
         {
-            int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
+            int rc = gmmR0AllocateOneChunk(pGMM, pSet, pGVM->hSelf);
             if (RT_FAILURE(rc))
                 return rc;
     
@@ -2034,5 +2036,5 @@
 
             /* Allocate more. */
-            int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM, GMMCHUNKTYPE_NON_CONTINUOUS);
+            int rc = gmmR0AllocateOneChunk(pGMM, pSet, hGVM);
             if (RT_FAILURE(rc))
                 return rc;
     
@@ -2636,8 +2638,6 @@
     {
         const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
-        PGMMCHUNK      pChunk;
-        GMMPAGEDESC    PageDesc;
-
-        if (RT_UNLIKELY(pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages > pGVM->gmm.s.Reserved.cBasePages))
+        if (RT_UNLIKELY(  pGVM->gmm.s.Allocated.cBasePages + pGVM->gmm.s.cBalloonedPages + cPages
+                        > pGVM->gmm.s.Reserved.cBasePages))
         {
             Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
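
The rewrapped condition is the per-VM reservation check: a large-page request is refused when allocated + ballooned + requested pages would exceed the VM's reserved base-page count. A tiny worked instance with invented numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented example values, counted in 4 KB pages. */
        uint64_t cReservedBasePages  = 0x20000;     /* 512 MB reservation */
        uint64_t cAllocatedBasePages = 0x1fe10;
        uint64_t cBalloonedPages     = 0;
        unsigned cPages              = 512;         /* one 2 MB large page */

        if (cAllocatedBasePages + cBalloonedPages + cPages > cReservedBasePages)
            printf("refused: 0x%llx > 0x%llx reserved\n",
                   (unsigned long long)(cAllocatedBasePages + cBalloonedPages + cPages),
                   (unsigned long long)cReservedBasePages);
        return 0;
    }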
     
@@ -2647,35 +2647,57 @@
         }
 
-        /* Allocate a new continuous chunk. */
-        rc = gmmR0AllocateOneChunk(pGMM, &pGMM->Private, pGVM->hSelf, GMMCHUNKTYPE_CONTINUOUS, &pChunk);
-        if (RT_FAILURE(rc))
-        {
-            gmmR0MutexRelease(pGMM);
-            return rc;
-        }
-
-        /* Unlink the new chunk from the free list. */
-        gmmR0UnlinkChunk(pChunk);
-
-        /* Allocate all pages. */
-        gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
-        /* Return the first page as we'll use the whole chunk as one big page. */
-        *pIdPage = PageDesc.idPage;
-        *pHCPhys = PageDesc.HCPhysGCPhys;
-
-        for (unsigned i = 1; i < cPages; i++)
-            gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
-
-        /* Update accounting. */
-        pGVM->gmm.s.Allocated.cBasePages += cPages;
-        pGVM->gmm.s.cPrivatePages        += cPages;
-        pGMM->cAllocatedPages            += cPages;
-
-        gmmR0LinkChunk(pChunk, &pGMM->Private);
+        /*
+         * Allocate a new large page chunk.
+         *
+         * Note! We leave the giant GMM lock temporarily as the allocation might
+         *       take a long time.  gmmR0RegisterChunk will retake it (ugly).
+         */
+        AssertCompile(GMM_CHUNK_SIZE == _2M);
+        gmmR0MutexRelease(pGMM);
+
+        RTR0MEMOBJ hMemObj;
+        rc = RTR0MemObjAllocPhysEx(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
+        if (RT_SUCCESS(rc))
+        {
+            PGMMCHUNK pChunk;
+            rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, hMemObj, pGVM->hSelf, GMM_CHUNK_FLAGS_LARGE_PAGE, &pChunk);
+            if (RT_SUCCESS(rc))
+            {
+                /*
+                 * Allocate all the pages in the chunk.
+                 */
+                /* Unlink the new chunk from the free list. */
+                gmmR0UnlinkChunk(pChunk);
+
+                /** @todo rewrite this to skip the looping. */
+                /* Allocate all pages. */
+                GMMPAGEDESC PageDesc;
+                gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
+
+                /* Return the first page as we'll use the whole chunk as one big page. */
+                *pIdPage = PageDesc.idPage;
+                *pHCPhys = PageDesc.HCPhysGCPhys;
+
+                for (unsigned i = 1; i < cPages; i++)
+                    gmmR0AllocatePage(pGMM, pGVM->hSelf, pChunk, &PageDesc);
+
+                /* Update accounting. */
+                pGVM->gmm.s.Allocated.cBasePages += cPages;
+                pGVM->gmm.s.cPrivatePages        += cPages;
+                pGMM->cAllocatedPages            += cPages;
+
+                gmmR0LinkChunk(pChunk, &pGMM->Private);
+                gmmR0MutexRelease(pGMM);
+            }
+            else
+                RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
+        }
     }
     else
+    {
+        gmmR0MutexRelease(pGMM);
         rc = VERR_INTERNAL_ERROR_5;
-
-    gmmR0MutexRelease(pGMM);
+    }
+
     LogFlow(("GMMR0AllocateLargePage: returns %Rrc\n", rc));
     return rc;
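
GMMR0AllocateLargePage now allocates the 2 MB chunk itself via RTR0MemObjAllocPhysEx, registers it with GMM_CHUNK_FLAGS_LARGE_PAGE, unlinks it so it never looks partially free, and claims every constituent page. The page arithmetic, assuming the usual 4 KB host page size:

    #include <assert.h>

    #define DEMO_PAGE_SHIFT     12                  /* 4 KB pages (assumption) */
    #define DEMO_GMM_CHUNK_SIZE (2U * 1024 * 1024)  /* _2M, per the AssertCompile */

    int main(void)
    {
        /* 2 MB / 4 KB = 512 GMM pages back one large page, which is why
           the accounting above bumps the counters by cPages = 512 at once. */
        unsigned cPages = DEMO_GMM_CHUNK_SIZE >> DEMO_PAGE_SHIFT;
        assert(cPages == 512);
        return 0;
    }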
     
@@ -3847,5 +3869,6 @@
 
     /*
-     * Lock the memory before taking the semaphore.
+     * Lock the memory and add it as new chunk with our hGVM.
+     * (The GMM locking is done inside gmmR0RegisterChunk.)
      */
     RTR0MEMOBJ MemObj;
     
@@ -3853,16 +3876,8 @@
     if (RT_SUCCESS(rc))
     {
-        /* Grab the lock. */
-        rc = gmmR0MutexAcquire(pGMM);
+        rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, 0 /*fChunkFlags*/, NULL);
         if (RT_SUCCESS(rc))
-        {
-            /*
-             * Add a new chunk with our hGVM.
-             */
-            rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
             gmmR0MutexRelease(pGMM);
-        }
-
-        if (RT_FAILURE(rc))
+        else
             RTR0MemObjFree(MemObj, false /* fFreeMappings */);
     }
     
@@ -3871,4 +3886,5 @@
     return rc;
 }
+
 
 typedef struct