VirtualBox

Changeset 37206 in vbox for trunk


Timestamp: May 24, 2011 6:43:32 PM
Author: vboxsync
Message: GMMR0: Simplified the cleanup, letting the VMs work in parallel. (Fixed a redo-from-start bug.)

File: 1 edited
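In short, the hunks below drop the global hMtxCleanup fast mutex, introduce a per-chunk mutex array (pGMM->aChunkMtx), give gmmR0FreeChunk() a new fRelaxedSem parameter so it can temporarily release the giant GMM mutex (returning true when it did), and teach the cleanup scan in GMMR0CleanupVM() to detect concurrent list changes through pGMM->Private.idGeneration and restart, which is the redo-from-start fix the message mentions. Short annotated sketches follow several of the hunks.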

Legend:

  '-' prefix: removed in r37206
  '+' prefix: added in r37206
  unprefixed: unchanged context
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

--- trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r37203)
+++ trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r37206)
@@ -514,8 +514,4 @@
     PAVLGCPTRNODECORE   pGlobalSharedModuleTree;
 
-    /** The fast mutex protecting the GMM cleanup.
-     * This is serializes VMs cleaning up their memory, so that we can
-     * safely leave the primary mutex (hMtx). */
-    RTSEMFASTMUTEX      hMtxCleanup;
     /** The chunk list.  For simplifying the cleanup process. */
     RTLISTNODE          ChunkList;
     
@@ -697,5 +693,5 @@
 DECLINLINE(void)             gmmR0UnlinkChunk(PGMMCHUNK pChunk);
 static uint32_t              gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
-static void                  gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
+static bool                  gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem);
 static void                  gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
 static int                   gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
     
@@ -732,63 +728,59 @@
     if (RT_SUCCESS(rc))
     {
-        rc = RTSemFastMutexCreate(&pGMM->hMtxCleanup);
+        unsigned iMtx;
+        for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->aChunkMtx); iMtx++)
+        {
+            rc = RTSemFastMutexCreate(&pGMM->aChunkMtx[iMtx].hMtx);
+            if (RT_FAILURE(rc))
+                break;
+        }
         if (RT_SUCCESS(rc))
         {
-            unsigned iMtx;
-            for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->aChunkMtx); iMtx++)
-            {
-                rc = RTSemFastMutexCreate(&pGMM->aChunkMtx[iMtx].hMtx);
-                if (RT_FAILURE(rc))
-                    break;
-            }
+            /*
+             * Check and see if RTR0MemObjAllocPhysNC works.
+             */
+#if 0 /* later, see #3170. */
+            RTR0MEMOBJ MemObj;
+            rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
             if (RT_SUCCESS(rc))
             {
-                /*
-                 * Check and see if RTR0MemObjAllocPhysNC works.
-                 */
-#if 0 /* later, see #3170. */
-                RTR0MEMOBJ MemObj;
-                rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
-                if (RT_SUCCESS(rc))
-                {
-                    rc = RTR0MemObjFree(MemObj, true);
-                    AssertRC(rc);
-                }
-                else if (rc == VERR_NOT_SUPPORTED)
-                    pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
-                else
-                    SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
+                rc = RTR0MemObjFree(MemObj, true);
+                AssertRC(rc);
+            }
+            else if (rc == VERR_NOT_SUPPORTED)
+                pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
+            else
+                SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
 #else
 # if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
-                pGMM->fLegacyAllocationMode = false;
+            pGMM->fLegacyAllocationMode = false;
 #  if ARCH_BITS == 32
-                /* Don't reuse possibly partial chunks because of the virtual
-                   address space limitation. */
-                pGMM->fBoundMemoryMode      = true;
+            /* Don't reuse possibly partial chunks because of the virtual
+               address space limitation. */
+            pGMM->fBoundMemoryMode      = true;
 #  else
-                pGMM->fBoundMemoryMode      = false;
+            pGMM->fBoundMemoryMode      = false;
 #  endif
 # else
-                pGMM->fLegacyAllocationMode = true;
-                pGMM->fBoundMemoryMode      = true;
+            pGMM->fLegacyAllocationMode = true;
+            pGMM->fBoundMemoryMode      = true;
 # endif
 #endif
 
-                /*
-                 * Query system page count and guess a reasonable cMaxPages value.
-                 */
-                pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
-
-                g_pGMM = pGMM;
-                LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
-                return VINF_SUCCESS;
-            }
-
             /*
-             * Bail out.
+             * Query system page count and guess a reasonable cMaxPages value.
              */
-            while (iMtx-- > 0)
-                RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);
-        }
+            pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
+
+            g_pGMM = pGMM;
+            LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Bail out.
+         */
+        while (iMtx-- > 0)
+            RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);
         RTSemFastMutexDestroy(pGMM->hMtx);
     }
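A note on the init hunk just above: the per-chunk mutexes are now created up front, and the shared "Bail out" path destroys exactly the mutexes that were created (iMtx is left one past the last success). A minimal standalone sketch of that create-or-unwind idiom, using POSIX threads in place of the IPRT fast-mutex API and a hypothetical array size:

    #include <pthread.h>

    #define NUM_CHUNK_MTX 64 /* stands in for RT_ELEMENTS(pGMM->aChunkMtx) */

    static pthread_mutex_t g_aChunkMtx[NUM_CHUNK_MTX];

    /* Create every mutex, or unwind exactly the ones already created. */
    static int initChunkMutexes(void)
    {
        unsigned iMtx;
        int rc = 0;
        for (iMtx = 0; iMtx < NUM_CHUNK_MTX; iMtx++)
        {
            rc = pthread_mutex_init(&g_aChunkMtx[iMtx], NULL);
            if (rc != 0)
                break;
        }
        if (rc == 0)
            return 0;

        while (iMtx-- > 0) /* same unwind idiom as the "Bail out" block */
            pthread_mutex_destroy(&g_aChunkMtx[iMtx]);
        return rc;
    }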
     
@@ -828,6 +820,4 @@
     RTSemFastMutexDestroy(pGMM->hMtx);
     pGMM->hMtx        = NIL_RTSEMFASTMUTEX;
-    RTSemFastMutexDestroy(pGMM->hMtxCleanup);
-    pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX;
 
     /* Free any chunks still hanging around. */
     
@@ -1114,7 +1104,6 @@
  *
  * This only works if gmmR0ChunkMutexAcquire was called with
- * GMMR0CHUNK_MTX_KEEP_GIANT.  Release will NOT retake the giant
- * when dropped this way, the behavior will be like if
- * GMMR0CHUNK_MTX_DROP_GIANT was used.
+ * GMMR0CHUNK_MTX_KEEP_GIANT.  gmmR0ChunkMutexRelease will retake the giant
+ * mutex, i.e. behave as if GMMR0CHUNK_MTX_RETAKE_GIANT was used.
  *
  * @returns VBox status code (assuming success is ok).
     
@@ -1125,5 +1114,5 @@
     AssertReturn(pMtxState->fFlags == GMMR0CHUNK_MTX_KEEP_GIANT, VERR_INTERNAL_ERROR_2);
     Assert(pMtxState->pGMM->hMtxOwner == RTThreadNativeSelf());
-    pMtxState->fFlags = GMMR0CHUNK_MTX_DROP_GIANT;
+    pMtxState->fFlags = GMMR0CHUNK_MTX_RETAKE_GIANT;
     /** @todo GMM life cycle cleanup (we may race someone
      *        destroying and cleaning up GMM)? */
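The two hunks above change what happens when the giant mutex is dropped from inside a chunk-mutex section: after r37206, gmmR0ChunkMutexRelease retakes the giant mutex (GMMR0CHUNK_MTX_RETAKE_GIANT) instead of leaving it dropped. A rough sketch of that two-level pattern, with POSIX mutexes and illustrative names rather than the real GMM types:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct MTXSTATE
    {
        pthread_mutex_t *pGiant;       /* the outer "giant" mutex */
        bool             fRetakeGiant; /* like GMMR0CHUNK_MTX_RETAKE_GIANT */
    } MTXSTATE;

    /* Drop the giant while still holding the chunk mutex; remember that
       the release path must take it back, as r37206 now documents. */
    static void chunkMutexDropGiant(MTXSTATE *pState)
    {
        pState->fRetakeGiant = true;
        pthread_mutex_unlock(pState->pGiant);
    }

    static void chunkMutexRelease(MTXSTATE *pState, pthread_mutex_t *pChunkMtx)
    {
        pthread_mutex_unlock(pChunkMtx);
        if (pState->fRetakeGiant)
            pthread_mutex_lock(pState->pGiant);
    }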
     
@@ -1151,5 +1140,4 @@
 #endif
 
-    int rc = RTSemFastMutexRequest(pGMM->hMtxCleanup); AssertRC(rc);
     gmmR0MutexAcquire(pGMM);
     uint64_t uLockNanoTS = RTTimeSystemNanoTS();
     
@@ -1175,5 +1163,5 @@
         /*
          * Walk the entire pool looking for pages that belong to this VM
-         * and left over mappings.  (This'll only catch private pages,
+         * and leftover mappings.  (This'll only catch private pages,
          * shared pages will be 'left behind'.)
          */
     
@@ -1191,9 +1179,11 @@
                 if (gmmR0CleanupVMScanChunk(pGMM, pGVM, pChunk))
                 {
-                    gmmR0MutexAcquire(pGMM);
+                    /* We left the giant mutex, so reset the yield counters. */
                     uLockNanoTS = RTTimeSystemNanoTS();
+                    iCountDown  = 64;
                 }
                 else
                 {
+                    /* Didn't leave it, so do normal yielding. */
                     if (!iCountDown)
                         gmmR0MutexYield(pGMM, &uLockNanoTS);
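For context on the counters being reset above: the cleanup loop holds the giant mutex while scanning and periodically yields it so other VMs can make progress; when a callee already dropped the mutex, the hold timer and countdown simply restart. The excerpt does not show gmmR0MutexYield itself, so the following is only a guess at its shape, a hold-time heuristic sketched with POSIX primitives and hypothetical names:

    #include <pthread.h>
    #include <sched.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t nowNanoTS(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + (uint64_t)ts.tv_nsec;
    }

    /* Briefly release the mutex if we've held it "too long" (1 ms here,
       an arbitrary figure for the sketch), then retake it and restart
       the hold timer. */
    static void mutexYield(pthread_mutex_t *pMtx, uint64_t *puLockNanoTS)
    {
        if (nowNanoTS() - *puLockNanoTS > UINT64_C(1000000))
        {
            pthread_mutex_unlock(pMtx);
            sched_yield();              /* give blocked waiters a chance */
            pthread_mutex_lock(pMtx);
            *puLockNanoTS = nowNanoTS();
        }
    }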
     
@@ -1216,4 +1206,5 @@
         do
         {
+            fRedoFromStart = false;
             iCountDown = 10240;
             pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
     
@@ -1225,9 +1216,18 @@
                          || pChunk->hGVM == pGVM->hSelf))
                 {
-                    gmmR0FreeChunk(pGMM, pGVM, pChunk);
-                    iCountDown = 1;
+                    uint64_t const idGenerationOld = pGMM->Private.idGeneration;
+                    if (gmmR0FreeChunk(pGMM, pGVM, pChunk, true /*fRelaxedSem*/))
+                    {
+                        /* We've left the giant mutex, restart? (+1 for our unlink) */
+                        fRedoFromStart = pGMM->Private.idGeneration != idGenerationOld + 1;
+                        if (fRedoFromStart)
+                            break;
+                        uLockNanoTS = RTTimeSystemNanoTS();
+                        iCountDown = 10240;
+                    }
                 }
+
+                /* Advance and maybe yield the lock. */
                 pChunk = pNext;
-
                 if (--iCountDown == 0)
                 {
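This hunk is the redo-from-start fix named in the commit message: before freeing a chunk, the loop snapshots pGMM->Private.idGeneration; if gmmR0FreeChunk reports that it dropped the giant mutex, any generation change beyond the +1 for the chunk's own unlink means another thread touched the lists, and the scan restarts. A self-contained sketch of that generation-counter pattern, with simplified stand-in types rather than the GMM structures:

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct NODE { struct NODE *pNext; bool fMine; } NODE;
    typedef struct LIST { NODE *pHead; uint64_t idGeneration; } LIST;

    /* Stand-in for gmmR0FreeChunk: unlinks pNode (bumping the generation
       once) and returns true when it had to drop the list lock. */
    static bool freeNodeRelaxed(LIST *pList, NODE *pNode)
    {
        for (NODE **ppCur = &pList->pHead; *ppCur; ppCur = &(*ppCur)->pNext)
            if (*ppCur == pNode)
            {
                *ppCur = pNode->pNext;
                pList->idGeneration++;
                break;
            }
        return true; /* pretend the lock was dropped */
    }

    static void cleanupMine(LIST *pList)
    {
        bool fRedoFromStart;
        do
        {
            fRedoFromStart = false;
            for (NODE *pNode = pList->pHead; pNode; )
            {
                NODE * const pNext = pNode->pNext; /* grab before pNode may die */
                if (pNode->fMine)
                {
                    uint64_t const idGenerationOld = pList->idGeneration;
                    if (freeNodeRelaxed(pList, pNode))
                    {
                        /* Lock was dropped; +1 accounts for our own unlink.
                           Anything more means someone else changed the list. */
                        fRedoFromStart = pList->idGeneration != idGenerationOld + 1;
                        if (fRedoFromStart)
                            break;
                    }
                }
                pNode = pNext;
            }
        } while (fRedoFromStart);
    }

The +1 tolerance matters: the free itself bumps the generation when it unlinks the chunk, so a delta of exactly one means nobody else interfered and the saved pNext is still safe to follow.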
     
@@ -1281,5 +1281,4 @@
     GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
     gmmR0MutexRelease(pGMM);
-    RTSemFastMutexRelease(pGMM->hMtxCleanup);
 
     LogFlow(("GMMR0CleanupVM: returns\n"));
     
@@ -1290,8 +1289,8 @@
  * Scan one chunk for private pages belonging to the specified VM.
  *
- * @note    This function is ugly since may drop the ownership of the giant GMM
- *          mutex!
- *
- * @returns @c true if we've dropped the giant mutex, @c false if we didn't.
+ * @note    This function may drop the giant mutex!
+ *
+ * @returns @c true if we've temporarily dropped the giant mutex, @c false if
+ *          we didn't.
  * @param   pGMM        Pointer to the GMM instance.
  * @param   pGVM        The global VM handle.
     
@@ -1410,4 +1409,5 @@
                 AssertRC(rc);
             }
+
             gmmR0ChunkMutexRelease(&MtxState, pChunk);
             return true;
     
@@ -2819,5 +2819,5 @@
 
             /* Release the memory immediately. */
-            gmmR0FreeChunk(pGMM, NULL, pChunk);
+            gmmR0FreeChunk(pGMM, NULL, pChunk, false /*fRelaxedSem*/); /** @todo this can be relaxed too! */
 
             /* Update accounting. */
     
@@ -2868,6 +2868,8 @@
  *                      unmap and free the chunk in one go.
  * @param   pChunk      The chunk to free.
- */
-static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
+ * @param   fRelaxedSem Whether we can release the semaphore while doing the
+ *                      freeing (@c true) or not.
+ */
+static bool gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
 {
     Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);
     
@@ -2878,4 +2880,5 @@
     /*
      * Cleanup hack! Unmap the chunk from the callers address space.
+     * This shouldn't happen, so screw lock contention...
      */
     if (    pChunk->cMappingsX
     
@@ -2894,55 +2897,58 @@
         /* The chunk can be mapped by more than one VM if fBoundMemoryMode is false! */
         Log(("gmmR0FreeChunk: chunk still has %d mappings; don't free!\n", pChunk->cMappingsX));
-    }
-    else
-    {
-        /*
-         * Try free the memory object.
-         */
-/** @todo drop the giant lock here! */
-        int rc = RTR0MemObjFree(pChunk->hMemObj, false /* fFreeMappings */);
-        if (RT_SUCCESS(rc))
-        {
-            pChunk->hMemObj = NIL_RTR0MEMOBJ;
-
-            /*
-             * Unlink it from everywhere.
-             */
-            gmmR0UnlinkChunk(pChunk);
-
-            RTListNodeRemove(&pChunk->ListNode);
-
-            PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
-            Assert(pCore == &pChunk->Core); NOREF(pCore);
-
-            PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
-            if (pTlbe->pChunk == pChunk)
-            {
-                pTlbe->idChunk = NIL_GMM_CHUNKID;
-                pTlbe->pChunk = NULL;
-            }
-
-            Assert(pGMM->cChunks > 0);
-            pGMM->cChunks--;
-
-            /*
-             * Free the Chunk ID and struct.
-             */
-            gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
-            pChunk->Core.Key = NIL_GMM_CHUNKID;
-
-            RTMemFree(pChunk->paMappingsX);
-            pChunk->paMappingsX = NULL;
-
-            RTMemFree(pChunk);
-            pChunk = NULL;              /* (for gmmR0ChunkMutexRelease) */
-
-            pGMM->cFreedChunks++;
-        }
-        else
-            AssertRC(rc);
-    }
-
-    gmmR0ChunkMutexRelease(&MtxState, pChunk);
+        gmmR0ChunkMutexRelease(&MtxState, pChunk);
+        return false;
+    }
+
+
+    /*
+     * Save and trash the handle.
+     */
+    RTR0MEMOBJ const hMemObj = pChunk->hMemObj;
+    pChunk->hMemObj = NIL_RTR0MEMOBJ;
+
+    /*
+     * Unlink it from everywhere.
+     */
+    gmmR0UnlinkChunk(pChunk);
+
+    RTListNodeRemove(&pChunk->ListNode);
+
+    PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
+    Assert(pCore == &pChunk->Core); NOREF(pCore);
+
+    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
+    if (pTlbe->pChunk == pChunk)
+    {
+        pTlbe->idChunk = NIL_GMM_CHUNKID;
+        pTlbe->pChunk = NULL;
+    }
+
+    Assert(pGMM->cChunks > 0);
+    pGMM->cChunks--;
+
+    /*
+     * Free the Chunk ID before dropping the locks and freeing the rest.
+     */
+    gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
+    pChunk->Core.Key = NIL_GMM_CHUNKID;
+
+    pGMM->cFreedChunks++;
+
+    gmmR0ChunkMutexRelease(&MtxState, NULL);
+    if (fRelaxedSem)
+        gmmR0MutexRelease(pGMM);
+
+    RTMemFree(pChunk->paMappingsX);
+    pChunk->paMappingsX = NULL;
+
+    RTMemFree(pChunk);
+
+    int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
+    AssertLogRelRC(rc);
+
+    if (fRelaxedSem)
+        gmmR0MutexAcquire(pGMM);
+    return fRelaxedSem;
 }
 
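The rewrite above gives gmmR0FreeChunk a straight-line shape: bail out early (still holding the locks) if the chunk is mapped, otherwise unlink it from every structure under the giant mutex, then, when fRelaxedSem allows, drop the mutex for the expensive RTR0MemObjFree and heap frees, retake it, and report whether it was dropped. A condensed sketch of that unlink-then-free-outside-the-lock shape, using POSIX threads and hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct CHUNK { struct CHUNK *pNext; void *pvMem; } CHUNK;

    /* Caller enters holding *pGiant; returns true if the lock was dropped. */
    static bool freeChunkRelaxed(pthread_mutex_t *pGiant, CHUNK **ppHead,
                                 CHUNK *pChunk, bool fRelaxedSem)
    {
        /* 1) Unlink from all shared structures while they're protected. */
        for (CHUNK **ppCur = ppHead; *ppCur; ppCur = &(*ppCur)->pNext)
            if (*ppCur == pChunk)
            {
                *ppCur = pChunk->pNext;
                break;
            }

        /* 2) Save what the slow free needs and trash the handle. */
        void * const pvMem = pChunk->pvMem;
        pChunk->pvMem = NULL;

        /* 3) Do the expensive frees without holding the giant lock. */
        if (fRelaxedSem)
            pthread_mutex_unlock(pGiant);

        free(pChunk);
        free(pvMem);   /* stands in for the slow RTR0MemObjFree call */

        if (fRelaxedSem)
            pthread_mutex_lock(pGiant);
        return fRelaxedSem;
    }

Freeing outside the lock is safe here because, once the chunk is unlinked from every shared structure, no other thread can reach it; the generation counter is what tells concurrent scanners that the lists changed underneath them.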
     
@@ -2997,9 +3003,10 @@
      * a bit...
      */
+    /** @todo Do this on the way out. */
     if (RT_UNLIKELY(   pChunk->cFree == GMM_CHUNK_NUM_PAGES
                     && pChunk->pFreeNext
                     && pChunk->pFreePrev /** @todo this is probably misfiring, see reset... */
                     && !pGMM->fLegacyAllocationMode))
-        gmmR0FreeChunk(pGMM, NULL, pChunk);
+        gmmR0FreeChunk(pGMM, NULL, pChunk, false);
 
 }
     
@@ -3732,5 +3739,5 @@
  * @param   pChunk      Pointer to the chunk to be mapped.
  * @param   fRelaxedSem Whether we can release the semaphore while doing the
- *                      locking (@c true) or not.
+ *                      mapping (@c true) or not.
  * @param   ppvR3       Where to store the ring-3 address of the mapping.
  *                      In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will be