VirtualBox

Changeset 36984 in vbox for trunk/src/VBox


Timestamp:
May 6, 2011 1:50:35 PM
Author:
vboxsync
Message:

GMMR0: Yield the mutex during cleanup since it may take quite a while to scan GMM for pages belonging to a VM and free empty chunks afterwards.
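
The change replaces the direct RTSemFastMutexRequest/Release calls with small wrappers that track contention on the giant GMM lock, so the long-running cleanup can voluntarily give the lock up. Below is a condensed sketch of that pattern using the same IPRT primitives that appear in the diff; MYGMM, myMutexAcquire, myMutexRelease and myMutexYield are hypothetical stand-ins for the real GMM structure and gmmR0Mutex* helpers, not the code from this changeset.

    /* Sketch of the yield-under-contention pattern introduced by this changeset.
     * MYGMM and the myMutex* names are made up; the IPRT calls are the ones the
     * diff itself uses. */
    #include <iprt/asm.h>
    #include <iprt/semaphore.h>
    #include <iprt/thread.h>
    #include <iprt/time.h>

    typedef struct MYGMM
    {
        RTSEMFASTMUTEX      hMtx;           /* the giant lock */
        uint32_t volatile   cMtxContenders; /* threads currently waiting on hMtx */
        uint32_t volatile   uGeneration;    /* bumped whenever the chunk structures
                                               change (cFreedChunks / idGeneration
                                               in the diff) */
    } MYGMM;

    /* Acquire the giant lock, keeping the contention counter up to date. */
    static int myMutexAcquire(MYGMM *pThis)
    {
        ASMAtomicIncU32(&pThis->cMtxContenders);
        int rc = RTSemFastMutexRequest(pThis->hMtx);
        ASMAtomicDecU32(&pThis->cMtxContenders);
        return rc;
    }

    /* Release the giant lock. */
    static int myMutexRelease(MYGMM *pThis)
    {
        return RTSemFastMutexRelease(pThis->hMtx);
    }

    /* Yield the lock, but only if somebody is waiting for it and we have held it
     * for at least ~2 ms.  Returns true if the lock was dropped and re-acquired,
     * in which case the caller must assume the shared state has changed. */
    static bool myMutexYield(MYGMM *pThis, uint64_t *puLockNanoTS)
    {
        if (ASMAtomicReadU32(&pThis->cMtxContenders) == 0)
            return false;                              /* nobody is waiting */
        if (RTTimeSystemNanoTS() - *puLockNanoTS < UINT32_C(2000000))
            return false;                              /* held for less than 2 ms */

        ASMAtomicIncU32(&pThis->cMtxContenders);       /* we want it right back */
        RTSemFastMutexRelease(pThis->hMtx);
        RTThreadYield();                               /* let the contenders run */
        RTSemFastMutexRequest(pThis->hMtx);
        *puLockNanoTS = RTTimeSystemNanoTS();          /* restart the hold timer */
        ASMAtomicDecU32(&pThis->cMtxContenders);
        return true;
    }

After a yield the cleanup code compares a generation counter sampled before the yield with the current value and restarts its scan only when the chunk lists actually changed while the lock was down; that is what the new cFreedChunks and idGeneration fields in the diff are for.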

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r36970 r36984  
    162162#include <iprt/asm.h>
    163163#include <iprt/avl.h>
     164#include <iprt/list.h>
    164165#include <iprt/mem.h>
    165166#include <iprt/memobj.h>
    166167#include <iprt/semaphore.h>
    167168#include <iprt/string.h>
     169#include <iprt/time.h>
    168170
    169171
     
    397399     * chunks with no free pages. */
    398400    PGMMCHUNKFREESET    pSet;
     401    /** List node in the chunk list (GMM::ChunkList).  */
     402    RTLISTNODE          ListNode;
    399403    /** Pointer to an array of mappings. */
    400404    PGMMCHUNKMAP        paMappings;
     
    465469    /** The number of free pages in the set. */
    466470    uint64_t            cFreePages;
     471    /** The generation ID for the set.  This is incremented whenever
     472     *  something is linked or unlinked from this set. */
     473    uint64_t            idGeneration;
    467474    /** Chunks ordered by increasing number of free pages. */
    468475    PGMMCHUNK           apLists[GMM_CHUNK_FREE_SET_LISTS];
     
    477484    /** Magic / eye catcher. GMM_MAGIC */
    478485    uint32_t            u32Magic;
     486    /** The number of threads waiting on the mutex. */
     487    uint32_t            cMtxContenders;
    479488    /** The fast mutex protecting the GMM.
    480489     * More fine grained locking can be implemented later if necessary. */
    481     RTSEMFASTMUTEX      Mtx;
     490    RTSEMFASTMUTEX      hMtx;
     491#ifdef VBOX_STRICT
     492    /** The current mutex owner. */
     493    RTNATIVETHREAD      hMtxOwner;
     494#endif
    482495    /** The chunk tree. */
    483496    PAVLU32NODECORE     pChunks;
     
    492505    /** @todo separate trees for distinctly different guest OSes. */
    493506    PAVLGCPTRNODECORE   pGlobalSharedModuleTree;
     507
     508    /** The fast mutex protecting the GMM cleanup.
     509     * This serializes VMs cleaning up their memory, so that we can
     510     * safely leave the primary mutex (hMtx). */
     511    RTSEMFASTMUTEX      hMtxCleanup;
     512    /** The chunk list.  For simplifying the cleanup process. */
     513    RTLISTNODE          ChunkList;
    494514
    495515    /** The maximum number of pages we're allowed to allocate.
     
    528548    uint16_t            cRegisteredVMs;
    529549
     550    /** The number of freed chunks ever.  This is used as a list generation to
     551     *  avoid restarting the cleanup scanning when the list wasn't modified. */
     552    uint32_t            cFreedChunks;
    530553    /** The previous allocated Chunk ID.
    531554     * Used as a hint to avoid scanning the whole bitmap. */
     
    540563
    541564/** The value of GMM::u32Magic (Katsuhiro Otomo). */
    542 #define GMM_MAGIC       0x19540414
     565#define GMM_MAGIC       UINT32_C(0x19540414)
    543566
    544567
     
    623646*******************************************************************************/
    624647static DECLCALLBACK(int)     gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
    625 static DECLCALLBACK(int)     gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
    626 static DECLCALLBACK(int)     gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
     648static bool                  gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk);
    627649/*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
    628650DECLINLINE(void)             gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
     
    632654static void                  gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
    633655static int                   gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
     656static void                  gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM);
    634657
    635658
     
    648671
    649672    /*
    650      * Allocate the instance data and the lock(s).
     673     * Allocate the instance data and the locks.
    651674     */
    652675    PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
    653676    if (!pGMM)
    654677        return VERR_NO_MEMORY;
     678
    655679    pGMM->u32Magic = GMM_MAGIC;
    656680    for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
    657681        pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
     682    RTListInit(&pGMM->ChunkList);
    658683    ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
    659684
    660     int rc = RTSemFastMutexCreate(&pGMM->Mtx);
     685    int rc = RTSemFastMutexCreate(&pGMM->hMtx);
    661686    if (RT_SUCCESS(rc))
    662687    {
    663         /*
    664          * Check and see if RTR0MemObjAllocPhysNC works.
    665          */
     688        rc = RTSemFastMutexCreate(&pGMM->hMtxCleanup);
     689        if (RT_SUCCESS(rc))
     690        {
     691            /*
     692             * Check and see if RTR0MemObjAllocPhysNC works.
     693             */
    666694#if 0 /* later, see #3170. */
    667         RTR0MEMOBJ MemObj;
    668         rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
    669         if (RT_SUCCESS(rc))
    670         {
    671             rc = RTR0MemObjFree(MemObj, true);
    672             AssertRC(rc);
    673         }
    674         else if (rc == VERR_NOT_SUPPORTED)
    675             pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
    676         else
    677             SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
     695            RTR0MEMOBJ MemObj;
     696            rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
     697            if (RT_SUCCESS(rc))
     698            {
     699                rc = RTR0MemObjFree(MemObj, true);
     700                AssertRC(rc);
     701            }
     702            else if (rc == VERR_NOT_SUPPORTED)
     703                pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
     704            else
     705                SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
    678706#else
    679707# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
    680         pGMM->fLegacyAllocationMode = false;
     708            pGMM->fLegacyAllocationMode = false;
    681709#  if ARCH_BITS == 32
    682         /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
    683         pGMM->fBoundMemoryMode = true;
     710            /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
     711            pGMM->fBoundMemoryMode = true;
    684712#  else
    685         pGMM->fBoundMemoryMode = false;
     713            pGMM->fBoundMemoryMode = false;
    686714#  endif
    687715# else
    688         pGMM->fLegacyAllocationMode = true;
    689         pGMM->fBoundMemoryMode = true;
     716            pGMM->fLegacyAllocationMode = true;
     717            pGMM->fBoundMemoryMode = true;
    690718# endif
    691719#endif
    692720
    693         /*
    694          * Query system page count and guess a reasonable cMaxPages value.
    695          */
    696         pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
    697 
    698         g_pGMM = pGMM;
    699         LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
    700         return VINF_SUCCESS;
    701     }
    702 
     721            /*
     722             * Query system page count and guess a reasonable cMaxPages value.
     723             */
     724            pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
     725
     726            g_pGMM = pGMM;
     727            LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
     728            return VINF_SUCCESS;
     729        }
     730
     731        RTSemFastMutexDestroy(pGMM->hMtx);
     732    }
     733
     734    pGMM->u32Magic = 0;
    703735    RTMemFree(pGMM);
    704736    SUPR0Printf("GMMR0Init: failed! rc=%d\n", rc);
     
    731763    /* Destroy the fundamentals. */
    732764    g_pGMM = NULL;
    733     pGMM->u32Magic++;
    734     RTSemFastMutexDestroy(pGMM->Mtx);
    735     pGMM->Mtx = NIL_RTSEMFASTMUTEX;
     765    pGMM->u32Magic    = ~GMM_MAGIC;
     766    RTSemFastMutexDestroy(pGMM->hMtx);
     767    pGMM->hMtx        = NIL_RTSEMFASTMUTEX;
     768    RTSemFastMutexDestroy(pGMM->hMtxCleanup);
     769    pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX;
    736770
    737771    /* free any chunks still hanging around. */
     
    798832
    799833/**
     834 * Acquires the GMM giant lock.
     835 *
     836 * @returns Assert status code from RTSemFastMutexRequest.
     837 * @param   pGMM        Pointer to the GMM instance.
     838 */
     839static int gmmR0MutexAcquire(PGMM pGMM)
     840{
     841    ASMAtomicIncU32(&pGMM->cMtxContenders);
     842    int rc = RTSemFastMutexRequest(pGMM->hMtx);
     843    ASMAtomicDecU32(&pGMM->cMtxContenders);
     844    AssertRC(rc);
     845#ifdef VBOX_STRICT
     846    pGMM->hMtxOwner = RTThreadNativeSelf();
     847#endif
     848    return rc;
     849}
     850
     851
     852/**
     853 * Releases the GMM giant lock.
     854 *
      855 * @returns Assert status code from RTSemFastMutexRelease.
     856 * @param   pGMM        Pointer to the GMM instance.
     857 */
     858static int gmmR0MutexRelease(PGMM pGMM)
     859{
     860#ifdef VBOX_STRICT
     861    pGMM->hMtxOwner = NIL_RTNATIVETHREAD;
     862#endif
     863    int rc = RTSemFastMutexRelease(pGMM->hMtx);
     864    AssertRC(rc);
     865    return rc;
     866}
     867
     868
     869/**
     870 * Yields the GMM giant lock if there is contention and a certain minimum time
     871 * has elapsed since we took it.
     872 *
     873 * @returns @c true if the mutex was yielded, @c false if not.
     874 * @param   pGMM            Pointer to the GMM instance.
     875 * @param   puLockNanoTS    Where the lock acquisition time stamp is kept
     876 *                          (in/out).
     877 */
     878static bool gmmR0MutexYield(PGMM pGMM, uint64_t *puLockNanoTS)
     879{
     880    /*
     881     * If nobody is contending the mutex, don't bother checking the time.
     882     */
     883    if (ASMAtomicReadU32(&pGMM->cMtxContenders) == 0)
     884        return false;
     885
     886    /*
     887     * Don't yield if we haven't executed for at least 2 milliseconds.
     888     */
     889    uint64_t uNanoNow = RTTimeSystemNanoTS();
     890    if (uNanoNow - *puLockNanoTS < UINT32_C(2000000))
     891        return false;
     892
     893    /*
     894     * Yield the mutex.
     895     */
     896#ifdef VBOX_STRICT
     897    pGMM->hMtxOwner = NIL_RTNATIVETHREAD;
     898#endif
     899    ASMAtomicIncU32(&pGMM->cMtxContenders);
     900    int rc1 = RTSemFastMutexRelease(pGMM->hMtx); AssertRC(rc1);
     901
     902    RTThreadYield();
     903
     904    int rc2 = RTSemFastMutexRequest(pGMM->hMtx); AssertRC(rc2);
     905    *puLockNanoTS = RTTimeSystemNanoTS();
     906    ASMAtomicDecU32(&pGMM->cMtxContenders);
     907#ifdef VBOX_STRICT
     908    pGMM->hMtxOwner = RTThreadNativeSelf();
     909#endif
     910
     911    return true;
     912}
     913
     914
     915/**
    800916 * Cleans up when a VM is terminating.
    801917 *
     
    809925    GMM_GET_VALID_INSTANCE_VOID(pGMM);
    810926
    811     int rc = RTSemFastMutexRequest(pGMM->Mtx);
    812     AssertRC(rc);
     927#ifdef VBOX_WITH_PAGE_SHARING
     928    /*
     929     * Clean up all registered shared modules first.
     930     */
     931    gmmR0SharedModuleCleanup(pGMM, pGVM);
     932#endif
     933
     934    int rc = RTSemFastMutexRequest(pGMM->hMtxCleanup); AssertRC(rc);
     935    gmmR0MutexAcquire(pGMM);
     936    uint64_t uLockNanoTS = RTTimeSystemNanoTS();
    813937    GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
    814 
    815 #ifdef VBOX_WITH_PAGE_SHARING
    816     /* Clean up all registered shared modules. */
    817     RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
    818 #endif
    819938
    820939    /*
     
    865984            /*
    866985             * Walk the entire pool looking for pages that belong to this VM
    867              * and left over mappings. (This'll only catch private pages, shared
    868              * pages will be 'left behind'.)
     986             * and left over mappings.  (This'll only catch private pages,
     987             * shared pages will be 'left behind'.)
    869988             */
    870             /** @todo this might be kind of expensive with a lot of VMs and
    871              *   memory hanging around... */
    872             uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
    873             RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
     989            uint64_t    cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
     990
     991            unsigned    iCountDown = 64;
     992            bool        fRedoFromStart;
     993            PGMMCHUNK   pChunk;
     994            do
     995            {
     996                fRedoFromStart = false;
     997                RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode)
     998                {
     999                    if (   !gmmR0CleanupVMScanChunk(pGVM, pChunk)
     1000                        || iCountDown != 0)
     1001                        iCountDown--;
     1002                    else
     1003                    {
     1004                        iCountDown = 64;
     1005                        uint32_t const cFreeChunksOld = pGMM->cFreedChunks;
     1006                        fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
     1007                                      && pGMM->cFreedChunks != cFreeChunksOld;
     1008                        if (fRedoFromStart)
     1009                            break;
     1010                    }
     1011                }
     1012            } while (fRedoFromStart);
     1013
    8741014            if (pGVM->gmm.s.cPrivatePages)
    8751015                SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
     1016
    8761017            pGMM->cAllocatedPages -= cPrivatePages;
    8771018
    878             /* free empty chunks. */
    879             if (cPrivatePages)
     1019            /*
     1020             * Free empty chunks.
     1021             */
     1022            do
    8801023            {
    881                 PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
    882                 while (pCur)
     1024                iCountDown = 10240;
     1025                pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
     1026                while (pChunk)
    8831027                {
    884                     PGMMCHUNK pNext = pCur->pFreeNext;
    885                     if (    pCur->cFree == GMM_CHUNK_NUM_PAGES
     1028                    PGMMCHUNK pNext = pChunk->pFreeNext;
     1029                    if (    pChunk->cFree == GMM_CHUNK_NUM_PAGES
    8861030                        &&  (   !pGMM->fBoundMemoryMode
    887                              || pCur->hGVM == pGVM->hSelf))
    888                         gmmR0FreeChunk(pGMM, pGVM, pCur);
    889                     pCur = pNext;
     1031                             || pChunk->hGVM == pGVM->hSelf))
     1032                    {
     1033                        gmmR0FreeChunk(pGMM, pGVM, pChunk);
     1034                        iCountDown = 1;
     1035                    }
     1036                    pChunk = pNext;
     1037
     1038                    if (--iCountDown == 0)
     1039                    {
     1040                        uint64_t const idGenerationOld = pGMM->Private.idGeneration;
     1041                        fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
     1042                                      && pGMM->Private.idGeneration != idGenerationOld;
     1043                        if (fRedoFromStart)
     1044                            break;
     1045                        iCountDown = 10240;
     1046                    }
    8901047                }
    891             }
    892 
    893             /* account for shared pages that weren't freed. */
     1048            } while (fRedoFromStart);
     1049
     1050            /*
     1051             * Account for shared pages that weren't freed.
     1052             */
    8941053            if (pGVM->gmm.s.cSharedPages)
    8951054            {
     
    8991058            }
    9001059
    901             /* Clean up balloon statistics in case the VM process crashed. */
     1060            /*
     1061             * Clean up balloon statistics in case the VM process crashed.
     1062             */
    9021063            Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
    9031064            pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
     
    9261087
    9271088    GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    928     RTSemFastMutexRelease(pGMM->Mtx);
     1089    gmmR0MutexRelease(pGMM);
     1090    RTSemFastMutexRelease(pGMM->hMtxCleanup);
    9291091
    9301092    LogFlow(("GMMR0CleanupVM: returns\n"));
     
    9331095
    9341096/**
    935  * RTAvlU32DoWithAll callback.
    936  *
    937  * @returns 0
    938  * @param   pNode   The node to search.
    939  * @param   pvGVM   Pointer to the shared VM structure.
    940  */
    941 static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
    942 {
    943     PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
    944     PGVM pGVM = (PGVM)pvGVM;
    945 
     1097 * Scan one chunk for private pages belonging to the specified VM.
     1098 *
     1099 * @returns @c true if a mapping was found (and freed), @c false if not.
     1100 * @param   pGVM        The global VM handle.
     1101 * @param   pChunk      The chunk to scan.
     1102 */
     1103static bool gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk)
     1104{
    9461105    /*
    9471106     * Look for pages belonging to the VM.
     
    10071166
    10081167    /*
    1009      * Look for the mapping belonging to the terminating VM.
    1010      */
    1011     for (unsigned i = 0; i < pChunk->cMappings; i++)
     1168     * Look for a mapping belonging to the terminating VM.
     1169     */
     1170    unsigned cMappings = pChunk->cMappings;
     1171    bool fMappingFreed = true;
     1172    for (unsigned i = 0; i < cMappings; i++)
    10121173        if (pChunk->paMappings[i].pGVM == pGVM)
    10131174        {
    10141175            RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;
    10151176
    1016             pChunk->cMappings--;
    1017             if (i < pChunk->cMappings)
    1018                  pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
    1019             pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
    1020             pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
     1177            cMappings--;
     1178            if (i < cMappings)
     1179                 pChunk->paMappings[i] = pChunk->paMappings[cMappings];
     1180            pChunk->paMappings[cMappings].pGVM   = NULL;
     1181            pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
     1182            Assert(pChunk->cMappings - 1U == cMappings);
     1183            pChunk->cMappings = cMappings;
    10211184
    10221185            int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
     
    10271190                AssertRC(rc);
    10281191            }
     1192            fMappingFreed = true;
    10291193            break;
    10301194        }
     
    10501214    }
    10511215
    1052     return 0;
     1216    return fMappingFreed;
    10531217}
    10541218
     
    11461310    AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);
    11471311
    1148     rc = RTSemFastMutexRequest(pGMM->Mtx);
    1149     AssertRC(rc);
     1312    gmmR0MutexAcquire(pGMM);
    11501313    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    11511314    {
     
    11801343    else
    11811344        rc = VERR_INTERNAL_ERROR_5;
    1182     RTSemFastMutexRelease(pGMM->Mtx);
     1345    gmmR0MutexRelease(pGMM);
    11831346    LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
    11841347    return rc;
     
    12421405    AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);
    12431406
    1244     rc = RTSemFastMutexRequest(pGMM->Mtx);
    1245     AssertRC(rc);
     1407    gmmR0MutexAcquire(pGMM);
    12461408    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    12471409    {
     
    12751437    else
    12761438        rc = VERR_INTERNAL_ERROR_5;
    1277     RTSemFastMutexRelease(pGMM->Mtx);
     1439    gmmR0MutexRelease(pGMM);
    12781440    LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
    12791441    return rc;
     
    14491611    {
    14501612        pSet->cFreePages -= pChunk->cFree;
     1613        pSet->idGeneration++;
    14511614
    14521615        PGMMCHUNK pPrev = pChunk->pFreePrev;
     
    14971660
    14981661        pSet->cFreePages += pChunk->cFree;
     1662        pSet->idGeneration++;
    14991663    }
    15001664}
     
    16141778            {
    16151779                pGMM->cChunks++;
     1780                RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
    16161781                gmmR0LinkChunk(pChunk, pSet);
    16171782                LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
     
    16621827
    16631828    /* Leave the lock temporarily as the allocation might take long. */
    1664     RTSemFastMutexRelease(pGMM->Mtx);
     1829    gmmR0MutexRelease(pGMM);
    16651830    if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
    16661831        rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
     
    16681833        rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);
    16691834
    1670     /* Grab the lock again. */
    1671     int rc2 = RTSemFastMutexRequest(pGMM->Mtx);
     1835    int rc2 = gmmR0MutexAcquire(pGMM);
    16721836    AssertRCReturn(rc2, rc2);
    16731837
     
    20792243    }
    20802244
    2081     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2082     AssertRC(rc);
     2245    gmmR0MutexAcquire(pGMM);
    20832246    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    20842247    {
     
    21982361    else
    21992362        rc = VERR_INTERNAL_ERROR_5;
    2200     RTSemFastMutexRelease(pGMM->Mtx);
     2363    gmmR0MutexRelease(pGMM);
    22012364    LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
    22022365    return rc;
     
    22582421    }
    22592422
    2260     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2261     AssertRC(rc);
     2423    gmmR0MutexAcquire(pGMM);
    22622424    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    22632425    {
     
    22872449    else
    22882450        rc = VERR_INTERNAL_ERROR_5;
    2289     RTSemFastMutexRelease(pGMM->Mtx);
     2451    gmmR0MutexRelease(pGMM);
    22902452    LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
    22912453    return rc;
     
    23172479    return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
    23182480}
     2481
    23192482
    23202483/**
     
    23602523    *pIdPage = NIL_GMM_PAGEID;
    23612524
    2362     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2363     AssertRCReturn(rc, rc);
     2525    gmmR0MutexAcquire(pGMM);
    23642526    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    23652527    {
     
    23722534            Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
    23732535                 pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
    2374             RTSemFastMutexRelease(pGMM->Mtx);
     2536            gmmR0MutexRelease(pGMM);
    23752537            return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
    23762538        }
     
    23802542        if (RT_FAILURE(rc))
    23812543        {
    2382             RTSemFastMutexRelease(pGMM->Mtx);
     2544            gmmR0MutexRelease(pGMM);
    23832545            return rc;
    23842546        }
     
    24062568        rc = VERR_INTERNAL_ERROR_5;
    24072569
    2408     RTSemFastMutexRelease(pGMM->Mtx);
     2570    gmmR0MutexRelease(pGMM);
    24092571    LogFlow(("GMMR0AllocateLargePage: returns %Rrc\n", rc));
    24102572    return rc;
     
    24382600        return VERR_NOT_SUPPORTED;
    24392601
    2440     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2441     AssertRC(rc);
     2602    gmmR0MutexAcquire(pGMM);
    24422603    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    24432604    {
     
    24472608        {
    24482609            Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
    2449             RTSemFastMutexRelease(pGMM->Mtx);
     2610            gmmR0MutexRelease(pGMM);
    24502611            return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
    24512612        }
    24522613
    24532614        PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
    2454         if (    RT_LIKELY(pPage)
    2455             &&  RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
    2456         {
    2457                 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
    2458                 Assert(pChunk);
    2459                 Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
    2460                 Assert(pChunk->cPrivate > 0);
    2461 
    2462                 /* Release the memory immediately. */
    2463                 gmmR0FreeChunk(pGMM, NULL, pChunk);
    2464 
    2465                 /* Update accounting. */
    2466                 pGVM->gmm.s.Allocated.cBasePages -= cPages;
    2467                 pGVM->gmm.s.cPrivatePages        -= cPages;
    2468                 pGMM->cAllocatedPages            -= cPages;
     2615        if (RT_LIKELY(   pPage
     2616                      && GMM_PAGE_IS_PRIVATE(pPage)))
     2617        {
     2618            PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
     2619            Assert(pChunk);
     2620            Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
     2621            Assert(pChunk->cPrivate > 0);
     2622
     2623            /* Release the memory immediately. */
     2624            gmmR0FreeChunk(pGMM, NULL, pChunk);
     2625
     2626            /* Update accounting. */
     2627            pGVM->gmm.s.Allocated.cBasePages -= cPages;
     2628            pGVM->gmm.s.cPrivatePages        -= cPages;
     2629            pGMM->cAllocatedPages            -= cPages;
    24692630        }
    24702631        else
     
    24742635        rc = VERR_INTERNAL_ERROR_5;
    24752636
    2476     RTSemFastMutexRelease(pGMM->Mtx);
     2637    gmmR0MutexRelease(pGMM);
    24772638    LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
    24782639    return rc;
     
    25022663}
    25032664
     2665
    25042666/**
    25052667 * Frees a chunk, giving it back to the host OS.
     
    25462708             */
    25472709            gmmR0UnlinkChunk(pChunk);
     2710
     2711            RTListNodeRemove(&pChunk->ListNode);
    25482712
    25492713            PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
     
    25702734
    25712735            RTMemFree(pChunk);
     2736
     2737            pGMM->cFreedChunks++;
    25722738        }
    25732739        else
     
    26582824
    26592825#ifdef VBOX_WITH_PAGE_SHARING
     2826
    26602827/**
    26612828 * Converts a private page to a shared page, the page is known to exist and be valid and such.
     
    26882855}
    26892856
     2857
    26902858/**
    26912859 * Increase the use count of a shared page, the page is known to exist and be valid and such.
     
    27062874    pGVM->gmm.s.Allocated.cBasePages++;
    27072875}
    2708 #endif
     2876
     2877#endif /* VBOX_WITH_PAGE_SHARING */
    27092878
    27102879/**
     
    27272896    gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage);
    27282897}
     2898
    27292899
    27302900/**
     
    28082978                Assert(pPage->Shared.cRefs);
    28092979                if (!--pPage->Shared.cRefs)
    2810                 {
    28112980                    gmmR0FreeSharedPage(pGMM, idPage, pPage);
    2812                 }
    28132981                else
    28142982                {
     
    28943062     * Take the semaphore and call the worker function.
    28953063     */
    2896     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2897     AssertRC(rc);
     3064    gmmR0MutexAcquire(pGMM);
    28983065    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    28993066    {
     
    29033070    else
    29043071        rc = VERR_INTERNAL_ERROR_5;
    2905     RTSemFastMutexRelease(pGMM->Mtx);
     3072    gmmR0MutexRelease(pGMM);
    29063073    LogFlow(("GMMR0FreePages: returns %Rrc\n", rc));
    29073074    return rc;
     
    29783145     * Take the semaphore and do some more validations.
    29793146     */
    2980     rc = RTSemFastMutexRequest(pGMM->Mtx);
    2981     AssertRC(rc);
     3147    gmmR0MutexAcquire(pGMM);
    29823148    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    29833149    {
     
    30713237        rc = VERR_INTERNAL_ERROR_5;
    30723238
    3073     RTSemFastMutexRelease(pGMM->Mtx);
     3239    gmmR0MutexRelease(pGMM);
    30743240    LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc));
    30753241    return rc;
     
    31643330     * Take the semaphore and do some more validations.
    31653331     */
    3166     rc = RTSemFastMutexRequest(pGMM->Mtx);
    3167     AssertRC(rc);
     3332    gmmR0MutexAcquire(pGMM);
    31683333    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    31693334    {
     
    31763341        rc = VERR_INTERNAL_ERROR_5;
    31773342
    3178     RTSemFastMutexRelease(pGMM->Mtx);
     3343    gmmR0MutexRelease(pGMM);
    31793344    LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc));
    31803345    return rc;
    31813346}
     3347
    31823348
    31833349/**
     
    31963362         * Find the mapping and try unmapping it.
    31973363         */
    3198         for (uint32_t i = 0; i < pChunk->cMappings; i++)
     3364        uint32_t cMappings = pChunk->cMappings;
     3365        for (uint32_t i = 0; i < cMappings; i++)
    31993366        {
    32003367            Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
     
    32063373                {
    32073374                    /* update the record. */
    3208                     pChunk->cMappings--;
    3209                     if (i < pChunk->cMappings)
    3210                         pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
    3211                     pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
    3212                     pChunk->paMappings[pChunk->cMappings].pGVM = NULL;
     3375                    cMappings--;
     3376                    if (i < cMappings)
     3377                        pChunk->paMappings[i] = pChunk->paMappings[cMappings];
     3378                    pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
     3379                    pChunk->paMappings[cMappings].pGVM   = NULL;
     3380                    Assert(pChunk->cMappings - 1U == cMappings);
     3381                    pChunk->cMappings = cMappings;
    32133382                }
    32143383                return rc;
     
    32373406static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
    32383407{
     3408    Assert(pGMM->hMtxOwner == RTThreadNativeSelf());
     3409
    32393410    /*
    32403411     * If we're in legacy mode this is simple.
     
    32793450    {
    32803451        /* reallocate the array? assumes few users per chunk (usually one). */
    3281         if (   pChunk->cMappings <= 3
    3282             || (pChunk->cMappings & 3) == 0)
    3283         {
    3284             unsigned cNewSize = pChunk->cMappings <= 3
    3285                               ? pChunk->cMappings + 1
    3286                               : pChunk->cMappings + 4;
     3452        unsigned iMapping = pChunk->cMappings;
     3453        if (   iMapping <= 3
     3454            || (iMapping & 3) == 0)
     3455        {
     3456            unsigned cNewSize = iMapping <= 3
     3457                              ? iMapping + 1
     3458                              : iMapping + 4;
    32873459            Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize);
     3460            if (RT_UNLIKELY(cNewSize > UINT16_MAX))
     3461            {
     3462                rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
     3463                return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS;
     3464            }
     3465
    32883466            void *pvMappings = RTMemRealloc(pChunk->paMappings, cNewSize * sizeof(pChunk->paMappings[0]));
    32893467            if (RT_UNLIKELY(!pvMappings))
    32903468            {
    3291                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
    3292                 AssertRC(rc);
     3469                rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
    32933470                return VERR_NO_MEMORY;
    32943471            }
     
    32973474
    32983475        /* insert new entry */
    3299         pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
    3300         pChunk->paMappings[pChunk->cMappings].pGVM   = pGVM;
    3301         pChunk->cMappings++;
     3476        pChunk->paMappings[iMapping].MapObj = MapObj;
     3477        pChunk->paMappings[iMapping].pGVM   = pGVM;
     3478        Assert(pChunk->cMappings == iMapping);
     3479        pChunk->cMappings = iMapping + 1;
    33023480
    33033481        *ppvR3 = RTR0MemObjAddressR3(MapObj);
     
    33873565     * it it's limits, so, no problem here.
    33883566     */
    3389     rc = RTSemFastMutexRequest(pGMM->Mtx);
    3390     AssertRC(rc);
     3567    gmmR0MutexAcquire(pGMM);
    33913568    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    33923569    {
     
    34243601    else
    34253602        rc = VERR_INTERNAL_ERROR_5;
    3426     RTSemFastMutexRelease(pGMM->Mtx);
     3603    gmmR0MutexRelease(pGMM);
    34273604
    34283605    LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
     
    34913668    {
    34923669        /* Grab the lock. */
    3493         rc = RTSemFastMutexRequest(pGMM->Mtx);
    3494         AssertRC(rc);
     3670        rc = gmmR0MutexAcquire(pGMM);
    34953671        if (RT_SUCCESS(rc))
    34963672        {
     
    34993675             */
    35003676            rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
    3501             RTSemFastMutexRelease(pGMM->Mtx);
     3677            gmmR0MutexRelease(pGMM);
    35023678        }
    35033679
     
    35723748     * Take the semaphore and do some more validations.
    35733749     */
    3574     rc = RTSemFastMutexRequest(pGMM->Mtx);
    3575     AssertRC(rc);
     3750    gmmR0MutexAcquire(pGMM);
    35763751    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    35773752    {
     
    37133888
    37143889end:
    3715     RTSemFastMutexRelease(pGMM->Mtx);
     3890    gmmR0MutexRelease(pGMM);
    37163891    return rc;
    37173892#else
     
    37723947     * Take the semaphore and do some more validations.
    37733948     */
    3774     rc = RTSemFastMutexRequest(pGMM->Mtx);
    3775     AssertRC(rc);
     3949    gmmR0MutexAcquire(pGMM);
    37763950    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    37773951    {
     
    38294003        rc = VERR_INTERNAL_ERROR_5;
    38304004
    3831     RTSemFastMutexRelease(pGMM->Mtx);
     4005    gmmR0MutexRelease(pGMM);
    38324006    return rc;
    38334007#else
     
    40274201}
    40284202
    4029 /**
    4030  * RTAvlU32Destroy callback.
    4031  *
    4032  * @returns 0
     4203
     4204/**
     4205 * RTAvlGCPtrDestroy callback.
     4206 *
     4207 * @returns 0 or VERR_INTERNAL_ERROR.
    40334208 * @param   pNode   The node to destroy.
    40344209 * @param   pvGVM   The GVM handle.
     
    40364211static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
    40374212{
    4038     PGVM pGVM = (PGVM)pvGVM;
    4039     PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
    4040     PGMM pGMM;
    4041     GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
     4213    PGVM                    pGVM   = (PGVM)pvGVM;
     4214    PGMMSHAREDMODULEPERVM   pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
    40424215
    40434216    Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
     
    40454218    {
    40464219        PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
    4047         Assert(pRec);
     4220        AssertPtr(pRec);
    40484221        Assert(pRec->cUsers);
    40494222
     
    40524225        if (pRec->cUsers == 0)
    40534226        {
    4054             for (unsigned i = 0; i < pRec->cRegions; i++)
     4227            for (uint32_t i = 0; i < pRec->cRegions; i++)
    40554228                if (pRec->aRegions[i].paHCPhysPageID)
    40564229                    RTMemFree(pRec->aRegions[i].paHCPhysPageID);
    40574230
    40584231            /* Remove from the tree and free memory. */
     4232            PGMM pGMM;
     4233            GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    40594234            RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
    40604235            RTMemFree(pRec);
     
    40634238    RTMemFree(pRecVM);
    40644239    return 0;
     4240}
     4241
     4242
     4243/**
     4244 * Used by GMMR0CleanupVM to clean up shared modules.
     4245 *
     4246 * This is called without taking the GMM lock so that it can be yielded as
     4247 * needed here.
     4248 *
     4249 * @param   pGMM                The GMM handle.
     4250 * @param   pGVM                The global VM handle.
     4251 */
     4252static void gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM)
     4253{
     4254    gmmR0MutexAcquire(pGMM);
     4255    GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
     4256
     4257    RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
     4258
     4259    gmmR0MutexRelease(pGMM);
    40654260}
    40664261
     
    40904285     * Take the semaphore and do some more validations.
    40914286     */
    4092     rc = RTSemFastMutexRequest(pGMM->Mtx);
    4093     AssertRC(rc);
     4287    gmmR0MutexAcquire(pGMM);
    40944288    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    40954289    {
     
    41034297        rc = VERR_INTERNAL_ERROR_5;
    41044298
    4105     RTSemFastMutexRelease(pGMM->Mtx);
     4299    gmmR0MutexRelease(pGMM);
    41064300    return rc;
    41074301#else
     
    41594353     * Take the semaphore and do some more validations.
    41604354     */
    4161     int rc = RTSemFastMutexRequest(pGMM->Mtx);
    4162     AssertRC(rc);
     4355    gmmR0MutexAcquire(pGMM);
    41634356    if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    41644357        rc = VERR_INTERNAL_ERROR_5;
     
    41834376    GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
    41844377
    4185     RTSemFastMutexRelease(pGMM->Mtx);
     4378    gmmR0MutexRelease(pGMM);
    41864379    return VINF_SUCCESS;
    41874380}
     
    42134406     * Take the semaphore and do some more validations.
    42144407     */
    4215     rc = RTSemFastMutexRequest(pGMM->Mtx);
    4216     AssertRC(rc);
     4408    gmmR0MutexAcquire(pGMM);
    42174409# endif
    42184410    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
     
    42374429
    42384430# ifndef DEBUG_sandervl
    4239     RTSemFastMutexRelease(pGMM->Mtx);
     4431    gmmR0MutexRelease(pGMM);
    42404432# endif
    42414433    return rc;
     
    43264518     * Take the semaphore and do some more validations.
    43274519     */
    4328     int rc = RTSemFastMutexRequest(pGMM->Mtx);
    4329     AssertRC(rc);
     4520    int rc = gmmR0MutexAcquire(pGMM);
    43304521    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
    43314522    {
     
    43724563
    43734564end:
    4374     RTSemFastMutexRelease(pGMM->Mtx);
     4565    gmmR0MutexRelease(pGMM);
    43754566    return rc;
    43764567}
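
For reference, the reworked cleanup in GMMR0CleanupVM pairs the yield helper with those generation counters so a scan is restarted only when the chunk list really changed while the lock was dropped. A minimal sketch of that loop shape follows, building on the hypothetical MYGMM/myMutexYield sketch given after the commit message above; MYCHUNK, ppTail and pfnScanChunk are likewise made-up names, not the changeset's code.

    /* Restart-on-generation-change scan, simplified: consider yielding every 64
     * chunks instead of only after a chunk was actually freed. */
    typedef struct MYCHUNK
    {
        struct MYCHUNK *pPrev;                  /* chunk list is walked tail to head */
    } MYCHUNK;

    static void myCleanupScan(MYGMM *pThis, MYCHUNK **ppTail,
                              void (*pfnScanChunk)(MYCHUNK *pChunk))
    {
        myMutexAcquire(pThis);
        uint64_t uLockNanoTS = RTTimeSystemNanoTS();
        bool     fRedoFromStart;
        do
        {
            fRedoFromStart = false;
            unsigned iCountDown = 64;
            for (MYCHUNK *pChunk = *ppTail; pChunk; pChunk = pChunk->pPrev)
            {
                pfnScanChunk(pChunk);
                if (--iCountDown == 0)
                {
                    uint32_t const uGenOld = pThis->uGeneration;
                    fRedoFromStart = myMutexYield(pThis, &uLockNanoTS)
                                  && pThis->uGeneration != uGenOld;
                    if (fRedoFromStart)
                        break;                  /* the list changed; iterator is stale */
                    iCountDown = 64;
                }
            }
        } while (fRedoFromStart);
        myMutexRelease(pThis);
    }

The actual changeset keeps two such loops: one over GMM::ChunkList guarded by cFreedChunks, and one over the private free set guarded by GMMCHUNKFREESET::idGeneration.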