VirtualBox

Changeset 37192 in vbox for trunk/src/VBox/VMM


Timestamp: May 24, 2011 12:06:38 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 71888
Message:

GMMR0: Use chunk-level locking to avoid having to hold the giant GMM lock when doing mapping, unmapping and free operations.
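
A minimal sketch of the resulting pattern, based on the helpers this changeset adds (simplified, error handling omitted; pGMM/pChunk and all types come from the surrounding GMM code):

    /* Sketch: how a typical caller uses the new chunk mutex helpers. */
    static void sketchChunkOperation(PGMM pGMM, PGMMCHUNK pChunk)
    {
        GMMR0CHUNKMTXSTATE MtxState;
        /* The caller owns the giant GMM mutex at this point. */
        gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
                               GMMR0CHUNK_MTX_RETAKE_GIANT); /* drops the giant */
        /* ... expensive mapping/unmapping/freeing work on pChunk ... */
        gmmR0ChunkMutexRelease(&MtxState, pChunk);           /* retakes the giant */
    }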

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r37178 r37192  
    364364{
    365365    /** The mapping object. */
    366     RTR0MEMOBJ          MapObj;
     366    RTR0MEMOBJ          hMapObj;
    367367    /** The VM owning the mapping. */
    368368    PGVM                pGVM;
     
    386386{
    387387    /** The AVL node core.
    388      * The Key is the chunk ID. */
     388     * The Key is the chunk ID.  (Giant mtx.) */
    389389    AVLU32NODECORE      Core;
    390390    /** The memory object.
    391391     * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on
    392      * what the host can dish up with. */
    393     RTR0MEMOBJ          MemObj;
    394     /** Pointer to the next chunk in the free list. */
     392     * what the host can dish up with.  (Chunk mtx protects mapping accesses
     393     * and related frees.) */
     394    RTR0MEMOBJ          hMemObj;
     395    /** Pointer to the next chunk in the free list.  (Giant mtx.) */
    395396    PGMMCHUNK           pFreeNext;
    396     /** Pointer to the previous chunk in the free list. */
     397    /** Pointer to the previous chunk in the free list. (Giant mtx.) */
    397398    PGMMCHUNK           pFreePrev;
    398399    /** Pointer to the free set this chunk belongs to.  NULL for
    399      * chunks with no free pages. */
     400     * chunks with no free pages. (Giant mtx.) */
    400401    PGMMCHUNKFREESET    pSet;
    401     /** List node in the chunk list (GMM::ChunkList).  */
     402    /** List node in the chunk list (GMM::ChunkList).  (Giant mtx.) */
    402403    RTLISTNODE          ListNode;
    403     /** Pointer to an array of mappings. */
    404     PGMMCHUNKMAP        paMappings;
    405     /** The number of mappings. */
    406     uint16_t            cMappings;
    407     /** The number of mapping operations that is in progress without owning
    408      * the semaphore. */
    409     uint16_t volatile   cMappingsInProgress;
    410     /** The head of the list of free pages. UINT16_MAX is the NIL value. */
     404    /** Pointer to an array of mappings.  (Chunk mtx.) */
     405    PGMMCHUNKMAP        paMappingsX;
     406    /** The number of mappings.  (Chunk mtx.) */
     407    uint16_t            cMappingsX;
     408    /** The mapping lock this chunk is using.  UINT8_MAX if nobody is
     409     *  mapping or freeing anything.  (Giant mtx.) */
     410    uint8_t volatile    iMemLock;
     411    /** Flags field reserved for future use (like eliminating enmType).
     412     *  (Giant mtx.) */
     413    uint8_t             fFlags;
     414    /** The head of the list of free pages. UINT16_MAX is the NIL value.
     415     *  (Giant mtx.) */
    411416    uint16_t            iFreeHead;
    412     /** The number of free pages. */
     417    /** The number of free pages.  (Giant mtx.) */
    413418    uint16_t            cFree;
    414419    /** The GVM handle of the VM that first allocated pages from this chunk, this
    415420     * is used as a preference when there are several chunks to choose from.
    416      * When in bound memory mode this isn't a preference any longer. */
     421     * When in bound memory mode this isn't a preference any longer.  (Giant
     422     * mtx.) */
    417423    uint16_t            hGVM;
    418     /** The ID of the NUMA node the memory mostly resides on. (Reserved for
    419      *  future use.) */
     424    /** The ID of the NUMA node the memory mostly resides on.  (Reserved for
     425     *  future use.)  (Giant mtx.) */
    420426    uint16_t            idNumaNode;
    421     /** The number of private pages. */
     427    /** The number of private pages.  (Giant mtx.) */
    422428    uint16_t            cPrivate;
    423     /** The number of shared pages. */
     429    /** The number of shared pages.  (Giant mtx.) */
    424430    uint16_t            cShared;
    425     /** Chunk type */
     431    /** Chunk type.  (Giant mtx.) */
    426432    GMMCHUNKTYPE        enmType;
    427     /** The pages. */
     433    /** The pages.  (Giant mtx.) */
    428434    GMMPAGE             aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT];
    429435} GMMCHUNK;
     
    567573     * The NIL id (0) is marked allocated. */
    568574    uint32_t            bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32];
     575
     576    /** The index of the next mutex to use. */
     577    uint32_t            iNextChunkMtx;
     578    /** Chunk locks for reducing lock contention without having to allocate
     579     * one lock per chunk. */
     580    RTSEMFASTMUTEX      ahChunkMtx[64];
    569581} GMM;
    570582/** Pointer to the GMM instance. */
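
The 64-entry ahChunkMtx array is lock striping: a chunk does not own a dedicated mutex; gmmR0ChunkMutexAcquire (added below) assigns one round-robin via iNextChunkMtx on first use and remembers the index in GMMCHUNK::iMemLock. A standalone sketch of the idea (hypothetical names, not code from this file):

    #include <stdint.h>

    #define MY_STRIPE_COUNT 64  /* mirrors RT_ELEMENTS(GMM::ahChunkMtx) */

    /* Assign a mutex stripe to an object the first time it needs locking. */
    static uint8_t myAssignStripe(uint32_t *piNext, uint8_t *piStripe)
    {
        if (*piStripe == UINT8_MAX)             /* no stripe associated yet */
            *piStripe = (uint8_t)((*piNext)++ % MY_STRIPE_COUNT);
        return *piStripe;                       /* stripes may be shared */
    }

Two chunks that land on the same stripe occasionally contend falsely, but the number of kernel mutexes stays capped at 64 no matter how many chunks exist.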
     
    573585/** The value of GMM::u32Magic (Katsuhiro Otomo). */
    574586#define GMM_MAGIC       UINT32_C(0x19540414)
     587
     588
     589/**
     590 * GMM chunk mutex state.
     591 *
     592 * This is returned by gmmR0ChunkMutexAcquire and is used by the other
     593 * gmmR0ChunkMutex* methods.
     594 */
     595typedef struct GMMR0CHUNKMTXSTATE
     596{
     597    PGMM                pGMM;
     598    /** The index of the chunk mutex. */
     599    uint8_t             iChunkMtx;
     600    /** The relevant flags (GMMR0CHUNK_MTX_XXX). */
     601    uint8_t             fFlags;
     602} GMMR0CHUNKMTXSTATE;
     603/** Pointer to a chunk mutex state. */
     604typedef GMMR0CHUNKMTXSTATE *PGMMR0CHUNKMTXSTATE;
     605
     606/** @name GMMR0CHUNK_MTX_XXX
     607 * @{ */
     608#define GMMR0CHUNK_MTX_INVALID          UINT32_C(0)
     609#define GMMR0CHUNK_MTX_KEEP_GIANT       UINT32_C(1)
     610#define GMMR0CHUNK_MTX_RETAKE_GIANT     UINT32_C(2)
     611#define GMMR0CHUNK_MTX_DROP_GIANT       UINT32_C(3)
     612#define GMMR0CHUNK_MTX_END              UINT32_C(4)
     613/** @} */
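
How the three usable modes behave, per the acquire/release helpers added further down (a hedged sketch, not code from the patch; types and helpers are the ones in this file):

    static void sketchMtxFlags(PGMM pGMM, PGMMCHUNK pChunk)
    {
        GMMR0CHUNKMTXSTATE MtxState;

        /* KEEP_GIANT: hold both locks; release leaves the giant held. */
        gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT);
        gmmR0ChunkMutexRelease(&MtxState, pChunk);

        /* RETAKE_GIANT: acquire drops the giant, release reacquires it. */
        gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_RETAKE_GIANT);
        gmmR0ChunkMutexRelease(&MtxState, pChunk);

        /* DROP_GIANT: acquire drops the giant; release leaves it dropped. */
        gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_DROP_GIANT);
        gmmR0ChunkMutexRelease(&MtxState, pChunk);
    }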
    575614
    576615
     
    655694*******************************************************************************/
    656695static DECLCALLBACK(int)     gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
    657 static bool                  gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk);
    658 /*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
     696static bool                  gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
    659697DECLINLINE(void)             gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
    660698DECLINLINE(void)             gmmR0UnlinkChunk(PGMMCHUNK pChunk);
     
    662700static void                  gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
    663701static void                  gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
    664 static int                   gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
     702static int                   gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
    665703static void                  gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM);
    666704
     
    698736        if (RT_SUCCESS(rc))
    699737        {
    700             /*
    701              * Check and see if RTR0MemObjAllocPhysNC works.
    702              */
    703 #if 0 /* later, see #3170. */
    704             RTR0MEMOBJ MemObj;
    705             rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
     738            unsigned iMtx;
     739            for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->ahChunkMtx); iMtx++)
     740            {
     741                rc = RTSemFastMutexCreate(&pGMM->ahChunkMtx[iMtx]);
     742                if (RT_FAILURE(rc))
     743                    break;
     744            }
    706745            if (RT_SUCCESS(rc))
    707746            {
    708                 rc = RTR0MemObjFree(MemObj, true);
    709                 AssertRC(rc);
    710             }
    711             else if (rc == VERR_NOT_SUPPORTED)
    712                 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
    713             else
    714                 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
     747                /*
     748                 * Check and see if RTR0MemObjAllocPhysNC works.
     749                 */
     750#if 0 /* later, see #3170. */
     751                RTR0MEMOBJ MemObj;
     752                rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
     753                if (RT_SUCCESS(rc))
     754                {
     755                    rc = RTR0MemObjFree(MemObj, true);
     756                    AssertRC(rc);
     757                }
     758                else if (rc == VERR_NOT_SUPPORTED)
     759                    pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
     760                else
     761                    SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
    715762#else
    716763# if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
    717             pGMM->fLegacyAllocationMode = false;
     764                pGMM->fLegacyAllocationMode = false;
    718765#  if ARCH_BITS == 32
    719             /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
    720             pGMM->fBoundMemoryMode = true;
     766                /* Don't reuse possibly partial chunks because of the virtual
     767                   address space limitation. */
     768                pGMM->fBoundMemoryMode = true;
    721769#  else
    722             pGMM->fBoundMemoryMode = false;
     770                pGMM->fBoundMemoryMode = false;
    723771#  endif
    724772# else
    725             pGMM->fLegacyAllocationMode = true;
    726             pGMM->fBoundMemoryMode = true;
     773                pGMM->fLegacyAllocationMode = true;
     774                pGMM->fBoundMemoryMode = true;
    727775# endif
    728776#endif
    729777
     778                /*
     779                 * Query system page count and guess a reasonable cMaxPages value.
     780                 */
     781                pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
     782
     783                g_pGMM = pGMM;
     784                LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
     785                return VINF_SUCCESS;
     786            }
     787
    730788            /*
    731              * Query system page count and guess a reasonable cMaxPages value.
     789             * Bail out.
    732790             */
    733             pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
    734 
    735             g_pGMM = pGMM;
    736             LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
    737             return VINF_SUCCESS;
    738         }
    739 
     791            while (iMtx-- > 0)
     792                RTSemFastMutexDestroy(pGMM->ahChunkMtx[iMtx]);
     793        }
    740794        RTSemFastMutexDestroy(pGMM->hMtx);
    741795    }
     
    778832    pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX;
    779833
    780     /* free any chunks still hanging around. */
     834    /* Free any chunks still hanging around. */
    781835    RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM);
    782836
    783     /* finally the instance data itself. */
     837    /* Destroy the chunk locks. */
     838    for (unsigned iMtx = 0; iMtx < RT_ELEMENTS(pGMM->ahChunkMtx); iMtx++)
     839    {
     840        RTSemFastMutexDestroy(pGMM->ahChunkMtx[iMtx]);
     841        pGMM->ahChunkMtx[iMtx] = NIL_RTSEMFASTMUTEX;
     842    }
     843
     844    /* Finally the instance data itself. */
    784845    RTMemFree(pGMM);
    785846    LogFlow(("GMMTerm: done\n"));
     
    800861    if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT))
    801862        SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk,
    802                     pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings);
    803     Assert(pChunk->cMappingsInProgress == 0);
    804 
    805     int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
     863                    pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappingsX);
     864
     865    int rc = RTR0MemObjFree(pChunk->hMemObj, true /* fFreeMappings */);
    806866    if (RT_FAILURE(rc))
    807867    {
    808868        SUPR0Printf("GMMR0Term: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
    809                     pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
     869                    pChunk->Core.Key, pChunk->hMemObj, rc, pChunk->cMappingsX);
    810870        AssertRC(rc);
    811871    }
    812     pChunk->MemObj = NIL_RTR0MEMOBJ;
    813 
    814     RTMemFree(pChunk->paMappings);
    815     pChunk->paMappings = NULL;
     872    pChunk->hMemObj = NIL_RTR0MEMOBJ;
     873
     874    RTMemFree(pChunk->paMappingsX);
     875    pChunk->paMappingsX = NULL;
    816876
    817877    RTMemFree(pChunk);
     
    920980
    921981    return true;
     982}
     983
     984
     985/**
     986 * Acquires a chunk lock.
     987 *
     988 * The caller must own the giant lock.
     989 *
     990 * @returns Assert status code from RTSemFastMutexRequest.
     991 * @param   pMtxState   The chunk mutex state info.  (Avoids
     992 *                      passing the same flags and stuff around
     993 *                      for subsequent release and drop-giant
     994 *                      calls.)
     995 * @param   pGMM        Pointer to the GMM instance.
     996 * @param   pChunk      Pointer to the chunk.
     997 * @param   fFlags      Flags regarding the giant lock, GMMR0CHUNK_MTX_XXX.
     998 */
     999static int gmmR0ChunkMutexAcquire(PGMMR0CHUNKMTXSTATE pMtxState, PGMM pGMM, PGMMCHUNK pChunk, uint32_t fFlags)
     1000{
     1001    Assert(fFlags > GMMR0CHUNK_MTX_INVALID && fFlags < GMMR0CHUNK_MTX_END);
     1002    pMtxState->pGMM   = pGMM;
     1003    pMtxState->fFlags = (uint8_t)fFlags;
     1004
     1005    /*
     1006     * Get the lock index.
     1007     */
     1008    Assert(pGMM->hMtxOwner == RTThreadNativeSelf());
     1009    uint32_t iChunkMtx = pChunk->iMemLock;
     1010    if (iChunkMtx == UINT8_MAX)
     1011    {
     1012        /** @todo skip mutexes that are currently owned.  */
     1013        iChunkMtx = pGMM->iNextChunkMtx++;
     1014        iChunkMtx %= RT_ELEMENTS(pGMM->ahChunkMtx);
     1015        pChunk->iMemLock = iChunkMtx;
     1016    }
     1017    AssertCompile(RT_ELEMENTS(pGMM->ahChunkMtx) < UINT8_MAX);
     1018    pMtxState->iChunkMtx = (uint8_t)iChunkMtx;
     1019
     1020    /*
     1021     * Drop the giant?
     1022     */
     1023    if (fFlags != GMMR0CHUNK_MTX_KEEP_GIANT)
     1024    {
     1025        /** @todo GMM life cycle cleanup (we may race someone
     1026         *        destroying and cleaning up GMM)? */
     1027        gmmR0MutexRelease(pGMM);
     1028    }
     1029
     1030    /*
     1031     * Take the chunk mutex.
     1032     */
     1033    int rc = RTSemFastMutexRequest(pGMM->ahChunkMtx[iChunkMtx]);
     1034    AssertRC(rc);
     1035    return rc;
     1036}
     1037
     1038
     1039/**
     1040 * Releases the chunk mutex, retaking the giant GMM lock if requested.
     1041 *
     1042 * @returns Assert status code from RTSemFastMutexRelease.
     1043 * @param   pMtxState   The chunk mutex state.
     1044 * @param   pChunk      Pointer to the chunk if it's still
     1045 *                      alive, NULL if it isn't.  This is
     1046 *                      intended for later optimizations where we
     1047 *                      will deassociate the chunk mutex if
     1048 *                      considered safe.
     1049 */
     1050static int gmmR0ChunkMutexRelease(PGMMR0CHUNKMTXSTATE pMtxState, PGMMCHUNK pChunk)
     1051{
     1052    int rc = RTSemFastMutexRelease(pMtxState->pGMM->ahChunkMtx[pMtxState->iChunkMtx]);
     1053    AssertRC(rc);
     1054    if (pMtxState->fFlags == GMMR0CHUNK_MTX_RETAKE_GIANT)
     1055        rc = gmmR0MutexAcquire(pMtxState->pGMM);
     1056    pMtxState->pGMM = NULL;
     1057    return rc;
     1058}
     1059
     1060
     1061/**
     1062 * Drops the giant GMM lock we kept in gmmR0ChunkMutexAcquire while keeping the
     1063 * chunk locked.
     1064 *
     1065 * This only works if gmmR0ChunkMutexAcquire was called with
     1066 * GMMR0CHUNK_MTX_KEEP_GIANT.  Once the giant has been dropped this way,
     1067 * release will NOT retake it; the behavior is as if
     1068 * GMMR0CHUNK_MTX_DROP_GIANT had been used.
     1069 *
     1070 * @returns VBox status code (assuming success is ok).
     1071 * @param   pMtxState   Pointer to the chunk mutex state.
     1072 */
     1073static int gmmR0ChunkMutexDropGiant(PGMMR0CHUNKMTXSTATE pMtxState)
     1074{
     1075    AssertReturn(pMtxState->fFlags == GMMR0CHUNK_MTX_KEEP_GIANT, VERR_INTERNAL_ERROR_2);
     1076    Assert(pMtxState->pGMM->hMtxOwner == RTThreadNativeSelf());
     1077    pMtxState->fFlags = GMMR0CHUNK_MTX_DROP_GIANT;
     1078    /** @todo GMM life cycle cleanup (we may race someone
     1079     *        destroying and cleaning up GMM)? */
     1080    return gmmR0MutexRelease(pMtxState->pGMM);
    9221081}
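
Together with GMMR0CHUNK_MTX_KEEP_GIANT this enables a pay-as-you-go pattern: keep the giant while inspecting a chunk and only drop it once expensive work turns out to be necessary. Condensed from gmmR0CleanupVMScanChunk below (fNeedExpensiveWork is a hypothetical stand-in for the real "found a mapping to free" test):

    static bool sketchPayAsYouGo(PGMM pGMM, PGMMCHUNK pChunk, bool fNeedExpensiveWork)
    {
        GMMR0CHUNKMTXSTATE MtxState;
        gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT);
        if (fNeedExpensiveWork)
        {
            gmmR0ChunkMutexDropGiant(&MtxState); /* giant released, chunk mtx kept */
            /* ... RTR0MemObjFree and similarly slow operations ... */
            gmmR0ChunkMutexRelease(&MtxState, pChunk);
            return true;                         /* caller must retake the giant */
        }
        gmmR0ChunkMutexRelease(&MtxState, pChunk); /* the giant was never let go */
        return false;
    }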
    9231082
     
    9631122        Assert(pGMM->cRegisteredVMs);
    9641123        pGMM->cRegisteredVMs--;
    965 #if 0 /* disabled so it won't hide bugs. */
    966         if (!pGMM->cRegisteredVMs)
    967         {
    968             RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM);
    969 
    970             for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
     1124
     1125        /*
     1126         * Walk the entire pool looking for pages that belong to this VM
     1127         * and left over mappings.  (This'll only catch private pages,
     1128         * shared pages will be 'left behind'.)
     1129         */
     1130        uint64_t    cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
     1131
     1132        unsigned    iCountDown = 64;
     1133        bool        fRedoFromStart;
     1134        PGMMCHUNK   pChunk;
     1135        do
     1136        {
     1137            fRedoFromStart = false;
     1138            RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode)
    9711139            {
    972                 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
    973                 pGMM->ChunkTLB.aEntries[i].pChunk = NULL;
    974             }
    975 
    976             memset(&pGMM->Private, 0, sizeof(pGMM->Private));
    977             memset(&pGMM->Shared, 0, sizeof(pGMM->Shared));
    978 
    979             memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId));
    980             ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);
    981 
    982             pGMM->cReservedPages = 0;
    983             pGMM->cOverCommittedPages = 0;
    984             pGMM->cAllocatedPages = 0;
    985             pGMM->cSharedPages = 0;
    986             pGMM->cDuplicatePages = 0;
    987             pGMM->cLeftBehindSharedPages = 0;
    988             pGMM->cChunks = 0;
    989             pGMM->cBalloonedPages = 0;
    990         }
    991         else
    992 #endif
    993         {
    994             /*
    995              * Walk the entire pool looking for pages that belong to this VM
    996              * and left over mappings.  (This'll only catch private pages,
    997              * shared pages will be 'left behind'.)
    998              */
    999             uint64_t    cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
    1000 
    1001             unsigned    iCountDown = 64;
    1002             bool        fRedoFromStart;
    1003             PGMMCHUNK   pChunk;
    1004             do
    1005             {
    1006                 fRedoFromStart = false;
    1007                 RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode)
     1140                uint32_t const cFreeChunksOld = pGMM->cFreedChunks;
     1141                if (gmmR0CleanupVMScanChunk(pGMM, pGVM, pChunk))
    10081142                {
    1009                     if (   !gmmR0CleanupVMScanChunk(pGVM, pChunk)
    1010                         || iCountDown != 0)
     1143                    gmmR0MutexAcquire(pGMM);
     1144                    uLockNanoTS = RTTimeSystemNanoTS();
     1145                }
     1146                else
     1147                {
     1148                    if (!iCountDown)
     1149                        gmmR0MutexYield(pGMM, &uLockNanoTS);
     1150                    else
    10111151                        iCountDown--;
    1012                     else
    1013                     {
    1014                         iCountDown = 64;
    1015                         uint32_t const cFreeChunksOld = pGMM->cFreedChunks;
    1016                         fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
    1017                                       && pGMM->cFreedChunks != cFreeChunksOld;
    1018                         if (fRedoFromStart)
    1019                             break;
    1020                     }
    10211152                }
    1022             } while (fRedoFromStart);
    1023 
    1024             if (pGVM->gmm.s.cPrivatePages)
    1025                 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
    1026 
    1027             pGMM->cAllocatedPages -= cPrivatePages;
    1028 
    1029             /*
    1030              * Free empty chunks.
    1031              */
    1032             do
    1033             {
    1034                 iCountDown = 10240;
    1035                 pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
    1036                 while (pChunk)
    1037                 {
    1038                     PGMMCHUNK pNext = pChunk->pFreeNext;
    1039                     if (    pChunk->cFree == GMM_CHUNK_NUM_PAGES
    1040                         &&  (   !pGMM->fBoundMemoryMode
    1041                              || pChunk->hGVM == pGVM->hSelf))
    1042                     {
    1043                         gmmR0FreeChunk(pGMM, pGVM, pChunk);
    1044                         iCountDown = 1;
    1045                     }
    1046                     pChunk = pNext;
    1047 
    1048                     if (--iCountDown == 0)
    1049                     {
    1050                         uint64_t const idGenerationOld = pGMM->Private.idGeneration;
    1051                         fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
    1052                                       && pGMM->Private.idGeneration != idGenerationOld;
    1053                         if (fRedoFromStart)
    1054                             break;
    1055                         iCountDown = 10240;
    1056                     }
    1057                 }
    1058             } while (fRedoFromStart);
    1059 
    1060             /*
    1061              * Account for shared pages that weren't freed.
    1062              */
    1063             if (pGVM->gmm.s.cSharedPages)
    1064             {
    1065                 Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
    1066                 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
    1067                 pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
    1068             }
    1069 
    1070             /*
    1071              * Clean up balloon statistics in case the VM process crashed.
    1072              */
    1073             Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
    1074             pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
    1075 
    1076             /*
    1077              * Update the over-commitment management statistics.
    1078              */
    1079             pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
    1080                                   + pGVM->gmm.s.Reserved.cFixedPages
    1081                                   + pGVM->gmm.s.Reserved.cShadowPages;
    1082             switch (pGVM->gmm.s.enmPolicy)
    1083             {
    1084                 case GMMOCPOLICY_NO_OC:
    1085                     break;
    1086                 default:
    1087                     /** @todo Update GMM->cOverCommittedPages */
     1153                if (pGMM->cFreedChunks != cFreeChunksOld)
    10881154                    break;
    10891155            }
     1156        } while (fRedoFromStart);
     1157
     1158        if (pGVM->gmm.s.cPrivatePages)
     1159            SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
     1160
     1161        pGMM->cAllocatedPages -= cPrivatePages;
     1162
     1163        /*
     1164         * Free empty chunks.
     1165         */
     1166        do
     1167        {
     1168            iCountDown = 10240;
     1169            pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
     1170            while (pChunk)
     1171            {
     1172                PGMMCHUNK pNext = pChunk->pFreeNext;
     1173                if (    pChunk->cFree == GMM_CHUNK_NUM_PAGES
     1174                    &&  (   !pGMM->fBoundMemoryMode
     1175                         || pChunk->hGVM == pGVM->hSelf))
     1176                {
     1177                    gmmR0FreeChunk(pGMM, pGVM, pChunk);
     1178                    iCountDown = 1;
     1179                }
     1180                pChunk = pNext;
     1181
     1182                if (--iCountDown == 0)
     1183                {
     1184                    uint64_t const idGenerationOld = pGMM->Private.idGeneration;
     1185                    fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
     1186                                  && pGMM->Private.idGeneration != idGenerationOld;
     1187                    if (fRedoFromStart)
     1188                        break;
     1189                    iCountDown = 10240;
     1190                }
     1191            }
     1192        } while (fRedoFromStart);
     1193
     1194        /*
     1195         * Account for shared pages that weren't freed.
     1196         */
     1197        if (pGVM->gmm.s.cSharedPages)
     1198        {
     1199            Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages);
     1200            SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages);
     1201            pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages;
     1202        }
     1203
     1204        /*
     1205         * Clean up balloon statistics in case the VM process crashed.
     1206         */
     1207        Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
     1208        pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
     1209
     1210        /*
     1211         * Update the over-commitment management statistics.
     1212         */
     1213        pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages
     1214                              + pGVM->gmm.s.Reserved.cFixedPages
     1215                              + pGVM->gmm.s.Reserved.cShadowPages;
     1216        switch (pGVM->gmm.s.enmPolicy)
     1217        {
     1218            case GMMOCPOLICY_NO_OC:
     1219                break;
     1220            default:
     1221                /** @todo Update GMM->cOverCommittedPages */
     1222                break;
    10901223        }
    10911224    }
     
    11071240 * Scan one chunk for private pages belonging to the specified VM.
    11081241 *
    1109  * @returns @c true if a mapping was found (and freed), @c false if not.
     1242 * @note    This function is ugly since it may drop ownership of the giant GMM
     1243 *          mutex!
     1244 *
     1245 * @returns @c true if we've dropped the giant mutex, @c false if we didn't.
     1246 * @param   pGMM        Pointer to the GMM instance.
    11101247 * @param   pGVM        The global VM handle.
    11111248 * @param   pChunk      The chunk to scan.
    11121249 */
    1113 static bool gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk)
     1250static bool gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
    11141251{
    11151252    /*
     
    11761313
    11771314    /*
     1315     * If not in bound memory mode, we should reset the hGVM field
     1316     * if it has our handle in it.
     1317     */
     1318    if (pChunk->hGVM == pGVM->hSelf)
     1319    {
     1320        if (!g_pGMM->fBoundMemoryMode)
     1321            pChunk->hGVM = NIL_GVM_HANDLE;
     1322        else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES)
     1323        {
     1324            SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
     1325                        pChunk, pChunk->Core.Key, pChunk->cFree);
     1326            AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
     1327
     1328            gmmR0UnlinkChunk(pChunk);
     1329            pChunk->cFree = GMM_CHUNK_NUM_PAGES;
     1330            gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
     1331        }
     1332    }
     1333
     1334    /*
    11781335     * Look for a mapping belonging to the terminating VM.
    11791336     */
    1180     unsigned cMappings = pChunk->cMappings;
    1181     bool fMappingFreed = true;
     1337    GMMR0CHUNKMTXSTATE MtxState;
     1338    gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT);
     1339    unsigned cMappings = pChunk->cMappingsX;
    11821340    for (unsigned i = 0; i < cMappings; i++)
    1183         if (pChunk->paMappings[i].pGVM == pGVM)
    1184         {
    1185             RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;
     1341        if (pChunk->paMappingsX[i].pGVM == pGVM)
     1342        {
     1343            gmmR0ChunkMutexDropGiant(&MtxState);
     1344
     1345            RTR0MEMOBJ hMemObj = pChunk->paMappingsX[i].hMapObj;
    11861346
    11871347            cMappings--;
    11881348            if (i < cMappings)
    1189                  pChunk->paMappings[i] = pChunk->paMappings[cMappings];
    1190             pChunk->paMappings[cMappings].pGVM   = NULL;
    1191             pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
    1192             Assert(pChunk->cMappings - 1U == cMappings);
    1193             pChunk->cMappings = cMappings;
    1194 
    1195 /** @todo Leave the GMM mutex when doing this, it's expensive. */
    1196             int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
     1349                 pChunk->paMappingsX[i] = pChunk->paMappingsX[cMappings];
     1350            pChunk->paMappingsX[cMappings].pGVM    = NULL;
     1351            pChunk->paMappingsX[cMappings].hMapObj = NIL_RTR0MEMOBJ;
     1352            Assert(pChunk->cMappingsX - 1U == cMappings);
     1353            pChunk->cMappingsX = cMappings;
     1354
     1355            int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings (NA) */);
    11971356            if (RT_FAILURE(rc))
    11981357            {
    11991358                SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n",
    1200                             pChunk, pChunk->Core.Key, i, MemObj, rc);
     1359                            pChunk, pChunk->Core.Key, i, hMemObj, rc);
    12011360                AssertRC(rc);
    12021361            }
    1203             fMappingFreed = true;
    1204             break;
    1205         }
    1206 
    1207     /*
    1208      * If not in bound memory mode, we should reset the hGVM field
    1209      * if it has our handle in it.
    1210      */
    1211     if (pChunk->hGVM == pGVM->hSelf)
    1212     {
    1213         if (!g_pGMM->fBoundMemoryMode)
    1214             pChunk->hGVM = NIL_GVM_HANDLE;
    1215         else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES)
    1216         {
    1217             SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n",
    1218                         pChunk, pChunk->Core.Key, pChunk->cFree);
    1219             AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree));
    1220 
    1221             gmmR0UnlinkChunk(pChunk);
    1222             pChunk->cFree = GMM_CHUNK_NUM_PAGES;
    1223             gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private);
    1224         }
    1225     }
    1226 
    1227     return fMappingFreed;
    1228 }
    1229 
    1230 
    1231 /**
    1232  * RTAvlU32Destroy callback for GMMR0CleanupVM.
    1233  *
    1234  * @returns 0
    1235  * @param   pNode   The node (allocation chunk) to destroy.
    1236  * @param   pvGVM   Pointer to the shared VM structure.
    1237  */
    1238 /*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM)
    1239 {
    1240     PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
    1241     PGVM pGVM = (PGVM)pvGVM;
    1242 
    1243     for (unsigned i = 0; i < pChunk->cMappings; i++)
    1244     {
    1245         if (pChunk->paMappings[i].pGVM != pGVM)
    1246             SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p expected %p\n", pChunk,
    1247                         pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM);
    1248         int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
    1249         if (RT_FAILURE(rc))
    1250         {
    1251             SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk,
    1252                         pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc);
    1253             AssertRC(rc);
    1254         }
    1255     }
    1256 
    1257     int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */);
    1258     if (RT_FAILURE(rc))
    1259     {
    1260         SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk,
    1261                     pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings);
    1262         AssertRC(rc);
    1263     }
    1264     pChunk->MemObj = NIL_RTR0MEMOBJ;
    1265 
    1266     RTMemFree(pChunk->paMappings);
    1267     pChunk->paMappings = NULL;
    1268 
    1269     RTMemFree(pChunk);
    1270     return 0;
     1362            gmmR0ChunkMutexRelease(&MtxState, pChunk);
     1363            return true;
     1364        }
     1365
     1366    gmmR0ChunkMutexRelease(&MtxState, pChunk);
     1367    return false;
    12711368}
    12721369
     
    16061703    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
    16071704    if (RT_LIKELY(pChunk))
    1608         return RTR0MemObjGetPagePhysAddr(pChunk->MemObj, idPage & GMM_PAGEID_IDX_MASK);
     1705        return RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, idPage & GMM_PAGEID_IDX_MASK);
    16091706    return NIL_RTHCPHYS;
    16101707}
     
    17641861         * Initialize it.
    17651862         */
    1766         pChunk->MemObj = MemObj;
    1767         pChunk->cFree = GMM_CHUNK_NUM_PAGES;
    1768         pChunk->hGVM = hGVM;
     1863        pChunk->hMemObj     = MemObj;
     1864        pChunk->cFree       = GMM_CHUNK_NUM_PAGES;
     1865        pChunk->hGVM        = hGVM;
    17691866        /*pChunk->iFreeHead = 0;*/
    1770         pChunk->idNumaNode = GMM_CHUNK_NUMA_ID_UNKNOWN;
    1771         pChunk->enmType = enmChunkType;
     1867        pChunk->idNumaNode  = GMM_CHUNK_NUMA_ID_UNKNOWN;
     1868        pChunk->enmType     = enmChunkType;
    17721869        for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
    17731870        {
     
    17761873        }
    17771874        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
    1778         pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
     1875        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext   = UINT16_MAX;
    17791876
    17801877        /*
     
    19902087
    19912088    /* update the page descriptor. */
    1992     pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->MemObj, iPage);
     2089    pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, iPage);
    19932090    Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS);
    19942091    pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage;
     
    26882785    Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);
    26892786
     2787    GMMR0CHUNKMTXSTATE MtxState;
     2788    gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT);
     2789
    26902790    /*
    26912791     * Cleanup hack! Unmap the chunk from the callers address space.
    26922792     */
    2693     if (    pChunk->cMappings
     2793    if (    pChunk->cMappingsX
     2794        &&  !pGMM->fLegacyAllocationMode
    26942795        &&  pGVM)
    2695         gmmR0UnmapChunk(pGMM, pGVM, pChunk);
     2796        gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
    26962797
    26972798    /*
     
    27002801     * it won't be a likely candidate for allocations.
    27012802     */
    2702     if (   pChunk->cMappings
    2703         || pChunk->cMappingsInProgress)
     2803    if (pChunk->cMappingsX)
    27042804    {
    27052805        /** @todo R0 -> VM request */
    27062806        /* The chunk can be mapped by more than one VM if fBoundMemoryMode is false! */
    2707         Log(("gmmR0FreeChunk: chunk still has %d/%d mappings; don't free!\n", pChunk->cMappings, pChunk->cMappingsInProgress));
      2807        Log(("gmmR0FreeChunk: chunk still has %d mappings; don't free!\n", pChunk->cMappingsX));
    27082808    }
    27092809    else
     
    27122812         * Try free the memory object.
    27132813         */
    2714         int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */);
     2814/** @todo drop the giant lock here! */
     2815        int rc = RTR0MemObjFree(pChunk->hMemObj, false /* fFreeMappings */);
    27152816        if (RT_SUCCESS(rc))
    27162817        {
    2717             pChunk->MemObj = NIL_RTR0MEMOBJ;
     2818            pChunk->hMemObj = NIL_RTR0MEMOBJ;
    27182819
    27192820            /*
     
    27432844            pChunk->Core.Key = NIL_GMM_CHUNKID;
    27442845
    2745             RTMemFree(pChunk->paMappings);
    2746             pChunk->paMappings = NULL;
     2846            RTMemFree(pChunk->paMappingsX);
     2847            pChunk->paMappingsX = NULL;
    27472848
    27482849            RTMemFree(pChunk);
     2850            pChunk = NULL;              /* (for gmmR0ChunkMutexRelease) */
    27492851
    27502852            pGMM->cFreedChunks++;
     
    27532855            AssertRC(rc);
    27542856    }
     2857
     2858    gmmR0ChunkMutexRelease(&MtxState, pChunk);
    27552859}
    27562860
     
    28062910        if (RT_UNLIKELY(   pChunk->cFree == GMM_CHUNK_NUM_PAGES
    28072911                        && pChunk->pFreeNext
    2808                         && pChunk->pFreePrev
     2912                        && pChunk->pFreePrev /** @todo this is probably misfiring, see reset... */
    28092913                        && !pGMM->fLegacyAllocationMode))
    28102914            gmmR0FreeChunk(pGMM, NULL, pChunk);
     
    33613465
    33623466/**
    3363  * Unmaps a chunk previously mapped into the address space of the current process.
     3467 * Worker for gmmR0UnmapChunk and gmmR0FreeChunk.
     3468 *
     3469 * Don't call this in legacy allocation mode!
    33643470 *
    33653471 * @returns VBox status code.
     
    33683474 * @param   pChunk      Pointer to the chunk to be unmapped.
    33693475 */
    3370 static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
    3371 {
    3372     if (!pGMM->fLegacyAllocationMode)
    3373     {
    3374         /*
    3375          * Find the mapping and try unmapping it.
    3376          */
    3377         uint32_t cMappings = pChunk->cMappings;
    3378         for (uint32_t i = 0; i < cMappings; i++)
    3379         {
    3380             Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
    3381             if (pChunk->paMappings[i].pGVM == pGVM)
     3476static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
     3477{
     3478    Assert(!pGMM->fLegacyAllocationMode);
     3479
     3480    /*
     3481     * Find the mapping and try unmapping it.
     3482     */
     3483    uint32_t cMappings = pChunk->cMappingsX;
     3484    for (uint32_t i = 0; i < cMappings; i++)
     3485    {
     3486        Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ);
     3487        if (pChunk->paMappingsX[i].pGVM == pGVM)
     3488        {
     3489            /* unmap */
     3490            int rc = RTR0MemObjFree(pChunk->paMappingsX[i].hMapObj, false /* fFreeMappings (NA) */);
     3491            if (RT_SUCCESS(rc))
    33823492            {
    3383                 /* unmap */
    3384                 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */);
    3385                 if (RT_SUCCESS(rc))
    3386                 {
    3387                     /* update the record. */
    3388                     cMappings--;
    3389                     if (i < cMappings)
    3390                         pChunk->paMappings[i] = pChunk->paMappings[cMappings];
    3391                     pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
    3392                     pChunk->paMappings[cMappings].pGVM   = NULL;
    3393                     Assert(pChunk->cMappings - 1U == cMappings);
    3394                     pChunk->cMappings = cMappings;
    3395                 }
    3396                 return rc;
     3493                /* update the record. */
     3494                cMappings--;
     3495                if (i < cMappings)
     3496                    pChunk->paMappingsX[i] = pChunk->paMappingsX[cMappings];
     3497                pChunk->paMappingsX[cMappings].hMapObj = NIL_RTR0MEMOBJ;
     3498                pChunk->paMappingsX[cMappings].pGVM    = NULL;
     3499                Assert(pChunk->cMappingsX - 1U == cMappings);
     3500                pChunk->cMappingsX = cMappings;
    33973501            }
    3398         }
    3399     }
    3400     else if (pChunk->hGVM == pGVM->hSelf)
    3401         return VINF_SUCCESS;
     3502
     3503            return rc;
     3504        }
     3505    }
    34023506
    34033507    Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
    34043508    return VERR_GMM_CHUNK_NOT_MAPPED;
     3509}
     3510
     3511
     3512/**
     3513 * Unmaps a chunk previously mapped into the address space of the current process.
     3514 *
     3515 * @returns VBox status code.
     3516 * @param   pGMM        Pointer to the GMM instance data.
     3517 * @param   pGVM        Pointer to the Global VM structure.
     3518 * @param   pChunk      Pointer to the chunk to be unmapped.
     3519 */
     3520static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
     3521{
     3522    if (!pGMM->fLegacyAllocationMode)
     3523    {
     3524        /*
     3525         * Lock the chunk and if possible leave the giant GMM lock.
     3526         */
     3527        GMMR0CHUNKMTXSTATE MtxState;
     3528        int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
     3529                                        fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
     3530        if (RT_SUCCESS(rc))
     3531        {
     3532            rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk);
     3533            gmmR0ChunkMutexRelease(&MtxState, pChunk);
     3534        }
     3535        return rc;
     3536    }
     3537
     3538    if (pChunk->hGVM == pGVM->hSelf)
     3539        return VINF_SUCCESS;
     3540
     3541    Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x (legacy)\n", pChunk->Core.Key, pGVM, pGVM->hSelf));
     3542    return VERR_GMM_CHUNK_NOT_MAPPED;
     3543}
     3544
     3545
     3546/**
     3547 * Worker for gmmR0MapChunk.
     3548 *
     3549 * @returns VBox status code.
     3550 * @param   pGMM        Pointer to the GMM instance data.
     3551 * @param   pGVM        Pointer to the Global VM structure.
     3552 * @param   pChunk      Pointer to the chunk to be mapped.
     3553 * @param   ppvR3       Where to store the ring-3 address of the mapping.
     3554 *                      In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this
     3555 *                      will contain the address of the existing mapping.
     3556 */
     3557static int gmmR0MapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
     3558{
     3559    /*
     3560     * If we're in legacy mode this is simple.
     3561     */
     3562    if (pGMM->fLegacyAllocationMode)
     3563    {
     3564        if (pChunk->hGVM != pGVM->hSelf)
     3565        {
     3566            Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
     3567            return VERR_GMM_CHUNK_NOT_FOUND;
     3568        }
     3569
     3570        *ppvR3 = RTR0MemObjAddressR3(pChunk->hMemObj);
     3571        return VINF_SUCCESS;
     3572    }
     3573
     3574    /*
     3575     * Check to see if the chunk is already mapped.
     3576     */
     3577    for (uint32_t i = 0; i < pChunk->cMappingsX; i++)
     3578    {
     3579        Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ);
     3580        if (pChunk->paMappingsX[i].pGVM == pGVM)
     3581        {
     3582            *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappingsX[i].hMapObj);
     3583            Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
     3584#ifdef VBOX_WITH_PAGE_SHARING
     3585            /* The ring-3 chunk cache can be out of sync; don't fail. */
     3586            return VINF_SUCCESS;
     3587#else
     3588            return VERR_GMM_CHUNK_ALREADY_MAPPED;
     3589#endif
     3590        }
     3591    }
     3592
     3593    /*
     3594     * Do the mapping.
     3595     */
     3596    RTR0MEMOBJ hMapObj;
     3597    int rc = RTR0MemObjMapUser(&hMapObj, pChunk->hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
     3598    if (RT_SUCCESS(rc))
     3599    {
     3600        /* reallocate the array? assumes few users per chunk (usually one). */
     3601        unsigned iMapping = pChunk->cMappingsX;
     3602        if (   iMapping <= 3
     3603            || (iMapping & 3) == 0)
     3604        {
     3605            unsigned cNewSize = iMapping <= 3
     3606                              ? iMapping + 1
     3607                              : iMapping + 4;
     3608            Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize);
     3609            if (RT_UNLIKELY(cNewSize > UINT16_MAX))
     3610            {
     3611                rc = RTR0MemObjFree(hMapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
     3612                return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS;
     3613            }
     3614
     3615            void *pvMappings = RTMemRealloc(pChunk->paMappingsX, cNewSize * sizeof(pChunk->paMappingsX[0]));
     3616            if (RT_UNLIKELY(!pvMappings))
     3617            {
     3618                rc = RTR0MemObjFree(hMapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
     3619                return VERR_NO_MEMORY;
     3620            }
     3621            pChunk->paMappingsX = (PGMMCHUNKMAP)pvMappings;
     3622        }
     3623
     3624        /* insert new entry */
     3625        pChunk->paMappingsX[iMapping].hMapObj = hMapObj;
     3626        pChunk->paMappingsX[iMapping].pGVM    = pGVM;
     3627        Assert(pChunk->cMappingsX == iMapping);
     3628        pChunk->cMappingsX = iMapping + 1;
     3629
     3630        *ppvR3 = RTR0MemObjAddressR3(hMapObj);
     3631    }
     3632
     3633    return rc;
    34053634}
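
The reallocation test above grows paMappingsX one slot at a time up to four entries and then in steps of four (8, 12, 16, ...), which suits the expected one-mapping-per-chunk common case. A standalone restatement of the condition (illustration only, not code from the file):

    #include <stdbool.h>

    /* True when inserting a mapping at index iMapping requires growing the
     * array; resulting capacities run 1, 2, 3, 4, 8, 12, ... */
    static bool myNeedsGrow(unsigned iMapping)
    {
        return iMapping <= 3 || (iMapping & 3) == 0;
    }
    /* myNeedsGrow(0..3) -> true (grow by one slot each time);
       myNeedsGrow(4) -> true (grow to 8); myNeedsGrow(5..7) -> false. */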
    34063635
     
    34213650static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem, PRTR3PTR ppvR3)
    34223651{
    3423     Assert(pGMM->hMtxOwner == RTThreadNativeSelf());
    3424 
    3425     /*
    3426      * If we're in legacy mode this is simple.
    3427      */
    3428     if (pGMM->fLegacyAllocationMode)
    3429     {
    3430         if (pChunk->hGVM != pGVM->hSelf)
    3431         {
    3432             Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
    3433             return VERR_GMM_CHUNK_NOT_FOUND;
    3434         }
    3435 
    3436         *ppvR3 = RTR0MemObjAddressR3(pChunk->MemObj);
    3437         return VINF_SUCCESS;
    3438     }
    3439 
    3440     /*
    3441      * Check to see if the chunk is already mapped.
    3442      */
    3443     for (uint32_t i = 0; i < pChunk->cMappings; i++)
    3444     {
    3445         Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
    3446         if (pChunk->paMappings[i].pGVM == pGVM)
    3447         {
    3448             *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
    3449             Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3));
    3450 #ifdef VBOX_WITH_PAGE_SHARING
    3451             /* The ring-3 chunk cache can be out of sync; don't fail. */
    3452             return VINF_SUCCESS;
    3453 #else
    3454             return VERR_GMM_CHUNK_ALREADY_MAPPED;
    3455 #endif
    3456         }
    3457     }
    3458 
    3459     /*
    3460      * Do the mapping.  Leave the semaphore when possible since mapping memory
    3461      * into the user process can be very expensive.
    3462      *
    3463      * ASSUMES that all mappers will hold the PGM lock and therefore prevent
    3464      * other threads from mapping the memory into the same process.
    3465      */
    3466     RTR0MEMOBJ MapObj;
    3467     int rc;
    3468     if (   !fRelaxedSem
    3469         || pChunk->cFree == GMM_CHUNK_NUM_PAGES)
    3470         rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
    3471     else
    3472     {
    3473         pChunk->cMappingsInProgress++;
    3474         gmmR0MutexRelease(pGMM);
    3475         Assert(PGMIsLockOwner(pGVM->pVM));
    3476 
    3477         rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
    3478 
    3479         int rc2 = gmmR0MutexAcquire(pGMM); AssertRC(rc2);
    3480         pChunk->cMappingsInProgress--;
    3481     }
     3652    /*
     3653     * Take the chunk lock and leave the giant GMM lock when possible, then
     3654     * call the worker function.
     3655     */
     3656    GMMR0CHUNKMTXSTATE MtxState;
     3657    int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
     3658                                    fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
    34823659    if (RT_SUCCESS(rc))
    34833660    {
    3484         /* reallocate the array? assumes few users per chunk (usually one). */
    3485         unsigned iMapping = pChunk->cMappings;
    3486         if (   iMapping <= 3
    3487             || (iMapping & 3) == 0)
    3488         {
    3489             unsigned cNewSize = iMapping <= 3
    3490                               ? iMapping + 1
    3491                               : iMapping + 4;
    3492             Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize);
    3493             if (RT_UNLIKELY(cNewSize > UINT16_MAX))
    3494             {
    3495                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
    3496                 return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS;
    3497             }
    3498 
    3499             void *pvMappings = RTMemRealloc(pChunk->paMappings, cNewSize * sizeof(pChunk->paMappings[0]));
    3500             if (RT_UNLIKELY(!pvMappings))
    3501             {
    3502                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
    3503                 return VERR_NO_MEMORY;
    3504             }
    3505             pChunk->paMappings = (PGMMCHUNKMAP)pvMappings;
    3506         }
    3507 
    3508         /* insert new entry */
    3509         pChunk->paMappings[iMapping].MapObj = MapObj;
    3510         pChunk->paMappings[iMapping].pGVM   = pGVM;
    3511         Assert(pChunk->cMappings == iMapping);
    3512         pChunk->cMappings = iMapping + 1;
    3513 
    3514         *ppvR3 = RTR0MemObjAddressR3(MapObj);
     3661        rc = gmmR0MapChunkLocked(pGMM, pGVM, pChunk, ppvR3);
     3662        gmmR0ChunkMutexRelease(&MtxState, pChunk);
    35153663    }
    35163664
     
    35193667
    35203668
     3669
    35213670/**
    35223671 * Check if a chunk is mapped into the specified VM
    35233672 *
    35243673 * @returns mapped yes/no
     3674 * @param   pGMM        Pointer to the GMM instance.
    35253675 * @param   pGVM        Pointer to the Global VM structure.
    35263676 * @param   pChunk      Pointer to the chunk to be mapped.
    35273677 * @param   ppvR3       Where to store the ring-3 address of the mapping.
    35283678 */
    3529 static int gmmR0IsChunkMapped(PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
    3530 {
    3531     /*
    3532      * Check to see if the chunk is already mapped.
    3533      */
    3534     for (uint32_t i = 0; i < pChunk->cMappings; i++)
    3535     {
    3536         Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
    3537         if (pChunk->paMappings[i].pGVM == pGVM)
    3538         {
    3539             *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);
     3679static int gmmR0IsChunkMapped(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
     3680{
     3681    GMMR0CHUNKMTXSTATE MtxState;
     3682    gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT);
     3683    for (uint32_t i = 0; i < pChunk->cMappingsX; i++)
     3684    {
     3685        Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ);
     3686        if (pChunk->paMappingsX[i].pGVM == pGVM)
     3687        {
     3688            *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappingsX[i].hMapObj);
     3689            gmmR0ChunkMutexRelease(&MtxState, pChunk);
    35403690            return true;
    35413691        }
    35423692    }
    35433693    *ppvR3 = NULL;
     3694    gmmR0ChunkMutexRelease(&MtxState, pChunk);
    35443695    return false;
    35453696}
     
    36133764            }
    36143765        }
     3766/** @todo split this operation, the bail out might (theoretically) not be
     3767 *        entirely safe. */
    36153768
    36163769        if (    idChunkUnmap != NIL_GMM_CHUNKID
     
    36193772            PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap);
    36203773            if (RT_LIKELY(pUnmap))
    3621                 rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap);
     3774                rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap, true /*fRelaxedSem*/);
    36223775            else
    36233776            {
     
    36273780
    36283781            if (RT_FAILURE(rc) && pMap)
    3629                 gmmR0UnmapChunk(pGMM, pGVM, pMap);
     3782                gmmR0UnmapChunk(pGMM, pGVM, pMap, false /*fRelaxedSem*/);
    36303783        }
    36313784
     
    41744327        if (pChunk)
    41754328        {
    4176             if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
     4329            if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk))
    41774330            {
    41784331                Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #3\n", pPageDesc->uHCPhysPageId));
     
    41964349
    41974350        /* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */
    4198         if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
     4351        if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk))
    41994352        {
    42004353            Log(("Map chunk into process!\n"));
     
    44964649
    44974650    /* Only take chunks not mapped into this VM process; not entirely correct. */
    4498     if (!gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
     4651    if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk))
    44994652    {
    45004653        int rc = gmmR0MapChunk(pGMM, pGVM, pChunk, false /*fRelaxedSem*/, (PRTR3PTR)&pbChunk);
     
    45184671                }
    45194672            }
    4520             gmmR0UnmapChunk(pGMM, pGVM, pChunk);
     4673            gmmR0UnmapChunk(pGMM, pGVM, pChunk, false /*fRelaxedSem*/);
    45214674        }
    45224675    }
     
    45594712        if (pChunk)
    45604713        {
    4561             if (gmmR0IsChunkMapped(pGVM, pChunk, (PRTR3PTR)&pbChunk))
     4714            if (gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk))
    45624715            {
    45634716                uint8_t *pbSourcePage = pbChunk + ((pReq->idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);