VirtualBox

Changeset 82978 in vbox for trunk


Ignore:
Timestamp:
Feb 4, 2020 2:52:50 PM (5 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
135987
Message:

VMM/GMMR0: Introduce a spinlock to protect the AVL tree and associated TLB. bugref:9627

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/GMMR0.cpp

    r82977 r82978  
    177177#include <iprt/mp.h>
    178178#include <iprt/semaphore.h>
     179#include <iprt/spinlock.h>
    179180#include <iprt/string.h>
    180181#include <iprt/time.h>
     
    522523    RTNATIVETHREAD      hMtxOwner;
    523524#endif
    524     /** The chunk tree. */
     525    /** Spinlock protecting the AVL tree.
     526     * @todo Make this a read-write spinlock as we should allow concurrent
     527     *       lookups. */
     528    RTSPINLOCK          hSpinLockTree;
     529    /** The chunk tree.
     530     * Protected by hSpinLockTree. */
    525531    PAVLU32NODECORE     pChunks;
    526     /** The chunk TLB. */
     532    /** The chunk TLB.
     533     * Protected by hSpinLockTree. */
    527534    GMMCHUNKTLB         ChunkTLB;
    528535    /** The private free set. */
     
    810817                break;
    811818        }
     819        pGMM->hSpinLockTree = NIL_RTSPINLOCK;
     820        if (RT_SUCCESS(rc))
     821            rc = RTSpinlockCreate(&pGMM->hSpinLockTree, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "gmm-chunk-tree");
    812822        if (RT_SUCCESS(rc))
    813823        {
     
    884894         * Bail out.
    885895         */
     896        RTSpinlockDestroy(pGMM->hSpinLockTree);
    886897        while (iMtx-- > 0)
    887898            RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);
     
    931942    pGMM->hMtx        = NIL_RTSEMFASTMUTEX;
    932943#endif
     944    RTSpinlockDestroy(pGMM->hSpinLockTree);
     945    pGMM->hSpinLockTree = NIL_RTSPINLOCK;
    933946
    934947    /* Free any chunks still hanging around. */
     
    18451858 * @param   idChunk     The ID of the chunk to find.
    18461859 * @param   pTlbe       Pointer to the TLB entry.
     1860 *
     1861 * @note    Caller owns spinlock.
    18471862 */
    18481863static PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)
     
    18571872
    18581873/**
    1859  * Finds a allocation chunk.
     1874 * Finds an allocation chunk, spin-locked.
    18601875 *
    18611876 * This is not expected to fail and will bitch if it does.
     
    18651880 * @param   idChunk     The ID of the chunk to find.
    18661881 */
     1882DECLINLINE(PGMMCHUNK) gmmR0GetChunkLocked(PGMM pGMM, uint32_t idChunk)
     1883{
     1884    /*
     1885     * Do a TLB lookup, branch if not in the TLB.
     1886     */
     1887    PGMMCHUNKTLBE pTlbe  = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
     1888    PGMMCHUNK     pChunk = pTlbe->pChunk;
     1889    if (   pChunk == NULL
     1890        || pTlbe->idChunk != idChunk)
     1891        pChunk = gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
     1892    return pChunk;
     1893}
     1894
     1895
     1896/**
     1897 * Finds an allocation chunk.
     1898 *
     1899 * This is not expected to fail and will bitch if it does.
     1900 *
     1901 * @returns Pointer to the allocation chunk, NULL if not found.
     1902 * @param   pGMM        Pointer to the GMM instance.
     1903 * @param   idChunk     The ID of the chunk to find.
     1904 */
    18671905DECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
    18681906{
    1869     /*
    1870      * Do a TLB lookup, branch if not in the TLB.
    1871      */
    1872     PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
    1873     if (    pTlbe->idChunk != idChunk
    1874         ||  !pTlbe->pChunk)
    1875         return gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
    1876     return pTlbe->pChunk;
     1907    RTSpinlockAcquire(pGMM->hSpinLockTree);
     1908    PGMMCHUNK pChunk = gmmR0GetChunkLocked(pGMM, idChunk);
     1909    RTSpinlockRelease(pGMM->hSpinLockTree);
     1910    return pChunk;
    18771911}
    18781912
     
    22392273            {
    22402274                pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
    2241                 if (    pChunk->Core.Key != NIL_GMM_CHUNKID
    2242                     &&  pChunk->Core.Key <= GMM_CHUNKID_LAST
    2243                     &&  RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
     2275                if (   pChunk->Core.Key != NIL_GMM_CHUNKID
     2276                    && pChunk->Core.Key <= GMM_CHUNKID_LAST)
    22442277                {
    2245                     pGMM->cChunks++;
    2246                     RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
    2247                     gmmR0LinkChunk(pChunk, pSet);
    2248                     LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
    2249 
    2250                     if (ppChunk)
    2251                         *ppChunk = pChunk;
    2252                     GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
    2253                     return VINF_SUCCESS;
     2278                    RTSpinlockAcquire(pGMM->hSpinLockTree);
     2279                    if (RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
     2280                    {
     2281                        pGMM->cChunks++;
     2282                        RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
     2283                        RTSpinlockRelease(pGMM->hSpinLockTree);
     2284
     2285                        gmmR0LinkChunk(pChunk, pSet);
     2286
     2287                        LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
     2288
     2289                        if (ppChunk)
     2290                            *ppChunk = pChunk;
     2291                        GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
     2292                        return VINF_SUCCESS;
     2293                    }
     2294                    RTSpinlockRelease(pGMM->hSpinLockTree);
    22542295                }
    22552296
     
    33583399    gmmR0UnlinkChunk(pChunk);
    33593400
     3401    RTSpinlockAcquire(pGMM->hSpinLockTree);
     3402
    33603403    RTListNodeRemove(&pChunk->ListNode);
    33613404
     
    33723415    Assert(pGMM->cChunks > 0);
    33733416    pGMM->cChunks--;
     3417
     3418    RTSpinlockRelease(pGMM->hSpinLockTree);
    33743419
    33753420    /*
     
    43984443 * Gets the ring-0 virtual address for the given page.
    43994444 *
     4445 * This is used by PGM when IEM and such wants to access guest RAM from ring-0.
     4446 * One of the ASSUMPTIONS here is that the @a idPage is used by the VM and the
     4447 * corresponding chunk will remain valid beyond the call (at least till the EMT
     4448 * returns to ring-3).
     4449 *
    44004450 * @returns VBox status code.
    44014451 * @param   pGVM        Pointer to the kernel-only VM instance data.
     
    44094459    PGMM pGMM;
    44104460    GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
    4411     gmmR0MutexAcquire(pGMM); /** @todo shared access */
     4461
     4462    RTSpinlockAcquire(pGMM->hSpinLockTree);
    44124463
    44134464    int rc;
    4414     PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
    4415     if (pChunk)
     4465    PGMMCHUNK pChunk = gmmR0GetChunkLocked(pGMM, idPage >> GMM_CHUNKID_SHIFT);
     4466    if (RT_LIKELY(pChunk))
    44164467    {
    44174468        const GMMPAGE *pPage = &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];
     
    44304481        rc = VERR_GMM_PAGE_NOT_FOUND;
    44314482
    4432     gmmR0MutexRelease(pGMM);
     4483    RTSpinlockRelease(pGMM->hSpinLockTree);
    44334484    return rc;
    44344485}
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette