- Timestamp: Feb 4, 2020 2:52:50 PM (5 years ago)
- svn:sync-xref-src-repo-rev: 135987
- File: 1 edited
Legend: unmodified lines carry no prefix, added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
(r82977 → r82978)

@@ old 177-180, new 177-181 @@
 #include <iprt/mp.h>
 #include <iprt/semaphore.h>
+#include <iprt/spinlock.h>
 #include <iprt/string.h>
 #include <iprt/time.h>

@@ old 522-528, new 523-535 @@
     RTNATIVETHREAD      hMtxOwner;
 #endif
-    /** The chunk tree. */
+    /** Spinlock protecting the AVL tree.
+     * @todo Make this a read-write spinlock as we should allow concurrent
+     *       lookups. */
+    RTSPINLOCK          hSpinLockTree;
+    /** The chunk tree.
+     * Protected by hSpinLockTree. */
     PAVLU32NODECORE     pChunks;
-    /** The chunk TLB. */
+    /** The chunk TLB.
+     * Protected by hSpinLockTree. */
     GMMCHUNKTLB         ChunkTLB;
     /** The private free set. */

@@ old 810-813, new 817-823 @@
                 break;
             }
+        pGMM->hSpinLockTree = NIL_RTSPINLOCK;
+        if (RT_SUCCESS(rc))
+            rc = RTSpinlockCreate(&pGMM->hSpinLockTree, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "gmm-chunk-tree");
         if (RT_SUCCESS(rc))
         {

@@ old 884-887, new 894-898 @@
          * Bail out.
          */
+        RTSpinlockDestroy(pGMM->hSpinLockTree);
         while (iMtx-- > 0)
             RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);

@@ old 931-934, new 942-947 @@
     pGMM->hMtx = NIL_RTSEMFASTMUTEX;
 #endif
+    RTSpinlockDestroy(pGMM->hSpinLockTree);
+    pGMM->hSpinLockTree = NIL_RTSPINLOCK;
 
     /* Free any chunks still hanging around. */

@@ old 1845-1848, new 1858-1863 @@
  * @param   idChunk     The ID of the chunk to find.
  * @param   pTlbe       Pointer to the TLB entry.
+ *
+ * @note    Caller owns spinlock.
  */
 static PGMMCHUNK gmmR0GetChunkSlow(PGMM pGMM, uint32_t idChunk, PGMMCHUNKTLBE pTlbe)

@@ old 1857-1878, new 1872-1912 @@
 
 /**
- * Finds a allocation chunk.
+ * Finds a allocation chunk, spin-locked.
  *
  * This is not expected to fail and will bitch if it does.
 …
  * @param   idChunk     The ID of the chunk to find.
  */
+DECLINLINE(PGMMCHUNK) gmmR0GetChunkLocked(PGMM pGMM, uint32_t idChunk)
+{
+    /*
+     * Do a TLB lookup, branch if not in the TLB.
+     */
+    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
+    PGMMCHUNK pChunk = pTlbe->pChunk;
+    if (   pChunk == NULL
+        || pTlbe->idChunk != idChunk)
+        pChunk = gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
+    return pChunk;
+}
+
+
+/**
+ * Finds a allocation chunk.
+ *
+ * This is not expected to fail and will bitch if it does.
+ *
+ * @returns Pointer to the allocation chunk, NULL if not found.
+ * @param   pGMM        Pointer to the GMM instance.
+ * @param   idChunk     The ID of the chunk to find.
+ */
 DECLINLINE(PGMMCHUNK) gmmR0GetChunk(PGMM pGMM, uint32_t idChunk)
 {
-    /*
-     * Do a TLB lookup, branch if not in the TLB.
-     */
-    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(idChunk)];
-    if (   pTlbe->idChunk != idChunk
-        || !pTlbe->pChunk)
-        return gmmR0GetChunkSlow(pGMM, idChunk, pTlbe);
-    return pTlbe->pChunk;
+    RTSpinlockAcquire(pGMM->hSpinLockTree);
+    PGMMCHUNK pChunk = gmmR0GetChunkLocked(pGMM, idChunk);
+    RTSpinlockRelease(pGMM->hSpinLockTree);
+    return pChunk;
 }

@@ old 2239-2254, new 2273-2295 @@
     {
         pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
-        if (   pChunk->Core.Key != NIL_GMM_CHUNKID
-            && pChunk->Core.Key <= GMM_CHUNKID_LAST
-            && RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
+        if (   pChunk->Core.Key != NIL_GMM_CHUNKID
+            && pChunk->Core.Key <= GMM_CHUNKID_LAST)
         {
-            pGMM->cChunks++;
-            RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
-            gmmR0LinkChunk(pChunk, pSet);
-            LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
-
-            if (ppChunk)
-                *ppChunk = pChunk;
-            GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
-            return VINF_SUCCESS;
+            RTSpinlockAcquire(pGMM->hSpinLockTree);
+            if (RTAvlU32Insert(&pGMM->pChunks, &pChunk->Core))
+            {
+                pGMM->cChunks++;
+                RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
+                RTSpinlockRelease(pGMM->hSpinLockTree);
+
+                gmmR0LinkChunk(pChunk, pSet);
+
+                LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
+
+                if (ppChunk)
+                    *ppChunk = pChunk;
+                GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
+                return VINF_SUCCESS;
+            }
+            RTSpinlockRelease(pGMM->hSpinLockTree);
         }

@@ old 3358-3361, new 3399-3404 @@
     gmmR0UnlinkChunk(pChunk);
 
+    RTSpinlockAcquire(pGMM->hSpinLockTree);
+
     RTListNodeRemove(&pChunk->ListNode);
 

@@ old 3372-3375, new 3415-3420 @@
     Assert(pGMM->cChunks > 0);
     pGMM->cChunks--;
+
+    RTSpinlockRelease(pGMM->hSpinLockTree);
 
     /*

@@ old 4398-4401, new 4443-4451 @@
  * Gets the ring-0 virtual address for the given page.
  *
+ * This is used by PGM when IEM and such wants to access guest RAM from ring-0.
+ * One of the ASSUMPTIONS here is that the @a idPage is used by the VM and the
+ * corresponding chunk will remain valid beyond the call (at least till the EMT
+ * returns to ring-3).
+ *
  * @returns VBox status code.
  * @param   pGVM        Pointer to the kernel-only VM instace data.

@@ old 4409-4417, new 4459-4468 @@
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
-    gmmR0MutexAcquire(pGMM); /** @todo shared access */
+
+    RTSpinlockAcquire(pGMM->hSpinLockTree);
 
     int rc;
-    PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
-    if (pChunk)
+    PGMMCHUNK pChunk = gmmR0GetChunkLocked(pGMM, idPage >> GMM_CHUNKID_SHIFT);
+    if (RT_LIKELY(pChunk))
     {
         const GMMPAGE *pPage = &pChunk->aPages[idPage & GMM_PAGEID_IDX_MASK];

@@ old 4430-4434, new 4481-4485 @@
         rc = VERR_GMM_PAGE_NOT_FOUND;
 
-    gmmR0MutexRelease(pGMM);
+    RTSpinlockRelease(pGMM->hSpinLockTree);
     return rc;
 }
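The heart of the change is easiest to see in isolation: chunk-ID lookups consult a small direct-mapped TLB first, fall back to the slower authoritative structure (the AVL tree) on a miss, and the whole sequence is now bracketed by pGMM->hSpinLockTree rather than the giant GMM mutex. The sketch below models that pattern in plain user-mode C under stated assumptions: the names (ChunkDb, chunkLookup, TLB_ENTRIES, MAX_CHUNKS) are hypothetical, a pthread mutex stands in for the interrupt-safe IPRT spinlock, and a flat array stands in for the RTAvlU32 tree, so this is an illustration of the locking/caching scheme, not VirtualBox code.

/*
 * Minimal user-mode sketch of the lookup pattern r82978 puts under the new
 * spinlock: a direct-mapped TLB in front of a slower ID-to-chunk structure,
 * with both guarded by one lock.  All names are made up for illustration;
 * the real ring-0 code uses RTSpinlock* and RTAvlU32*.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_ENTRIES 32u                         /* must be a power of two */
#define TLB_IDX(id) ((id) & (TLB_ENTRIES - 1))  /* direct-mapped hash */
#define MAX_CHUNKS  1024u

typedef struct Chunk
{
    uint32_t idChunk;
    void    *pvMapping;
} Chunk;

typedef struct ChunkTlbEntry
{
    uint32_t idChunk;
    Chunk   *pChunk;
} ChunkTlbEntry;

typedef struct ChunkDb
{
    pthread_mutex_t Lock;                   /* stand-in for hSpinLockTree */
    ChunkTlbEntry   aTlb[TLB_ENTRIES];      /* direct-mapped lookup cache */
    Chunk          *apChunks[MAX_CHUNKS];   /* stand-in for the AVL tree */
} ChunkDb;

/* Slow path: consult the authoritative structure and refill the TLB entry.
   Caller must hold the lock (mirrors the "@note Caller owns spinlock"). */
static Chunk *chunkLookupSlow(ChunkDb *pDb, uint32_t idChunk, ChunkTlbEntry *pTlbe)
{
    Chunk *pChunk = idChunk < MAX_CHUNKS ? pDb->apChunks[idChunk] : NULL;
    if (pChunk)
    {
        pTlbe->idChunk = idChunk;           /* cache the hit for next time */
        pTlbe->pChunk  = pChunk;
    }
    return pChunk;
}

/* Fast path under the lock: a TLB hit never touches the big structure. */
static Chunk *chunkLookupLocked(ChunkDb *pDb, uint32_t idChunk)
{
    ChunkTlbEntry *pTlbe  = &pDb->aTlb[TLB_IDX(idChunk)];
    Chunk         *pChunk = pTlbe->pChunk;
    if (!pChunk || pTlbe->idChunk != idChunk)
        pChunk = chunkLookupSlow(pDb, idChunk, pTlbe);
    return pChunk;
}

/* Public lookup: take the lock around the TLB check and the slow path,
   the same bracketing gmmR0GetChunk now does with the spinlock. */
static Chunk *chunkLookup(ChunkDb *pDb, uint32_t idChunk)
{
    pthread_mutex_lock(&pDb->Lock);
    Chunk *pChunk = chunkLookupLocked(pDb, idChunk);
    pthread_mutex_unlock(&pDb->Lock);
    return pChunk;
}

int main(void)
{
    static ChunkDb Db      = { .Lock = PTHREAD_MUTEX_INITIALIZER };
    static Chunk   Chunk42 = { 42, NULL };
    Db.apChunks[42] = &Chunk42;

    printf("first lookup:  %p\n", (void *)chunkLookup(&Db, 42)); /* slow path fills the TLB */
    printf("second lookup: %p\n", (void *)chunkLookup(&Db, 42)); /* TLB hit */
    printf("missing id:    %p\n", (void *)chunkLookup(&Db, 7));  /* NULL */
    return 0;
}

The @todo added to the GMM structure already points at the obvious refinement: a read/write spinlock would let concurrent lookups proceed in parallel while still serialising chunk insertion and removal.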