- Timestamp: Nov 11, 2021 12:40:35 AM (3 years ago)
- svn:sync-xref-src-repo-rev: 148193
- File: 1 edited
Legend:
- Unmodified (context lines, shown with a leading space)
- Added (shown with a leading "+")
- Removed (shown with a leading "-")
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r92341)
+++ trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r92342)

@@ -545 +545 @@
      * Used as a hint to avoid scanning the whole bitmap. */
     uint32_t            idChunkPrev;
+    /** Spinlock protecting idChunkPrev & bmChunkId. */
+    RTSPINLOCK          hSpinLockChunkId;
     /** Chunk ID allocation bitmap.
      * Bits of allocated IDs are set, free ones are clear.

@@ -755 +757 @@
     if (RT_SUCCESS(rc))
         rc = RTSpinlockCreate(&pGMM->hSpinLockTree, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "gmm-chunk-tree");
+    pGMM->hSpinLockChunkId = NIL_RTSPINLOCK;
+    if (RT_SUCCESS(rc))
+        rc = RTSpinlockCreate(&pGMM->hSpinLockChunkId, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "gmm-chunk-id");
     if (RT_SUCCESS(rc))
     {

@@ -800 +805 @@
      * Bail out.
      */
+    RTSpinlockDestroy(pGMM->hSpinLockChunkId);
     RTSpinlockDestroy(pGMM->hSpinLockTree);
     while (iMtx-- > 0)

@@ -850 +856 @@
     RTSpinlockDestroy(pGMM->hSpinLockTree);
     pGMM->hSpinLockTree = NIL_RTSPINLOCK;
+    RTSpinlockDestroy(pGMM->hSpinLockChunkId);
+    pGMM->hSpinLockChunkId = NIL_RTSPINLOCK;

     /* Free any chunks still hanging around. */

@@ -1979 +1987 @@
 {
     AssertReturnVoid(idChunk != NIL_GMM_CHUNKID);
+    RTSpinlockAcquire(pGMM->hSpinLockChunkId); /* We could probably skip the locking here, I think. */
+
     AssertMsg(ASMBitTest(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk));
     ASMAtomicBitClear(&pGMM->bmChunkId[0], idChunk);
+
+    RTSpinlockRelease(pGMM->hSpinLockChunkId);
 }

@@ -1995 +2007 @@
     AssertCompile(NIL_GMM_CHUNKID == 0);

+    RTSpinlockAcquire(pGMM->hSpinLockChunkId);
+
     /*
      * Try the next sequential one.
      */
     int32_t idChunk = ++pGMM->idChunkPrev;
-    if (   (uint32_t)idChunk <= GMM_CHUNKID_LAST
-        && idChunk > NIL_GMM_CHUNKID
-        && !ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
-        return idChunk;
-
-    /*
-     * Scan sequentially from the last one.
-     */
-    if (   (uint32_t)idChunk < GMM_CHUNKID_LAST
-        && idChunk > NIL_GMM_CHUNKID)
-    {
-        idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk - 1);
-        if (idChunk > NIL_GMM_CHUNKID)
+    if (   (uint32_t)idChunk <= GMM_CHUNKID_LAST
+        && idChunk > NIL_GMM_CHUNKID)
+    {
+        if (!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk))
         {
-            AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
-            return pGMM->idChunkPrev = idChunk;
+            RTSpinlockRelease(pGMM->hSpinLockChunkId);
+            return idChunk;
+        }
+
+        /*
+         * Scan sequentially from the last one.
+         */
+        if ((uint32_t)idChunk < GMM_CHUNKID_LAST)
+        {
+            idChunk = ASMBitNextClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1, idChunk);
+            if (idChunk > NIL_GMM_CHUNKID)
+            {
+                AssertMsgReturnStmt(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk),
+                                    RTSpinlockRelease(pGMM->hSpinLockChunkId), NIL_GMM_CHUNKID);
+
+                pGMM->idChunkPrev = idChunk;
+                RTSpinlockRelease(pGMM->hSpinLockChunkId);
+                return idChunk;
+            }
         }
     }

@@ -2023 +2045 @@
      */
     idChunk = ASMBitFirstClear(&pGMM->bmChunkId[0], GMM_CHUNKID_LAST + 1);
-    AssertMsgReturn(idChunk > NIL_GMM_CHUNKID, ("%#x\n", idChunk), NIL_GVM_HANDLE);
-    AssertMsgReturn(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk), NIL_GMM_CHUNKID);
-
-    return pGMM->idChunkPrev = idChunk;
+    AssertMsgReturnStmt(idChunk > NIL_GMM_CHUNKID && idChunk <= GMM_CHUNKID_LAST, ("%#x\n", idChunk),
+                        RTSpinlockRelease(pGMM->hSpinLockChunkId), NIL_GVM_HANDLE);
+    AssertMsgReturnStmt(!ASMAtomicBitTestAndSet(&pGMM->bmChunkId[0], idChunk), ("%#x\n", idChunk),
+                        RTSpinlockRelease(pGMM->hSpinLockChunkId), NIL_GMM_CHUNKID);
+
+    pGMM->idChunkPrev = idChunk;
+    RTSpinlockRelease(pGMM->hSpinLockChunkId);
+    return idChunk;
 }

@@ -2123 +2149 @@
  * @param   pSession        Same as @a hGVM.
  * @param   fChunkFlags     The chunk flags, GMM_CHUNK_FLAGS_XXX.
+ * @param   cPages          The number of pages requested. Zero for large pages.
+ * @param   paPages         The page descriptor table (input + output). NULL for
+ *                          large pages.
+ * @param   piPage          The pointer to the page descriptor table index variable.
+ *                          This will be updated. NULL for large pages.
 * @param   ppChunk         Chunk address (out).

@@ -2132 +2163 @@
                               uint16_t fChunkFlags, uint32_t cPages, PGMMPAGEDESC paPages, uint32_t *piPage, PGMMCHUNK *ppChunk)
 {
+    /*
+     * Validate input & state.
+     */
     Assert(pGMM->hMtxOwner != RTThreadNativeSelf());
     Assert(hGVM != NIL_GVM_HANDLE || pGMM->fBoundMemoryMode);

@@ -2167 +2201 @@

     /*
-     * Allocate a chunk.
+     * Allocate a chunk and an ID for it.
      */
     int rc;

@@ -2173 +2207 @@
     if (pChunk)
     {
-        /*
-         * Initialize it.
-         */
-        pChunk->hMemObj = hMemObj;
-#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
-        pChunk->pbMapping = pbMapping;
-#endif
-        pChunk->hGVM = hGVM;
-        pChunk->idNumaNode = gmmR0GetCurrentNumaNodeId();
-        pChunk->iChunkMtx = UINT8_MAX;
-        pChunk->fFlags = fChunkFlags;
-        pChunk->uidOwner = pSession ? SUPR0GetSessionUid(pSession) : NIL_RTUID;
-        /*pChunk->cShared = 0; */
-
-        uint32_t const iDstPageFirst = piPage ? *piPage : cPages;
-        if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
+        pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
+        if (   pChunk->Core.Key != NIL_GMM_CHUNKID
+            && pChunk->Core.Key <= GMM_CHUNKID_LAST)
         {
             /*
-             * Allocate the requested number of pages from the start of the chunk,
-             * queue the rest (if any) on the free list.
+             * Initialize it.
              */
-            uint32_t const cPagesAlloc = RT_MIN(cPages - iDstPageFirst, GMM_CHUNK_NUM_PAGES);
-            pChunk->cPrivate = cPagesAlloc;
-            pChunk->cFree = GMM_CHUNK_NUM_PAGES - cPagesAlloc;
-            pChunk->iFreeHead = GMM_CHUNK_NUM_PAGES > cPagesAlloc ? cPagesAlloc : UINT16_MAX;
-
-            /* Alloc pages: */
-            uint32_t iPage;
-            uint32_t iDstPage = iDstPageFirst;
-            for (iPage = 0; iPage < cPagesAlloc; iPage++, iDstPage++)
-            {
-                if (paPages[iDstPage].HCPhysGCPhys <= GMM_GCPHYS_LAST)
-                    pChunk->aPages[iPage].Private.pfn = paPages[iDstPage].HCPhysGCPhys >> PAGE_SHIFT;
-                else
-                    pChunk->aPages[iPage].Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
-                pChunk->aPages[iPage].Private.hGVM = hGVM;
-                pChunk->aPages[iPage].Private.u2State = GMM_PAGE_STATE_PRIVATE;
-
-                paPages[iDstPage].HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
-                paPages[iDstPage].fZeroed = true;
-                paPages[iDstPage].idPage = iPage; /* The chunk ID will be added as soon as we got one. */
-                paPages[iDstPage].idSharedPage = NIL_GMM_PAGEID;
-            }
-            *piPage = iDstPage;
-
-            /* Build free list: */
-            if (iPage < RT_ELEMENTS(pChunk->aPages))
-            {
-                Assert(pChunk->iFreeHead == iPage);
-                for (; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
-                {
-                    pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
-                    pChunk->aPages[iPage].Free.fZeroed = true;
-                    pChunk->aPages[iPage].Free.iNext = iPage + 1;
-                }
-                pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
-                pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.fZeroed = true;
-                pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
-            }
-            else
-                Assert(pChunk->iFreeHead == UINT16_MAX);
-        }
-        else
-        {
-            /*
-             * Large page: Mark all pages as privately allocated (watered down gmmR0AllocatePage).
-             */
-            pChunk->cFree = 0;
-            pChunk->cPrivate = GMM_CHUNK_NUM_PAGES;
-            pChunk->iFreeHead = UINT16_MAX;
-
-            for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages); iPage++)
-            {
-                pChunk->aPages[iPage].Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
-                pChunk->aPages[iPage].Private.hGVM = hGVM;
-                pChunk->aPages[iPage].Private.u2State = GMM_PAGE_STATE_PRIVATE;
-            }
-        }
-
-        /*
-         * Zero the memory if it wasn't zeroed by the host already.
-         * This simplifies keeping secret kernel bits from userland and brings
-         * everyone to the same level wrt allocation zeroing.
-         */
-        rc = VINF_SUCCESS;
-        if (!RTR0MemObjWasZeroInitialized(hMemObj))
-        {
-#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+            pChunk->hMemObj = hMemObj;
+#ifndef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+            pChunk->pbMapping = pbMapping;
+#endif
+            pChunk->hGVM = hGVM;
+            pChunk->idNumaNode = gmmR0GetCurrentNumaNodeId();
+            pChunk->iChunkMtx = UINT8_MAX;
+            pChunk->fFlags = fChunkFlags;
+            pChunk->uidOwner = pSession ? SUPR0GetSessionUid(pSession) : NIL_RTUID;
+            /*pChunk->cShared = 0; */
+
+            uint32_t const iDstPageFirst = piPage ? *piPage : cPages;
             if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
             {
-                for (uint32_t iPage = 0; iPage < GMM_CHUNK_NUM_PAGES; iPage++)
+                /*
+                 * Allocate the requested number of pages from the start of the chunk,
+                 * queue the rest (if any) on the free list.
+                 */
+                uint32_t const cPagesAlloc = RT_MIN(cPages - iDstPageFirst, GMM_CHUNK_NUM_PAGES);
+                pChunk->cPrivate = cPagesAlloc;
+                pChunk->cFree = GMM_CHUNK_NUM_PAGES - cPagesAlloc;
+                pChunk->iFreeHead = GMM_CHUNK_NUM_PAGES > cPagesAlloc ? cPagesAlloc : UINT16_MAX;
+
+                /* Alloc pages: */
+                uint32_t const idPageChunk = pChunk->Core.Key << GMM_CHUNKID_SHIFT;
+                uint32_t iDstPage = iDstPageFirst;
+                uint32_t iPage;
+                for (iPage = 0; iPage < cPagesAlloc; iPage++, iDstPage++)
                 {
-                    void *pvPage = NULL;
-                    rc = SUPR0HCPhysToVirt(RTR0MemObjGetPagePhysAddr(hMemObj, iPage), &pvPage);
-                    AssertRCBreak(rc);
-                    RT_BZERO(pvPage, PAGE_SIZE);
+                    if (paPages[iDstPage].HCPhysGCPhys <= GMM_GCPHYS_LAST)
+                        pChunk->aPages[iPage].Private.pfn = paPages[iDstPage].HCPhysGCPhys >> PAGE_SHIFT;
+                    else
+                        pChunk->aPages[iPage].Private.pfn = GMM_PAGE_PFN_UNSHAREABLE; /* unshareable / unassigned - same thing. */
+                    pChunk->aPages[iPage].Private.hGVM = hGVM;
+                    pChunk->aPages[iPage].Private.u2State = GMM_PAGE_STATE_PRIVATE;
+
+                    paPages[iDstPage].HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, iPage);
+                    paPages[iDstPage].fZeroed = true;
+                    paPages[iDstPage].idPage = idPageChunk | iPage;
+                    paPages[iDstPage].idSharedPage = NIL_GMM_PAGEID;
                 }
+                *piPage = iDstPage;
+
+                /* Build free list: */
+                if (iPage < RT_ELEMENTS(pChunk->aPages))
+                {
+                    Assert(pChunk->iFreeHead == iPage);
+                    for (; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
+                    {
+                        pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
+                        pChunk->aPages[iPage].Free.fZeroed = true;
+                        pChunk->aPages[iPage].Free.iNext = iPage + 1;
+                    }
+                    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
+                    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.fZeroed = true;
+                    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;
+                }
+                else
+                    Assert(pChunk->iFreeHead == UINT16_MAX);
             }
             else
             {
-                /* Can do the whole large page in one go. */
-                void *pvPage = NULL;
-                rc = SUPR0HCPhysToVirt(RTR0MemObjGetPagePhysAddr(hMemObj, 0), &pvPage);
-                AssertRC(rc);
-                if (RT_SUCCESS(rc))
-                    RT_BZERO(pvPage, GMM_CHUNK_SIZE);
+                /*
+                 * Large page: Mark all pages as privately allocated (watered down gmmR0AllocatePage).
+                 */
+                pChunk->cFree = 0;
+                pChunk->cPrivate = GMM_CHUNK_NUM_PAGES;
+                pChunk->iFreeHead = UINT16_MAX;
+
+                for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages); iPage++)
+                {
+                    pChunk->aPages[iPage].Private.pfn = GMM_PAGE_PFN_UNSHAREABLE;
+                    pChunk->aPages[iPage].Private.hGVM = hGVM;
+                    pChunk->aPages[iPage].Private.u2State = GMM_PAGE_STATE_PRIVATE;
+                }
             }
+
+            /*
+             * Zero the memory if it wasn't zeroed by the host already.
+             * This simplifies keeping secret kernel bits from userland and brings
+             * everyone to the same level wrt allocation zeroing.
+             */
+            rc = VINF_SUCCESS;
+            if (!RTR0MemObjWasZeroInitialized(hMemObj))
+            {
+#ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
+                if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
+                {
+                    for (uint32_t iPage = 0; iPage < GMM_CHUNK_NUM_PAGES; iPage++)
+                    {
+                        void *pvPage = NULL;
+                        rc = SUPR0HCPhysToVirt(RTR0MemObjGetPagePhysAddr(hMemObj, iPage), &pvPage);
+                        AssertRCBreak(rc);
+                        RT_BZERO(pvPage, PAGE_SIZE);
+                    }
+                }
+                else
+                {
+                    /* Can do the whole large page in one go. */
+                    void *pvPage = NULL;
+                    rc = SUPR0HCPhysToVirt(RTR0MemObjGetPagePhysAddr(hMemObj, 0), &pvPage);
+                    AssertRC(rc);
+                    if (RT_SUCCESS(rc))
+                        RT_BZERO(pvPage, GMM_CHUNK_SIZE);
+                }
 #else
-            RT_BZERO(pbMapping, GMM_CHUNK_SIZE);
+                RT_BZERO(pbMapping, GMM_CHUNK_SIZE);
 #endif
-        }
-        if (RT_SUCCESS(rc))
-        {
-            *ppChunk = pChunk;
-
-            /*
-             * Allocate a Chunk ID and insert it into the tree.
-             * This has to be done behind the mutex of course.
-             */
-            rc = gmmR0MutexAcquire(pGMM);
+            }
             if (RT_SUCCESS(rc))
             {
-                if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
+                *ppChunk = pChunk;
+
+                /*
+                 * Allocate a Chunk ID and insert it into the tree.
+                 * This has to be done behind the mutex of course.
+                 */
+                rc = gmmR0MutexAcquire(pGMM);
+                if (RT_SUCCESS(rc))
                 {
-                    pChunk->Core.Key = gmmR0AllocateChunkId(pGMM);
-                    if (   pChunk->Core.Key != NIL_GMM_CHUNKID
-                        && pChunk->Core.Key <= GMM_CHUNKID_LAST)
+                    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
                     {
                         RTSpinlockAcquire(pGMM->hSpinLockTree);

@@ -2310 +2345 @@
                             gmmR0LinkChunk(pChunk, pSet);

-                            /* Add the chunk ID to the page descriptors if we allocated anything. */
-                            /** @todo Separate out the gmmR0AllocateChunkId() under a different lock
-                             *        and avoid needing to do this while owning the giant mutex. */
-                            if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-                            {
-                                uint32_t const idPageChunk = pChunk->Core.Key << GMM_CHUNKID_SHIFT;
-                                uint32_t iDstPage = *piPage;
-                                while (iDstPage-- > iDstPageFirst)
-                                    paPages[iDstPage].idPage |= idPageChunk;
-                            }
-
                             LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
                             GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
                             return VINF_SUCCESS;
                         }
+
+                        /*
+                         * Bail out.
+                         */
                         RTSpinlockRelease(pGMM->hSpinLockTree);
+                        rc = VERR_GMM_CHUNK_INSERT;
                     }
-
-                    /*
-                     * Bail out.
-                     */
-                    rc = VERR_GMM_CHUNK_INSERT;
+                    else
+                        rc = VERR_GMM_IS_NOT_SANE;
+                    gmmR0MutexRelease(pGMM);
                 }
-                else
-                    rc = VERR_GMM_IS_NOT_SANE;
-                gmmR0MutexRelease(pGMM);
+                *ppChunk = NULL;
             }
-
-            *ppChunk = NULL;
+            /* Undo any page allocations. */
+            if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
+            {
+                uint32_t const cToFree = pChunk->cPrivate;
+                Assert(*piPage - iDstPageFirst == cToFree);
+                for (uint32_t iDstPage = iDstPageFirst, iPage = 0; iPage < cToFree; iPage++, iDstPage++)
+                {
+                    paPages[iDstPageFirst].fZeroed = false;
+                    if (pChunk->aPages[iPage].Private.pfn == GMM_PAGE_PFN_UNSHAREABLE)
+                        paPages[iDstPageFirst].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
+                    else
+                        paPages[iDstPageFirst].HCPhysGCPhys = (RTHCPHYS)pChunk->aPages[iPage].Private.pfn << PAGE_SHIFT;
+                    paPages[iDstPageFirst].idPage = NIL_GMM_PAGEID;
+                    paPages[iDstPageFirst].idSharedPage = NIL_GMM_PAGEID;
+                }
+                *piPage = iDstPageFirst;
+            }
+
+            gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
         }
-
-        /* Undo any page allocations. */
-        if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
-        {
-            uint32_t const cToFree = pChunk->cPrivate;
-            Assert(*piPage - iDstPageFirst == cToFree);
-            for (uint32_t iDstPage = iDstPageFirst, iPage = 0; iPage < cToFree; iPage++, iDstPage++)
-            {
-                paPages[iDstPageFirst].fZeroed = false;
-                if (pChunk->aPages[iPage].Private.pfn == GMM_PAGE_PFN_UNSHAREABLE)
-                    paPages[iDstPageFirst].HCPhysGCPhys = NIL_GMMPAGEDESC_PHYS;
-                else
-                    paPages[iDstPageFirst].HCPhysGCPhys = (RTHCPHYS)pChunk->aPages[iPage].Private.pfn << PAGE_SHIFT;
-                paPages[iDstPageFirst].idPage = NIL_GMM_PAGEID;
-                paPages[iDstPageFirst].idSharedPage = NIL_GMM_PAGEID;
-            }
-            *piPage = iDstPageFirst;
-        }
-
+        else
+            rc = VERR_GMM_CHUNK_INSERT;
         RTMemFree(pChunk);
     }

@@ -3523 +3549 @@
     RTSpinlockRelease(pGMM->hSpinLockTree);

-    /*
-     * Free the Chunk ID before dropping the locks and freeing the rest.
-     */
-    gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
-    pChunk->Core.Key = NIL_GMM_CHUNKID;
-
     pGMM->cFreedChunks++;

+    /* Drop the lock. */
     gmmR0ChunkMutexRelease(&MtxState, NULL);
     if (fRelaxedSem)
         gmmR0MutexRelease(pGMM);

+    /*
+     * Flush per VM chunk TLBs if we're getting remotely close to a generation wraparound.
+     */
     if (idFreeGeneration == UINT64_MAX / 4)
         gmmR0FreeChunkFlushPerVmTlbs(pGMM);
+
+    /*
+     * Free the Chunk ID and all memory associated with the chunk.
+     */
+    gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
+    pChunk->Core.Key = NIL_GMM_CHUNKID;

     RTMemFree(pChunk->paMappingsX);
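The net effect of the changeset is that the chunk-ID bitmap (bmChunkId) and its scan hint (idChunkPrev) are now guarded by a dedicated spinlock (hSpinLockChunkId) instead of the giant GMM mutex, which lets gmmR0RegisterChunk pick the chunk ID up front and write the final page IDs in a single pass. Below is a minimal, self-contained sketch of that allocation pattern in plain C with pthreads; the names (IDALLOC, idAlloc, idFree) and the 1023-ID limit are invented for illustration and this is not the VirtualBox/IPRT implementation.

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define ID_LAST  1023u                      /* highest valid ID; 0 serves as the nil ID */
    #define BM_WORDS ((ID_LAST + 1 + 31) / 32)  /* bitmap size in 32-bit words */

    typedef struct IDALLOC
    {
        pthread_spinlock_t lock;                /* protects idPrev and bm (role of hSpinLockChunkId) */
        uint32_t           idPrev;              /* hint: the last ID handed out */
        uint32_t           bm[BM_WORDS];        /* bit set = ID in use */
    } IDALLOC;

    static int idAllocInit(IDALLOC *pAlloc)
    {
        memset(pAlloc, 0, sizeof(*pAlloc));
        pAlloc->bm[0] = 1;                      /* reserve ID 0 as the nil value */
        return pthread_spin_init(&pAlloc->lock, PTHREAD_PROCESS_PRIVATE);
    }

    /* Test-and-set a bit in the bitmap; returns 1 if the ID was free and is now taken. */
    static int idTestAndSet(IDALLOC *pAlloc, uint32_t id)
    {
        uint32_t const fMask = UINT32_C(1) << (id & 31);
        if (pAlloc->bm[id / 32] & fMask)
            return 0;
        pAlloc->bm[id / 32] |= fMask;
        return 1;
    }

    /* Returns a free ID in [1, ID_LAST], or 0 if the bitmap is exhausted. */
    static uint32_t idAlloc(IDALLOC *pAlloc)
    {
        pthread_spin_lock(&pAlloc->lock);

        /* Fast path: try the ID right after the previous one. */
        uint32_t id = pAlloc->idPrev + 1;
        if (id >= 1 && id <= ID_LAST && idTestAndSet(pAlloc, id))
        {
            pAlloc->idPrev = id;
            pthread_spin_unlock(&pAlloc->lock);
            return id;
        }

        /* Slow path: scan the whole bitmap for a clear bit. */
        for (id = 1; id <= ID_LAST; id++)
            if (idTestAndSet(pAlloc, id))
            {
                pAlloc->idPrev = id;
                pthread_spin_unlock(&pAlloc->lock);
                return id;
            }

        pthread_spin_unlock(&pAlloc->lock);
        return 0;
    }

    /* Returns an ID to the pool. */
    static void idFree(IDALLOC *pAlloc, uint32_t id)
    {
        pthread_spin_lock(&pAlloc->lock);
        pAlloc->bm[id / 32] &= ~(UINT32_C(1) << (id & 31));
        pthread_spin_unlock(&pAlloc->lock);
    }

A spinlock is a reasonable fit for this kind of allocator because the critical section is only a few bit operations on a small bitmap; the full scan is the rare fallback once the sequential hint misses.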