Changeset 92339 in vbox
- Timestamp: Nov 10, 2021 9:21:20 PM
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
--- r92326
+++ r92339
…
  */
 #if defined(VBOX_STRICT) && defined(GMMR0_WITH_SANITY_CHECK) && 0
-# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)  (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
+# define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)  (RT_LIKELY(gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0))
 #else
 # define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)  (true)
…
  * Registers a new chunk of memory.
  *
- * This is called by gmmR0AllocateOneChunk.
+ * This is called by gmmR0AllocateOneChunk and GMMR0AllocateLargePage.
+ *
+ * In the GMMR0AllocateLargePage case the GMM_CHUNK_FLAGS_LARGE_PAGE flag is
+ * set and the chunk will be registered as fully allocated to save time.
 *
 * @returns VBox status code.  On success, the giant GMM lock will be held, the
…
 * @param   pSession        Same as @a hGVM.
 * @param   fChunkFlags     The chunk flags, GMM_CHUNK_FLAGS_XXX.
- * @param   ppChunk         Chunk address (out).  Optional.
+ * @param   ppChunk         Chunk address (out).
 *
 * @remarks The caller must not own the giant GMM mutex.
…
     pChunk->pbMapping = pbMapping;
 #endif
-    pChunk->cFree      = GMM_CHUNK_NUM_PAGES;
     pChunk->hGVM       = hGVM;
-    /*pChunk->iFreeHead = 0;*/
     pChunk->idNumaNode = gmmR0GetCurrentNumaNodeId();
     pChunk->iChunkMtx  = UINT8_MAX;
     pChunk->fFlags     = fChunkFlags;
     pChunk->uidOwner   = pSession ? SUPR0GetSessionUid(pSession) : NIL_RTUID;
-    for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
-    {
-        pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
-        pChunk->aPages[iPage].Free.fZeroed = true;
-        pChunk->aPages[iPage].Free.iNext   = iPage + 1;
-    }
-    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
-    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.fZeroed = true;
-    pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext   = UINT16_MAX;
+    /*pChunk->cShared   = 0; */
+
+    if (!(fChunkFlags & GMM_CHUNK_FLAGS_LARGE_PAGE))
+    {
+        /* Queue all pages on the free list. */
+        pChunk->cFree     = GMM_CHUNK_NUM_PAGES;
+        /*pChunk->cPrivate  = 0; */
+        /*pChunk->iFreeHead = 0;*/
+
+        for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++)
+        {
+            pChunk->aPages[iPage].Free.u2State = GMM_PAGE_STATE_FREE;
+            pChunk->aPages[iPage].Free.fZeroed = true;
+            pChunk->aPages[iPage].Free.iNext   = iPage + 1;
+        }
+        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE;
+        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.fZeroed = true;
+        pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext   = UINT16_MAX;
+    }
+    else
+    {
+        /* Mark all pages as privately allocated (watered down gmmR0AllocatePage). */
+        pChunk->cFree     = 0;
+        pChunk->cPrivate  = GMM_CHUNK_NUM_PAGES;
+        pChunk->iFreeHead = UINT16_MAX;
+
+        for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages); iPage++)
+        {
+            pChunk->aPages[iPage].Private.pfn     = GMM_PAGE_PFN_UNSHAREABLE;
+            pChunk->aPages[iPage].Private.hGVM    = hGVM;
+            pChunk->aPages[iPage].Private.u2State = GMM_PAGE_STATE_PRIVATE;
+        }
+    }
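To make the new two-way initialization concrete, here is a minimal, self-contained C sketch of the same logic. MYCHUNK, MYPAGE, initChunk and the constants below are hypothetical stand-ins for illustration, not the real GMM structures:

    #include <stdint.h>
    #include <stdbool.h>

    #define NUM_PAGES       512u        /* stand-in for GMM_CHUNK_NUM_PAGES (2 MB / 4 KB) */
    #define IDX_NIL         UINT16_MAX  /* end-of-free-list marker */
    #define PFN_UNSHAREABLE UINT32_MAX  /* stand-in for GMM_PAGE_PFN_UNSHAREABLE */

    typedef struct MYPAGE
    {
        bool     fFree;   /* free-list member vs. privately allocated */
        uint16_t iNext;   /* next free page index, IDX_NIL terminates the list */
        uint32_t pfn;     /* guest PFN for private pages */
    } MYPAGE;

    typedef struct MYCHUNK
    {
        uint32_t cFree;
        uint32_t cPrivate;
        uint16_t iFreeHead;
        MYPAGE   aPages[NUM_PAGES];
    } MYCHUNK;

    /* Regular chunks queue every page on the free list; large-page chunks are
       born fully allocated, so the free list stays empty. */
    static void initChunk(MYCHUNK *pChunk, bool fLargePage)
    {
        if (!fLargePage)
        {
            pChunk->cFree     = NUM_PAGES;
            pChunk->cPrivate  = 0;
            pChunk->iFreeHead = 0;
            for (uint32_t i = 0; i < NUM_PAGES; i++)
            {
                pChunk->aPages[i].fFree = true;
                pChunk->aPages[i].iNext = i + 1 < NUM_PAGES ? (uint16_t)(i + 1) : IDX_NIL;
            }
        }
        else
        {
            pChunk->cFree     = 0;
            pChunk->cPrivate  = NUM_PAGES;
            pChunk->iFreeHead = IDX_NIL;
            for (uint32_t i = 0; i < NUM_PAGES; i++)
            {
                pChunk->aPages[i].fFree = false;
                pChunk->aPages[i].pfn   = PFN_UNSHAREABLE;
            }
        }
    }

Registering a large-page chunk as fully allocated up front is what lets GMMR0AllocateLargePage below skip walking the free list page by page: the whole chunk is handed to the VM in one step.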
…
     if (RT_SUCCESS(rc))
     {
+        *ppChunk = pChunk;
+
         /*
          * Allocate a Chunk ID and insert it into the tree.
…
         LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));

-        if (ppChunk)
-            *ppChunk = pChunk;
         GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
         return VINF_SUCCESS;
…
             }

-            /* bail out */
+            /*
+             * Bail out.
+             */
             rc = VERR_GMM_CHUNK_INSERT;
         }
…
             gmmR0MutexRelease(pGMM);
         }
+
+        *ppChunk = NULL;
     }
-
     RTMemFree(pChunk);
 }
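The ppChunk change above also tightens the out-parameter contract: the parameter is no longer optional, and it is written on every exit path (the new chunk on success, NULL on failure). A minimal sketch of that pattern, with hypothetical names (THING and registerThing are illustrative, not the real API):

    #include <stdlib.h>

    typedef struct THING { int dummy; } THING;

    #define MY_OK        0
    #define MY_ERR_ALLOC (-1)

    /* The caller always gets a defined *ppThing: the new object on success,
       NULL on failure -- no pre-initialization needed on the caller's side. */
    static int registerThing(THING **ppThing)
    {
        THING *pThing = (THING *)calloc(1, sizeof(*pThing));
        if (pThing)
        {
            *ppThing = pThing;
            return MY_OK;
        }
        *ppThing = NULL;
        return MY_ERR_ALLOC;
    }

Always defining the output keeps callers from accidentally dereferencing a stale or uninitialized pointer if they mishandle the status code.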
…
     LogFlow(("GMMR0AllocateLargePage: pGVM=%p cbPage=%x\n", pGVM, cbPage));

+    AssertPtrReturn(pIdPage, VERR_INVALID_PARAMETER);
+    *pIdPage = NIL_GMM_PAGEID;
+    AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
+    *pHCPhys = NIL_RTHCPHYS;
     AssertReturn(cbPage == GMM_CHUNK_SIZE, VERR_INVALID_PARAMETER);
-    AssertPtrReturn(pIdPage, VERR_INVALID_PARAMETER);
-    AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);

     /*
-     * Validate, get basics and take the semaphore.
+     * Validate GVM + idCpu, get basics and take the semaphore.
      */
     PGMM pGMM;
     GMM_GET_VALID_INSTANCE(pGMM, VERR_GMM_INSTANCE);
     int rc = GVMMR0ValidateGVMandEMT(pGVM, idCpu);
-    if (RT_FAILURE(rc))
-        return rc;
-
-    *pHCPhys = NIL_RTHCPHYS;
-    *pIdPage = NIL_GMM_PAGEID;
-
-    gmmR0MutexAcquire(pGMM);
-    if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
-    {
-        const unsigned cPages = (GMM_CHUNK_SIZE >> PAGE_SHIFT);
-        if (RT_UNLIKELY(  pGVM->gmm.s.Stats.Allocated.cBasePages + pGVM->gmm.s.Stats.cBalloonedPages + cPages
-                        > pGVM->gmm.s.Stats.Reserved.cBasePages))
-        {
-            Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
-                 pGVM->gmm.s.Stats.Reserved.cBasePages, pGVM->gmm.s.Stats.Allocated.cBasePages, cPages));
-            gmmR0MutexRelease(pGMM);
-            return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
-        }
-
-        /*
-         * Allocate a new large page chunk.
-         *
-         * Note! We leave the giant GMM lock temporarily as the allocation might
-         *       take a long time.  gmmR0RegisterChunk will retake it (ugly).
-         */
-        AssertCompile(GMM_CHUNK_SIZE == _2M);
-        gmmR0MutexRelease(pGMM);
-
-        RTR0MEMOBJ hMemObj;
-        rc = RTR0MemObjAllocLarge(&hMemObj, GMM_CHUNK_SIZE, GMM_CHUNK_SIZE, RTMEMOBJ_ALLOC_LARGE_F_FAST);
-        if (RT_SUCCESS(rc))
-        {
-            PGMMCHUNKFREESET pSet = pGMM->fBoundMemoryMode ? &pGVM->gmm.s.Private : &pGMM->PrivateX;
-            PGMMCHUNK pChunk;
-            rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, pGVM->hSelf, pGVM->pSession, GMM_CHUNK_FLAGS_LARGE_PAGE, &pChunk);
-            if (RT_SUCCESS(rc))
-            {
-                /*
-                 * Allocate all the pages in the chunk.
-                 */
-                /* Unlink the new chunk from the free list. */
-                gmmR0UnlinkChunk(pChunk);
-
-                /** @todo rewrite this to skip the looping. */
-                /* Allocate all pages. */
-                GMMPAGEDESC PageDesc;
-                gmmR0AllocatePage(pChunk, pGVM->hSelf, &PageDesc);
-
-                /* Return the first page as we'll use the whole chunk as one big page. */
-                *pIdPage = PageDesc.idPage;
-                *pHCPhys = PageDesc.HCPhysGCPhys;
-
-                for (unsigned i = 1; i < cPages; i++)
-                    gmmR0AllocatePage(pChunk, pGVM->hSelf, &PageDesc);
-
-                /* Update accounting. */
-                pGVM->gmm.s.Stats.Allocated.cBasePages += cPages;
-                pGVM->gmm.s.Stats.cPrivatePages        += cPages;
-                pGMM->cAllocatedPages                  += cPages;
-
-                gmmR0LinkChunk(pChunk, pSet);
-                gmmR0MutexRelease(pGMM);
-
-                LogFlow(("GMMR0AllocateLargePage: returns VINF_SUCCESS\n"));
-                return VINF_SUCCESS;
-            }
-            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
-        }
-    }
-    else
-    {
-        gmmR0MutexRelease(pGMM);
-        rc = VERR_GMM_IS_NOT_SANE;
-    }
+    if (RT_SUCCESS(rc))
+        rc = gmmR0MutexAcquire(pGMM);
+    if (RT_SUCCESS(rc))
+    {
+        if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
+        {
+            /*
+             * Check the quota.
+             */
+            /** @todo r=bird: Quota checking could be done w/o the giant mutex but using
+             *        a VM specific mutex... */
+            if (RT_LIKELY(   pGVM->gmm.s.Stats.Allocated.cBasePages + pGVM->gmm.s.Stats.cBalloonedPages + GMM_CHUNK_NUM_PAGES
+                          <= pGVM->gmm.s.Stats.Reserved.cBasePages))
+            {
+                /*
+                 * Allocate a new large page chunk.
+                 *
+                 * Note! We leave the giant GMM lock temporarily as the allocation might
+                 *       take a long time.  gmmR0RegisterChunk will retake it (ugly).
+                 */
+                AssertCompile(GMM_CHUNK_SIZE == _2M);
+                gmmR0MutexRelease(pGMM);
+
+                RTR0MEMOBJ hMemObj;
+                rc = RTR0MemObjAllocLarge(&hMemObj, GMM_CHUNK_SIZE, GMM_CHUNK_SIZE, RTMEMOBJ_ALLOC_LARGE_F_FAST);
+                if (RT_SUCCESS(rc))
+                {
+                    *pHCPhys = RTR0MemObjGetPagePhysAddr(hMemObj, 0);
+
+                    /*
+                     * Register the chunk as fully allocated.
+                     * Note! As mentioned above, this will return owning the mutex on success.
+                     */
+                    PGMMCHUNK pChunk = NULL;
+                    PGMMCHUNKFREESET const pSet = pGMM->fBoundMemoryMode ? &pGVM->gmm.s.Private : &pGMM->PrivateX;
+                    rc = gmmR0RegisterChunk(pGMM, pSet, hMemObj, pGVM->hSelf, pGVM->pSession, GMM_CHUNK_FLAGS_LARGE_PAGE, &pChunk);
+                    if (RT_SUCCESS(rc))
+                    {
+                        /*
+                         * The gmmR0RegisterChunk call already marked all pages allocated,
+                         * so we just have to fill in the return values and update stats now.
+                         */
+                        *pIdPage = pChunk->Core.Key << GMM_CHUNKID_SHIFT;
+
+                        /* Update accounting. */
+                        pGVM->gmm.s.Stats.Allocated.cBasePages += GMM_CHUNK_NUM_PAGES;
+                        pGVM->gmm.s.Stats.cPrivatePages        += GMM_CHUNK_NUM_PAGES;
+                        pGMM->cAllocatedPages                  += GMM_CHUNK_NUM_PAGES;
+
+                        gmmR0LinkChunk(pChunk, pSet);
+                        gmmR0MutexRelease(pGMM);
+
+                        LogFlow(("GMMR0AllocateLargePage: returns VINF_SUCCESS\n"));
+                        return VINF_SUCCESS;
+                    }
+
+                    /*
+                     * Bail out.
+                     */
+                    RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
+                    *pHCPhys = NIL_RTHCPHYS;
+                }
+            }
+            else
+            {
+                Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
+                     pGVM->gmm.s.Stats.Reserved.cBasePages, pGVM->gmm.s.Stats.Allocated.cBasePages, GMM_CHUNK_NUM_PAGES));
+                gmmR0MutexRelease(pGMM);
+                rc = VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
+            }
+        }
+        else
+        {
+            gmmR0MutexRelease(pGMM);
+            rc = VERR_GMM_IS_NOT_SANE;
+        }
+    }
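Because gmmR0RegisterChunk now returns the chunk fully allocated, the rewritten GMMR0AllocateLargePage derives the returned page ID directly from the chunk ID instead of looping gmmR0AllocatePage over all 512 pages. A sketch of that ID encoding, assuming the scheme visible in the changeset of chunk ID in the high bits and page index in the low bits (the shift value 16 below is an illustrative stand-in for GMM_CHUNKID_SHIFT):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNKID_SHIFT 16u  /* illustrative stand-in for GMM_CHUNKID_SHIFT */

    /* Page ID = chunk ID shifted up, ORed with the page index within the chunk.
       For a large page the whole chunk is the page, so index 0 is used -- which
       is exactly what *pIdPage = pChunk->Core.Key << GMM_CHUNKID_SHIFT yields. */
    static uint32_t pageIdFromChunk(uint32_t idChunk, uint16_t iPage)
    {
        return (idChunk << CHUNKID_SHIFT) | iPage;
    }

    int main(void)
    {
        printf("first page of chunk 0x42 -> id %#x\n",
               (unsigned)pageIdFromChunk(0x42, 0));
        return 0;
    }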