Timestamp:
    May 24, 2011 6:43:32 PM
Files:
    1 edited
Legend:
    Unchanged lines carry no prefix; lines removed (present only in r37203) are prefixed with "-", lines added (present only in r37206) with "+".
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/GMMR0.cpp  (r37203)
+++ trunk/src/VBox/VMM/VMMR0/GMMR0.cpp  (r37206)

@@ -514 +514 @@
     PAVLGCPTRNODECORE   pGlobalSharedModuleTree;

-    /** The fast mutex protecting the GMM cleanup.
-     * This is serializes VMs cleaning up their memory, so that we can
-     * safely leave the primary mutex (hMtx). */
-    RTSEMFASTMUTEX      hMtxCleanup;
     /** The chunk list. For simplifying the cleanup process. */
     RTLISTNODE          ChunkList;

@@ -697 +693 @@
 DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk);
 static uint32_t gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
-static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
+static bool gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem);
 static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
 static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);

@@ -732 +728 @@
     if (RT_SUCCESS(rc))
     {
-        rc = RTSemFastMutexCreate(&pGMM->hMtxCleanup);
+        unsigned iMtx;
+        for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->aChunkMtx); iMtx++)
+        {
+            rc = RTSemFastMutexCreate(&pGMM->aChunkMtx[iMtx].hMtx);
+            if (RT_FAILURE(rc))
+                break;
+        }
         if (RT_SUCCESS(rc))
         {
-            unsigned iMtx;
-            for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->aChunkMtx); iMtx++)
-            {
-                rc = RTSemFastMutexCreate(&pGMM->aChunkMtx[iMtx].hMtx);
-                if (RT_FAILURE(rc))
-                    break;
-            }
+            /*
+             * Check and see if RTR0MemObjAllocPhysNC works.
+             */
+#if 0 /* later, see #3170. */
+            RTR0MEMOBJ MemObj;
+            rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
             if (RT_SUCCESS(rc))
             {
-                /*
-                 * Check and see if RTR0MemObjAllocPhysNC works.
-                 */
-#if 0 /* later, see #3170. */
-                RTR0MEMOBJ MemObj;
-                rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
-                if (RT_SUCCESS(rc))
-                {
-                    rc = RTR0MemObjFree(MemObj, true);
-                    AssertRC(rc);
-                }
-                else if (rc == VERR_NOT_SUPPORTED)
-                    pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
-                else
-                    SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
+                rc = RTR0MemObjFree(MemObj, true);
+                AssertRC(rc);
+            }
+            else if (rc == VERR_NOT_SUPPORTED)
+                pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
+            else
+                SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
 #else
 # if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
             pGMM->fLegacyAllocationMode = false;
 #  if ARCH_BITS == 32
-                /* Don't reuse possibly partial chunks because of the virtual
-                   address space limitation. */
-                pGMM->fBoundMemoryMode = true;
+            /* Don't reuse possibly partial chunks because of the virtual
+               address space limitation. */
+            pGMM->fBoundMemoryMode = true;
 #  else
             pGMM->fBoundMemoryMode = false;
 #  endif
 # else
-                pGMM->fLegacyAllocationMode = true;
-                pGMM->fBoundMemoryMode = true;
+            pGMM->fLegacyAllocationMode = true;
+            pGMM->fBoundMemoryMode = true;
 # endif
 #endif

-                /*
-                 * Query system page count and guess a reasonable cMaxPages value.
-                 */
-                pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
-
-                g_pGMM = pGMM;
-                LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
-                return VINF_SUCCESS;
-            }
-
-            /*
-             * Bail out.
-             */
-            while (iMtx-- > 0)
-                RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);
-        }
+            /*
+             * Query system page count and guess a reasonable cMaxPages value.
+             */
+            pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
+
+            g_pGMM = pGMM;
+            LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
+            return VINF_SUCCESS;
+        }
+
+        /*
+         * Bail out.
+         */
+        while (iMtx-- > 0)
+            RTSemFastMutexDestroy(pGMM->aChunkMtx[iMtx].hMtx);
         RTSemFastMutexDestroy(pGMM->hMtx);
     }

@@ -828 +820 @@
     RTSemFastMutexDestroy(pGMM->hMtx);
     pGMM->hMtx = NIL_RTSEMFASTMUTEX;
-    RTSemFastMutexDestroy(pGMM->hMtxCleanup);
-    pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX;

     /* Free any chunks still hanging around. */

@@ -1114 +1104 @@
  *
  * This only works if gmmR0ChunkMutexAcquire was called with
- * GMMR0CHUNK_MTX_KEEP_GIANT. Release will NOT retake the giant
- * when dropped this way, the behavior will be like if
- * GMMR0CHUNK_MTX_DROP_GIANT was used.
+ * GMMR0CHUNK_MTX_KEEP_GIANT. gmmR0ChunkMutexRelease will retake the giant
+ * mutex, i.e. behave as if GMMR0CHUNK_MTX_RETAKE_GIANT was used.
  *
  * @returns VBox status code (assuming success is ok).

@@ -1125 +1114 @@
     AssertReturn(pMtxState->fFlags == GMMR0CHUNK_MTX_KEEP_GIANT, VERR_INTERNAL_ERROR_2);
     Assert(pMtxState->pGMM->hMtxOwner == RTThreadNativeSelf());
-    pMtxState->fFlags = GMMR0CHUNK_MTX_DROP_GIANT;
+    pMtxState->fFlags = GMMR0CHUNK_MTX_RETAKE_GIANT;
     /** @todo GMM life cycle cleanup (we may race someone
      *        destroying and cleaning up GMM)? */

@@ -1151 +1140 @@
 #endif

-    int rc = RTSemFastMutexRequest(pGMM->hMtxCleanup); AssertRC(rc);
     gmmR0MutexAcquire(pGMM);
     uint64_t uLockNanoTS = RTTimeSystemNanoTS();

@@ -1175 +1163 @@
         /*
          * Walk the entire pool looking for pages that belong to this VM
-         * and left over mappings.  (This'll only catch private pages,
+         * and leftover mappings.  (This'll only catch private pages,
          * shared pages will be 'left behind'.)
          */

@@ -1191 +1179 @@
             if (gmmR0CleanupVMScanChunk(pGMM, pGVM, pChunk))
             {
-                gmmR0MutexAcquire(pGMM);
+                /* We left the giant mutex, so reset the yield counters. */
                 uLockNanoTS = RTTimeSystemNanoTS();
+                iCountDown  = 64;
             }
             else
             {
+                /* Didn't leave it, so do normal yielding. */
                 if (!iCountDown)
                     gmmR0MutexYield(pGMM, &uLockNanoTS);

@@ -1216 +1206 @@
         do
         {
+            fRedoFromStart = false;
             iCountDown = 10240;
             pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];

@@ -1225 +1216 @@
                     || pChunk->hGVM == pGVM->hSelf))
                 {
-                    gmmR0FreeChunk(pGMM, pGVM, pChunk);
-                    iCountDown = 1;
+                    uint64_t const idGenerationOld = pGMM->Private.idGeneration;
+                    if (gmmR0FreeChunk(pGMM, pGVM, pChunk, true /*fRelaxedSem*/))
+                    {
+                        /* We've left the giant mutex, restart? (+1 for our unlink) */
+                        fRedoFromStart = pGMM->Private.idGeneration != idGenerationOld + 1;
+                        if (fRedoFromStart)
+                            break;
+                        uLockNanoTS = RTTimeSystemNanoTS();
+                        iCountDown = 10240;
+                    }
                 }
+
+                /* Advance and maybe yield the lock. */
                 pChunk = pNext;
-
                 if (--iCountDown == 0)
                 {

@@ -1281 +1281 @@
     GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
     gmmR0MutexRelease(pGMM);
-    RTSemFastMutexRelease(pGMM->hMtxCleanup);

     LogFlow(("GMMR0CleanupVM: returns\n"));

@@ -1290 +1289 @@
  * Scan one chunk for private pages belonging to the specified VM.
  *
- * @note This function is ugly since may drop the ownership of the giant GMM
- *       mutex!
- *
- * @returns @c true if we've dropped the giant mutex, @c false if we didn't.
+ * @note This function may drop the gian mutex!
+ *
+ * @returns @c true if we've temporarily dropped the giant mutex, @c false if
+ *          we didn't.
  * @param   pGMM    Pointer to the GMM instance.
  * @param   pGVM    The global VM handle.

@@ -1410 +1409 @@
         AssertRC(rc);
     }
+
     gmmR0ChunkMutexRelease(&MtxState, pChunk);
     return true;

@@ -2819 +2819 @@

         /* Release the memory immediately. */
-        gmmR0FreeChunk(pGMM, NULL, pChunk);
+        gmmR0FreeChunk(pGMM, NULL, pChunk, false /*fRelaxedSem*/); /** @todo this can be relaxed too! */

         /* Update accounting. */

@@ -2868 +2868 @@
  *          unmap and free the chunk in one go.
  * @param   pChunk      The chunk to free.
- */
-static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk)
+ * @param   fRelaxedSem Whether we can release the semaphore while doing the
+ *                      freeing (@c true) or not.
+ */
+static bool gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
 {
     Assert(pChunk->Core.Key != NIL_GMM_CHUNKID);

@@ -2878 +2880 @@
     /*
      * Cleanup hack! Unmap the chunk from the callers address space.
+     * This shouldn't happen, so screw lock contention...
      */
     if (   pChunk->cMappingsX

@@ -2894 +2897 @@
         /* The chunk can be mapped by more than one VM if fBoundMemoryMode is false! */
         Log(("gmmR0FreeChunk: chunk still has %d/%d mappings; don't free!\n", pChunk->cMappingsX));
-    }
-    else
-    {
-        /*
-         * Try free the memory object.
-         */
-        /** @todo drop the giant lock here! */
-        int rc = RTR0MemObjFree(pChunk->hMemObj, false /* fFreeMappings */);
-        if (RT_SUCCESS(rc))
-        {
-            pChunk->hMemObj = NIL_RTR0MEMOBJ;
-
-            /*
-             * Unlink it from everywhere.
-             */
-            gmmR0UnlinkChunk(pChunk);
-
-            RTListNodeRemove(&pChunk->ListNode);
-
-            PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
-            Assert(pCore == &pChunk->Core); NOREF(pCore);
-
-            PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
-            if (pTlbe->pChunk == pChunk)
-            {
-                pTlbe->idChunk = NIL_GMM_CHUNKID;
-                pTlbe->pChunk = NULL;
-            }
-
-            Assert(pGMM->cChunks > 0);
-            pGMM->cChunks--;
-
-            /*
-             * Free the Chunk ID and struct.
-             */
-            gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
-            pChunk->Core.Key = NIL_GMM_CHUNKID;
-
-            RTMemFree(pChunk->paMappingsX);
-            pChunk->paMappingsX = NULL;
-
-            RTMemFree(pChunk);
-            pChunk = NULL; /* (for gmmR0ChunkMutexRelease) */
-
-            pGMM->cFreedChunks++;
-        }
-        else
-            AssertRC(rc);
-    }
-
-    gmmR0ChunkMutexRelease(&MtxState, pChunk);
+        gmmR0ChunkMutexRelease(&MtxState, pChunk);
+        return false;
+    }
+
+
+    /*
+     * Save and trash the handle.
+     */
+    RTR0MEMOBJ const hMemObj = pChunk->hMemObj;
+    pChunk->hMemObj = NIL_RTR0MEMOBJ;
+
+    /*
+     * Unlink it from everywhere.
+     */
+    gmmR0UnlinkChunk(pChunk);
+
+    RTListNodeRemove(&pChunk->ListNode);
+
+    PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key);
+    Assert(pCore == &pChunk->Core); NOREF(pCore);
+
+    PGMMCHUNKTLBE pTlbe = &pGMM->ChunkTLB.aEntries[GMM_CHUNKTLB_IDX(pChunk->Core.Key)];
+    if (pTlbe->pChunk == pChunk)
+    {
+        pTlbe->idChunk = NIL_GMM_CHUNKID;
+        pTlbe->pChunk = NULL;
+    }
+
+    Assert(pGMM->cChunks > 0);
+    pGMM->cChunks--;
+
+    /*
+     * Free the Chunk ID before dropping the locks and freeing the rest.
+     */
+    gmmR0FreeChunkId(pGMM, pChunk->Core.Key);
+    pChunk->Core.Key = NIL_GMM_CHUNKID;
+
+    pGMM->cFreedChunks++;
+
+    gmmR0ChunkMutexRelease(&MtxState, NULL);
+    if (fRelaxedSem)
+        gmmR0MutexRelease(pGMM);
+
+    RTMemFree(pChunk->paMappingsX);
+    pChunk->paMappingsX = NULL;
+
+    RTMemFree(pChunk);
+
+    int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
+    AssertLogRelRC(rc);
+
+    if (fRelaxedSem)
+        gmmR0MutexAcquire(pGMM);
+    return fRelaxedSem;
 }


@@ -2997 +3003 @@
      * a bit...
      */
+    /** @todo Do this on the way out. */
     if (RT_UNLIKELY(   pChunk->cFree == GMM_CHUNK_NUM_PAGES
                     && pChunk->pFreeNext
                     && pChunk->pFreePrev /** @todo this is probably misfiring, see reset... */
                     && !pGMM->fLegacyAllocationMode))
-        gmmR0FreeChunk(pGMM, NULL, pChunk);
+        gmmR0FreeChunk(pGMM, NULL, pChunk, false);

 }

@@ -3732 +3739 @@
  * @param   pChunk      Pointer to the chunk to be mapped.
  * @param   fRelaxedSem Whether we can release the semaphore while doing the
- *                      locking (@c true) or not.
+ *                      mapping (@c true) or not.
  * @param   ppvR3       Where to store the ring-3 address of the mapping.
  *                      In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will be
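The core idea of this changeset is the relaxed-semaphore path in gmmR0FreeChunk: the bookkeeping (unlink, chunk ID, TLB entry) is done while the locks are held, the giant GMM mutex is then dropped around the expensive RTR0MemObjFree/RTMemFree calls, and the new bool return value tells the caller that the lock was released so it can revalidate its list iteration, which GMMR0CleanupVM does via pGMM->Private.idGeneration and fRedoFromStart. The stand-alone sketch below illustrates that pattern with POSIX threads; every name in it (pool_t, chunk_t, pool_free_chunk, pool_free_all, the generation field) is invented for the example and is not part of the VirtualBox sources.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical pool guarded by a single "giant" mutex, loosely analogous to
 * GMM's hMtx, chunk list and generation counter.  Initialize 'giant' with
 * pthread_mutex_init() or PTHREAD_MUTEX_INITIALIZER before use. */
typedef struct chunk {
    struct chunk *next;
    void         *payload;            /* stands in for the expensive-to-free resource */
} chunk_t;

typedef struct pool {
    pthread_mutex_t giant;            /* the giant lock */
    chunk_t        *head;             /* singly linked chunk list */
    uint64_t        generation;       /* bumped on every unlink */
} pool_t;

/* Unlink chunk from the list; the caller must hold the giant lock. */
static void pool_unlink(pool_t *pool, chunk_t *chunk)
{
    chunk_t **link = &pool->head;
    while (*link && *link != chunk)
        link = &(*link)->next;
    if (*link)
        *link = chunk->next;
    pool->generation++;               /* invalidates concurrent iterators */
}

/* Free a chunk.  With relaxed == true the giant lock is dropped around the
 * slow free and reacquired afterwards; the return value tells the caller
 * whether that happened (mirroring the new bool gmmR0FreeChunk). */
static bool pool_free_chunk(pool_t *pool, chunk_t *chunk, bool relaxed)
{
    /* Bookkeeping that must stay consistent happens under the lock. */
    pool_unlink(pool, chunk);

    if (relaxed)
        pthread_mutex_unlock(&pool->giant);

    /* The expensive part runs without blocking other pool users. */
    free(chunk->payload);
    free(chunk);

    if (relaxed)
        pthread_mutex_lock(&pool->giant);
    return relaxed;                   /* true: the caller must revalidate its iteration */
}

/* Iteration pattern used by the cleanup code: if the lock was dropped and
 * anything beyond our own unlink happened, restart the scan. */
static void pool_free_all(pool_t *pool)
{
    pthread_mutex_lock(&pool->giant);
    bool redo;
    do
    {
        redo = false;
        for (chunk_t *chunk = pool->head, *next = NULL; chunk; chunk = next)
        {
            next = chunk->next;
            uint64_t const gen_before = pool->generation;
            if (pool_free_chunk(pool, chunk, true /*relaxed*/))
            {
                /* +1 accounts for our own unlink. */
                redo = pool->generation != gen_before + 1;
                if (redo)
                    break;            /* 'next' may be stale; start over */
            }
        }
    } while (redo);
    pthread_mutex_unlock(&pool->giant);
}

The "+1" in the generation check mirrors the changeset's own comment "(+1 for our unlink)": our unlink bumps the counter exactly once, so any larger change means another thread modified the list while the giant lock was down and the cached 'next' pointer can no longer be trusted.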