Changeset 36984 in vbox for trunk/src/VBox
- Timestamp: May 6, 2011 1:50:35 PM (14 years ago)
- File: trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (1 edited)
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with "+"
- Removed: lines prefixed with "-"
- A line containing only "…" marks omitted unchanged code between hunks
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
Changes from r36970 to r36984:

  #include <iprt/asm.h>
  #include <iprt/avl.h>
+ #include <iprt/list.h>
  #include <iprt/mem.h>
  #include <iprt/memobj.h>
  #include <iprt/semaphore.h>
  #include <iprt/string.h>
+ #include <iprt/time.h>

…
       * chunks with no free pages. */
      PGMMCHUNKFREESET pSet;
+     /** List node in the chunk list (GMM::ChunkList). */
+     RTLISTNODE ListNode;
      /** Pointer to an array of mappings. */
      PGMMCHUNKMAP paMappings;
…
      /** The number of free pages in the set. */
      uint64_t cFreePages;
+     /** The generation ID for the set.  This is incremented whenever
+      * something is linked or unlinked from this set. */
+     uint64_t idGeneration;
      /** Chunks ordered by increasing number of free pages. */
      PGMMCHUNK apLists[GMM_CHUNK_FREE_SET_LISTS];
…
      /** Magic / eye catcher. GMM_MAGIC */
      uint32_t u32Magic;
+     /** The number of threads waiting on the mutex. */
+     uint32_t cMtxContenders;
      /** The fast mutex protecting the GMM.
       * More fine grained locking can be implemented later if necessary. */
-     RTSEMFASTMUTEX Mtx;
+     RTSEMFASTMUTEX hMtx;
+ #ifdef VBOX_STRICT
+     /** The current mutex owner. */
+     RTNATIVETHREAD hMtxOwner;
+ #endif
      /** The chunk tree. */
      PAVLU32NODECORE pChunks;
…
      /** @todo separate trees for distinctly different guest OSes. */
      PAVLGCPTRNODECORE pGlobalSharedModuleTree;
+
+     /** The fast mutex protecting the GMM cleanup.
+      * This is serializes VMs cleaning up their memory, so that we can
+      * safely leave the primary mutex (hMtx). */
+     RTSEMFASTMUTEX hMtxCleanup;
+     /** The chunk list.  For simplifying the cleanup process. */
+     RTLISTNODE ChunkList;

      /** The maximum number of pages we're allowed to allocate.
…
      uint16_t cRegisteredVMs;

+     /** The number of freed chunks ever.  This is used a list generation to
+      * avoid restarting the cleanup scanning when the list wasn't modified. */
+     uint32_t cFreedChunks;
      /** The previous allocated Chunk ID.
       * Used as a hint to avoid scanning the whole bitmap. */
…

  /** The value of GMM::u32Magic (Katsuhiro Otomo). */
- #define GMM_MAGIC   0x19540414
+ #define GMM_MAGIC   UINT32_C(0x19540414)

…
  *******************************************************************************/
  static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM);
- static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGMM);
- static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM);
+ static bool gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk);
  /*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM);
  DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
…
  static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage);
  static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
+ static void gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM);

…
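The new GMM::ChunkList head and GMMCHUNK::ListNode member introduced above are IPRT's intrusive doubly linked list from iprt/list.h; the reworked cleanup code below walks it with RTListForEachReverse instead of traversing the AVL tree. As a rough, self-contained sketch of the idiom, the MYRECORD type and field names are invented for illustration and only the RTList* calls are the ones this changeset uses:

    #include <iprt/list.h>
    #include <iprt/mem.h>
    #include <iprt/types.h>

    /* Hypothetical record with an embedded list node, mirroring how GMMCHUNK
       embeds ListNode and GMM embeds the ChunkList head. */
    typedef struct MYRECORD
    {
        uint32_t   idRecord;    /* payload */
        RTLISTNODE ListNode;    /* links records together without extra allocations */
    } MYRECORD;

    static RTLISTNODE g_RecordList;     /* list head, normally a member of the owning structure */

    static void myRecordListDemo(void)
    {
        RTListInit(&g_RecordList);                      /* start with an empty list */

        MYRECORD *pRec = (MYRECORD *)RTMemAllocZ(sizeof(*pRec));
        if (!pRec)
            return;
        pRec->idRecord = 42;
        RTListAppend(&g_RecordList, &pRec->ListNode);   /* link the node, not the record */

        /* Walk the list backwards, the direction the cleanup scan uses. */
        MYRECORD *pCur;
        RTListForEachReverse(&g_RecordList, pCur, MYRECORD, ListNode)
        {
            /* ... inspect pCur->idRecord ... */
        }

        RTListNodeRemove(&pRec->ListNode);              /* unlink before freeing */
        RTMemFree(pRec);
    }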
      /*
-      * Allocate the instance data and the lock(s).
+      * Allocate the instance data and the locks.
       */
      PGMM pGMM = (PGMM)RTMemAllocZ(sizeof(*pGMM));
      if (!pGMM)
          return VERR_NO_MEMORY;
+
      pGMM->u32Magic = GMM_MAGIC;
      for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++)
          pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID;
+     RTListInit(&pGMM->ChunkList);
      ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID);

-     int rc = RTSemFastMutexCreate(&pGMM->Mtx);
+     int rc = RTSemFastMutexCreate(&pGMM->hMtx);
      if (RT_SUCCESS(rc))
      {
-         /*
-          * Check and see if RTR0MemObjAllocPhysNC works.
-          */
+         rc = RTSemFastMutexCreate(&pGMM->hMtxCleanup);
+         if (RT_SUCCESS(rc))
+         {
+             /*
+              * Check and see if RTR0MemObjAllocPhysNC works.
+              */
  #if 0 /* later, see #3170. */
-         RTR0MEMOBJ MemObj;
-         rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
-         if (RT_SUCCESS(rc))
-         {
-             rc = RTR0MemObjFree(MemObj, true);
-             AssertRC(rc);
-         }
-         else if (rc == VERR_NOT_SUPPORTED)
-             pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
-         else
-             SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
+             RTR0MEMOBJ MemObj;
+             rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS);
+             if (RT_SUCCESS(rc))
+             {
+                 rc = RTR0MemObjFree(MemObj, true);
+                 AssertRC(rc);
+             }
+             else if (rc == VERR_NOT_SUPPORTED)
+                 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true;
+             else
+                 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc);
  #else
  # if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
-         pGMM->fLegacyAllocationMode = false;
+             pGMM->fLegacyAllocationMode = false;
  # if ARCH_BITS == 32
-         /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
-         pGMM->fBoundMemoryMode = true;
+             /* Don't reuse possibly partial chunks because of the virtual address space limitation. */
+             pGMM->fBoundMemoryMode = true;
  # else
-         pGMM->fBoundMemoryMode = false;
+             pGMM->fBoundMemoryMode = false;
  # endif
  # else
-         pGMM->fLegacyAllocationMode = true;
-         pGMM->fBoundMemoryMode = true;
+             pGMM->fLegacyAllocationMode = true;
+             pGMM->fBoundMemoryMode = true;
  # endif
  #endif

-         /*
-          * Query system page count and guess a reasonable cMaxPages value.
-          */
-         pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
-
-         g_pGMM = pGMM;
-         LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
-         return VINF_SUCCESS;
-     }
-
+             /*
+              * Query system page count and guess a reasonable cMaxPages value.
+              */
+             pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */
+
+             g_pGMM = pGMM;
+             LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode));
+             return VINF_SUCCESS;
+         }
+
+         RTSemFastMutexDestroy(pGMM->hMtx);
+     }
+
+     pGMM->u32Magic = 0;
      RTMemFree(pGMM);
      SUPR0Printf("GMMR0Init: failed! rc=%d\n", rc);
…
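The reworked GMMR0Init failure handling above follows the usual nested create/unwind shape: every resource that was successfully created is destroyed again, in reverse order, when a later step fails. A stripped-down sketch of that shape, assuming IPRT memory APIs, with stub createResA/createResB helpers standing in for the two mutexes:

    #include <iprt/mem.h>
    #include <iprt/types.h>

    /* Stand-ins for two resources created in order and torn down in reverse
       order on failure, like hMtx and hMtxCleanup in GMMR0Init above. */
    static bool createResA(void) { return true; }
    static void destroyResA(void) { }
    static bool createResB(void) { return false; }  /* pretend the second step fails */

    static void *exampleInit(void)
    {
        void *pvState = RTMemAllocZ(64);
        if (!pvState)
            return NULL;

        if (createResA())
        {
            if (createResB())
                return pvState;         /* full success: keep everything alive */

            destroyResA();              /* B failed: undo A ... */
        }
        RTMemFree(pvState);             /* ... then undo the allocation */
        return NULL;
    }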
      /* Destroy the fundamentals. */
      g_pGMM = NULL;
-     pGMM->u32Magic++;
-     RTSemFastMutexDestroy(pGMM->Mtx);
-     pGMM->Mtx = NIL_RTSEMFASTMUTEX;
+     pGMM->u32Magic = ~GMM_MAGIC;
+     RTSemFastMutexDestroy(pGMM->hMtx);
+     pGMM->hMtx = NIL_RTSEMFASTMUTEX;
+     RTSemFastMutexDestroy(pGMM->hMtxCleanup);
+     pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX;

      /* free any chunks still hanging around. */
…

  /**
+  * Acquires the GMM giant lock.
+  *
+  * @returns Assert status code from RTSemFastMutexRequest.
+  * @param   pGMM    Pointer to the GMM instance.
+  */
+ static int gmmR0MutexAcquire(PGMM pGMM)
+ {
+     ASMAtomicIncU32(&pGMM->cMtxContenders);
+     int rc = RTSemFastMutexRequest(pGMM->hMtx);
+     ASMAtomicDecU32(&pGMM->cMtxContenders);
+     AssertRC(rc);
+ #ifdef VBOX_STRICT
+     pGMM->hMtxOwner = RTThreadNativeSelf();
+ #endif
+     return rc;
+ }
+
+
+ /**
+  * Releases the GMM giant lock.
+  *
+  * @returns Assert status code from RTSemFastMutexRequest.
+  * @param   pGMM    Pointer to the GMM instance.
+  */
+ static int gmmR0MutexRelease(PGMM pGMM)
+ {
+ #ifdef VBOX_STRICT
+     pGMM->hMtxOwner = NIL_RTNATIVETHREAD;
+ #endif
+     int rc = RTSemFastMutexRelease(pGMM->hMtx);
+     AssertRC(rc);
+     return rc;
+ }
+
+
+ /**
+  * Yields the GMM giant lock if there is contention and a certain minimum time
+  * has elapsed since we took it.
+  *
+  * @returns @c true if the mutex was yielded, @c false if not.
+  * @param   pGMM            Pointer to the GMM instance.
+  * @param   puLockNanoTS    Where the lock acquisition time stamp is kept
+  *                          (in/out).
+  */
+ static bool gmmR0MutexYield(PGMM pGMM, uint64_t *puLockNanoTS)
+ {
+     /*
+      * If nobody is contending the mutex, don't bother checking the time.
+      */
+     if (ASMAtomicReadU32(&pGMM->cMtxContenders) == 0)
+         return false;
+
+     /*
+      * Don't yield if we haven't executed for at least 2 milliseconds.
+      */
+     uint64_t uNanoNow = RTTimeSystemNanoTS();
+     if (uNanoNow - *puLockNanoTS < UINT32_C(2000000))
+         return false;
+
+     /*
+      * Yield the mutex.
+      */
+ #ifdef VBOX_STRICT
+     pGMM->hMtxOwner = NIL_RTNATIVETHREAD;
+ #endif
+     ASMAtomicIncU32(&pGMM->cMtxContenders);
+     int rc1 = RTSemFastMutexRelease(pGMM->hMtx); AssertRC(rc1);
+
+     RTThreadYield();
+
+     int rc2 = RTSemFastMutexRequest(pGMM->hMtx); AssertRC(rc2);
+     *puLockNanoTS = RTTimeSystemNanoTS();
+     ASMAtomicDecU32(&pGMM->cMtxContenders);
+ #ifdef VBOX_STRICT
+     pGMM->hMtxOwner = RTThreadNativeSelf();
+ #endif
+
+     return true;
+ }
+
+
+ /**
   * Cleans up when a VM is terminating.
   *
…
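Because gmmR0MutexAcquire and gmmR0MutexRelease record the owner in VBOX_STRICT builds, code that must only run under the giant lock can now assert ownership up front; the changeset does exactly this in gmmR0MapChunk further down. A minimal sketch of such a guard, with an invented worker name and assuming the surrounding GMMR0.cpp context:

    /* Hypothetical worker that relies on the caller holding the giant GMM mutex. */
    static void gmmR0SomeLockedWorker(PGMM pGMM)
    {
        /* hMtxOwner is maintained by gmmR0MutexAcquire/Release in strict builds,
           so a caller that forgot to take the lock trips this assertion at once. */
        Assert(pGMM->hMtxOwner == RTThreadNativeSelf());

        /* ... manipulate state protected by hMtx ... */
    }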
      GMM_GET_VALID_INSTANCE_VOID(pGMM);

-     int rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+ #ifdef VBOX_WITH_PAGE_SHARING
+     /*
+      * Clean up all registered shared modules first.
+      */
+     gmmR0SharedModuleCleanup(pGMM, pGVM);
+ #endif
+
+     int rc = RTSemFastMutexRequest(pGMM->hMtxCleanup); AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
+     uint64_t uLockNanoTS = RTTimeSystemNanoTS();
      GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
-
- #ifdef VBOX_WITH_PAGE_SHARING
-     /* Clean up all registered shared modules. */
-     RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
- #endif

      /*
…
          /*
           * Walk the entire pool looking for pages that belong to this VM
-          * and left over mappings. (This'll only catch private pages, shared
-          * pages will be 'left behind'.)
+          * and left over mappings.  (This'll only catch private pages,
+          * shared pages will be 'left behind'.)
           */
-         /** @todo this might be kind of expensive with a lot of VMs and
-          *        memory hanging around... */
-         uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
-         RTAvlU32DoWithAll(&pGMM->pChunks, true /* fFromLeft */, gmmR0CleanupVMScanChunk, pGVM);
+         uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */
+
+         unsigned iCountDown = 64;
+         bool fRedoFromStart;
+         PGMMCHUNK pChunk;
+         do
+         {
+             fRedoFromStart = false;
+             RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode)
+             {
+                 if (   !gmmR0CleanupVMScanChunk(pGVM, pChunk)
+                     || iCountDown != 0)
+                     iCountDown--;
+                 else
+                 {
+                     iCountDown = 64;
+                     uint32_t const cFreeChunksOld = pGMM->cFreedChunks;
+                     fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
+                                   && pGMM->cFreedChunks != cFreeChunksOld;
+                     if (fRedoFromStart)
+                         break;
+                 }
+             }
+         } while (fRedoFromStart);
+
          if (pGVM->gmm.s.cPrivatePages)
              SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages);
+
          pGMM->cAllocatedPages -= cPrivatePages;

-         /* free empty chunks. */
-         if (cPrivatePages)
+         /*
+          * Free empty chunks.
+          */
+         do
          {
-             PGMMCHUNK pCur = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
-             while (pCur)
+             iCountDown = 10240;
+             pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1];
+             while (pChunk)
              {
-                 PGMMCHUNK pNext = pCur->pFreeNext;
-                 if (    pCur->cFree == GMM_CHUNK_NUM_PAGES
+                 PGMMCHUNK pNext = pChunk->pFreeNext;
+                 if (    pChunk->cFree == GMM_CHUNK_NUM_PAGES
                      &&  (   !pGMM->fBoundMemoryMode
-                          || pCur->hGVM == pGVM->hSelf))
-                     gmmR0FreeChunk(pGMM, pGVM, pCur);
-                 pCur = pNext;
+                          || pChunk->hGVM == pGVM->hSelf))
+                 {
+                     gmmR0FreeChunk(pGMM, pGVM, pChunk);
+                     iCountDown = 1;
+                 }
+                 pChunk = pNext;
+
+                 if (--iCountDown == 0)
+                 {
+                     uint64_t const idGenerationOld = pGMM->Private.idGeneration;
+                     fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)
+                                   && pGMM->Private.idGeneration != idGenerationOld;
+                     if (fRedoFromStart)
+                         break;
+                     iCountDown = 10240;
+                 }
              }
-         }
-
-         /* account for shared pages that weren't freed. */
+         } while (fRedoFromStart);
+
+         /*
+          * Account for shared pages that weren't freed.
+          */
          if (pGVM->gmm.s.cSharedPages)
          {
…
          }

-         /* Clean up balloon statistics in case the VM process crashed. */
+         /*
+          * Clean up balloon statistics in case the VM process crashed.
+          */
          Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages);
          pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages;
…

      GMM_CHECK_SANITY_UPON_LEAVING(pGMM);
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
+     RTSemFastMutexRelease(pGMM->hMtxCleanup);

      LogFlow(("GMMR0CleanupVM: returns\n"));
…
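The cleanup loops above are the consumers of the new generation counters: a long scan snapshots the counter, calls gmmR0MutexYield, and starts over only if somebody changed the structure while the lock was given up (pGMM->cFreedChunks guards the chunk list, pSet->idGeneration the free sets). Distilled into a sketch with invented names, and assuming the surrounding GMMR0.cpp context:

    /* Sketch only (invented names): scan cItems entries under the giant lock,
       yielding now and then.  *puGeneration is a counter every writer bumps while
       holding the lock, so a change across the yield means our position is stale. */
    static void exampleScanWithYield(PGMM pGMM, uint32_t cItems, uint64_t volatile *puGeneration)
    {
        uint64_t uLockNanoTS = RTTimeSystemNanoTS();    /* when we last (re)took the lock */
        bool     fRestart;
        do
        {
            fRestart = false;
            for (uint32_t i = 0; i < cItems; i++)
            {
                /* ... process entry i ... */

                if ((i & 63) == 0)                      /* only consider yielding every 64 entries */
                {
                    uint64_t const uGenOld = *puGeneration;
                    if (   gmmR0MutexYield(pGMM, &uLockNanoTS)
                        && *puGeneration != uGenOld)    /* the structure changed while we slept */
                    {
                        fRestart = true;                /* start over from a known good position */
                        break;
                    }
                }
            }
        } while (fRestart);
    }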
  /**
-  * RTAvlU32DoWithAll callback.
-  *
-  * @returns 0
-  * @param   pNode   The node to search.
-  * @param   pvGVM   Pointer to the shared VM structure.
-  */
- static DECLCALLBACK(int) gmmR0CleanupVMScanChunk(PAVLU32NODECORE pNode, void *pvGVM)
- {
-     PGMMCHUNK pChunk = (PGMMCHUNK)pNode;
-     PGVM pGVM = (PGVM)pvGVM;
-
+  * Scan one chunk for private pages belonging to the specified VM.
+  *
+  * @returns @c true if a mapping was found (and freed), @c false if not.
+  * @param   pGVM    The global VM handle.
+  * @param   pChunk  The chunk to scan.
+  */
+ static bool gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk)
+ {
      /*
       * Look for pages belonging to the VM.
…

      /*
-      * Look for the mapping belonging to the terminating VM.
-      */
-     for (unsigned i = 0; i < pChunk->cMappings; i++)
+      * Look for a mapping belonging to the terminating VM.
+      */
+     unsigned cMappings = pChunk->cMappings;
+     bool fMappingFreed = true;
+     for (unsigned i = 0; i < cMappings; i++)
          if (pChunk->paMappings[i].pGVM == pGVM)
          {
              RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj;

-             pChunk->cMappings--;
-             if (i < pChunk->cMappings)
-                 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
-             pChunk->paMappings[pChunk->cMappings].pGVM   = NULL;
-             pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
+             cMappings--;
+             if (i < cMappings)
+                 pChunk->paMappings[i] = pChunk->paMappings[cMappings];
+             pChunk->paMappings[cMappings].pGVM   = NULL;
+             pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
+             Assert(pChunk->cMappings - 1U == cMappings);
+             pChunk->cMappings = cMappings;

              int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */);
…
                  AssertRC(rc);
              }
+             fMappingFreed = true;
              break;
          }
…
      }

-     return 0;
+     return fMappingFreed;
  }

…
      AssertReturn(enmPriority > GMMPRIORITY_INVALID && enmPriority < GMMPRIORITY_END, VERR_INVALID_PARAMETER);

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
      else
          rc = VERR_INTERNAL_ERROR_5;
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0InitialReservation: returns %Rrc\n", rc));
      return rc;
…
      AssertReturn(cFixedPages, VERR_INVALID_PARAMETER);

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
      else
          rc = VERR_INTERNAL_ERROR_5;
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0UpdateReservation: returns %Rrc\n", rc));
      return rc;
…
      {
          pSet->cFreePages -= pChunk->cFree;
+         pSet->idGeneration++;

          PGMMCHUNK pPrev = pChunk->pFreePrev;
…

      pSet->cFreePages += pChunk->cFree;
+     pSet->idGeneration++;
      }
  }
…
      {
          pGMM->cChunks++;
+         RTListAppend(&pGMM->ChunkList, &pChunk->ListNode);
          gmmR0LinkChunk(pChunk, pSet);
          LogFlow(("gmmR0RegisterChunk: pChunk=%p id=%#x cChunks=%d\n", pChunk, pChunk->Core.Key, pGMM->cChunks));
…

      /* Leave the lock temporarily as the allocation might take long. */
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      if (enmChunkType == GMMCHUNKTYPE_NON_CONTINUOUS)
          rc = RTR0MemObjAllocPhysNC(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);
      else
          rc = RTR0MemObjAllocPhysEx(&MemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS, GMM_CHUNK_SIZE);

-     /* Grab the lock again. */
-     int rc2 = RTSemFastMutexRequest(pGMM->Mtx);
+     int rc2 = gmmR0MutexAcquire(pGMM);
      AssertRCReturn(rc2, rc2);
…
      }

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
      else
          rc = VERR_INTERNAL_ERROR_5;
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0AllocateHandyPages: returns %Rrc\n", rc));
      return rc;
…
      }

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
      else
          rc = VERR_INTERNAL_ERROR_5;
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0AllocatePages: returns %Rrc\n", rc));
      return rc;
…
      return GMMR0AllocatePages(pVM, idCpu, pReq->cPages, &pReq->aPages[0], pReq->enmAccount);
  }
+

  /**
…
      *pIdPage = NIL_GMM_PAGEID;

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRCReturn(rc, rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
              Log(("GMMR0AllocateLargePage: Reserved=%#llx Allocated+Requested=%#llx+%#x!\n",
                   pGVM->gmm.s.Reserved.cBasePages, pGVM->gmm.s.Allocated.cBasePages, cPages));
-             RTSemFastMutexRelease(pGMM->Mtx);
+             gmmR0MutexRelease(pGMM);
              return VERR_GMM_HIT_VM_ACCOUNT_LIMIT;
          }
…
          if (RT_FAILURE(rc))
          {
-             RTSemFastMutexRelease(pGMM->Mtx);
+             gmmR0MutexRelease(pGMM);
              return rc;
          }
…
          rc = VERR_INTERNAL_ERROR_5;

-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0AllocateLargePage: returns %Rrc\n", rc));
      return rc;
…
          return VERR_NOT_SUPPORTED;

-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
          {
              Log(("GMMR0FreeLargePage: allocated=%#llx cPages=%#x!\n", pGVM->gmm.s.Allocated.cBasePages, cPages));
-             RTSemFastMutexRelease(pGMM->Mtx);
+             gmmR0MutexRelease(pGMM);
              return VERR_GMM_ATTEMPT_TO_FREE_TOO_MUCH;
          }

          PGMMPAGE pPage = gmmR0GetPage(pGMM, idPage);
-         if (    RT_LIKELY(pPage)
-             &&  RT_LIKELY(GMM_PAGE_IS_PRIVATE(pPage)))
-         {
+         if (RT_LIKELY(   pPage
+                       && GMM_PAGE_IS_PRIVATE(pPage)))
+         {
+             PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT);
+             Assert(pChunk);
+             Assert(pChunk->cFree < GMM_CHUNK_NUM_PAGES);
+             Assert(pChunk->cPrivate > 0);
+
+             /* Release the memory immediately. */
+             gmmR0FreeChunk(pGMM, NULL, pChunk);
+
+             /* Update accounting. */
+             pGVM->gmm.s.Allocated.cBasePages -= cPages;
+             pGVM->gmm.s.cPrivatePages        -= cPages;
+             pGMM->cAllocatedPages            -= cPages;
          }
          else
…
          rc = VERR_INTERNAL_ERROR_5;

-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      LogFlow(("GMMR0FreeLargePage: returns %Rrc\n", rc));
      return rc;
…
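The allocation paths above keep the existing rule of not holding the giant lock across a potentially slow host allocation: release it, allocate, reacquire, and only then touch shared state again, exactly as gmmR0AllocateOneChunk does at the top of this block. A condensed sketch of that shape, with an invented function name and assuming the surrounding GMMR0.cpp context:

    /* Sketch only: allocate host memory for a new chunk without holding the giant lock. */
    static int exampleAllocateChunkSlowly(PGMM pGMM)
    {
        /* The caller enters with the giant lock held. */
        gmmR0MutexRelease(pGMM);            /* don't stall every other VM during the allocation */

        RTR0MEMOBJ hMemObj;
        int rc = RTR0MemObjAllocPhysNC(&hMemObj, GMM_CHUNK_SIZE, NIL_RTHCPHYS);

        int rc2 = gmmR0MutexAcquire(pGMM);  /* take the lock back before touching pGMM again */
        AssertRCReturn(rc2, rc2);

        if (RT_SUCCESS(rc))
        {
            /* ... register the chunk; anything read before the release may be stale now ... */
        }
        return rc;
    }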
… … 2546 2708 */ 2547 2709 gmmR0UnlinkChunk(pChunk); 2710 2711 RTListNodeRemove(&pChunk->ListNode); 2548 2712 2549 2713 PAVLU32NODECORE pCore = RTAvlU32Remove(&pGMM->pChunks, pChunk->Core.Key); … … 2570 2734 2571 2735 RTMemFree(pChunk); 2736 2737 pGMM->cFreedChunks++; 2572 2738 } 2573 2739 else … … 2658 2824 2659 2825 #ifdef VBOX_WITH_PAGE_SHARING 2826 2660 2827 /** 2661 2828 * Converts a private page to a shared page, the page is known to exist and be valid and such. … … 2688 2855 } 2689 2856 2857 2690 2858 /** 2691 2859 * Increase the use count of a shared page, the page is known to exist and be valid and such. … … 2706 2874 pGVM->gmm.s.Allocated.cBasePages++; 2707 2875 } 2708 #endif 2876 2877 #endif /* VBOX_WITH_PAGE_SHARING */ 2709 2878 2710 2879 /** … … 2727 2896 gmmR0FreePageWorker(pGMM, pChunk, idPage, pPage); 2728 2897 } 2898 2729 2899 2730 2900 /** … … 2808 2978 Assert(pPage->Shared.cRefs); 2809 2979 if (!--pPage->Shared.cRefs) 2810 {2811 2980 gmmR0FreeSharedPage(pGMM, idPage, pPage); 2812 }2813 2981 else 2814 2982 { … … 2894 3062 * Take the semaphore and call the worker function. 2895 3063 */ 2896 rc = RTSemFastMutexRequest(pGMM->Mtx); 2897 AssertRC(rc); 3064 gmmR0MutexAcquire(pGMM); 2898 3065 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM)) 2899 3066 { … … 2903 3070 else 2904 3071 rc = VERR_INTERNAL_ERROR_5; 2905 RTSemFastMutexRelease(pGMM->Mtx);3072 gmmR0MutexRelease(pGMM); 2906 3073 LogFlow(("GMMR0FreePages: returns %Rrc\n", rc)); 2907 3074 return rc; … … 2978 3145 * Take the semaphore and do some more validations. 2979 3146 */ 2980 rc = RTSemFastMutexRequest(pGMM->Mtx); 2981 AssertRC(rc); 3147 gmmR0MutexAcquire(pGMM); 2982 3148 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM)) 2983 3149 { … … 3071 3237 rc = VERR_INTERNAL_ERROR_5; 3072 3238 3073 RTSemFastMutexRelease(pGMM->Mtx);3239 gmmR0MutexRelease(pGMM); 3074 3240 LogFlow(("GMMR0BalloonedPages: returns %Rrc\n", rc)); 3075 3241 return rc; … … 3164 3330 * Take the semaphore and do some more validations. 3165 3331 */ 3166 rc = RTSemFastMutexRequest(pGMM->Mtx); 3167 AssertRC(rc); 3332 gmmR0MutexAcquire(pGMM); 3168 3333 if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM)) 3169 3334 { … … 3176 3341 rc = VERR_INTERNAL_ERROR_5; 3177 3342 3178 RTSemFastMutexRelease(pGMM->Mtx);3343 gmmR0MutexRelease(pGMM); 3179 3344 LogFlow(("GMMR3QueryVMMemoryStats: returns %Rrc\n", rc)); 3180 3345 return rc; 3181 3346 } 3347 3182 3348 3183 3349 /** … … 3196 3362 * Find the mapping and try unmapping it. 3197 3363 */ 3198 for (uint32_t i = 0; i < pChunk->cMappings; i++) 3364 uint32_t cMappings = pChunk->cMappings; 3365 for (uint32_t i = 0; i < cMappings; i++) 3199 3366 { 3200 3367 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ); … … 3206 3373 { 3207 3374 /* update the record. 
       * Find the mapping and try unmapping it.
       */
-     for (uint32_t i = 0; i < pChunk->cMappings; i++)
+     uint32_t cMappings = pChunk->cMappings;
+     for (uint32_t i = 0; i < cMappings; i++)
      {
          Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);
…
          {
              /* update the record. */
-             pChunk->cMappings--;
-             if (i < pChunk->cMappings)
-                 pChunk->paMappings[i] = pChunk->paMappings[pChunk->cMappings];
-             pChunk->paMappings[pChunk->cMappings].MapObj = NIL_RTR0MEMOBJ;
-             pChunk->paMappings[pChunk->cMappings].pGVM   = NULL;
+             cMappings--;
+             if (i < cMappings)
+                 pChunk->paMappings[i] = pChunk->paMappings[cMappings];
+             pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ;
+             pChunk->paMappings[cMappings].pGVM   = NULL;
+             Assert(pChunk->cMappings - 1U == cMappings);
+             pChunk->cMappings = cMappings;
          }
          return rc;
…
  static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)
  {
+     Assert(pGMM->hMtxOwner == RTThreadNativeSelf());
+
      /*
       * If we're in legacy mode this is simple.
…
      {
          /* reallocate the array? assumes few users per chunk (usually one). */
-         if (    pChunk->cMappings <= 3
-             ||  (pChunk->cMappings & 3) == 0)
-         {
-             unsigned cNewSize = pChunk->cMappings <= 3
-                               ? pChunk->cMappings + 1
-                               : pChunk->cMappings + 4;
+         unsigned iMapping = pChunk->cMappings;
+         if (    iMapping <= 3
+             ||  (iMapping & 3) == 0)
+         {
+             unsigned cNewSize = iMapping <= 3
+                               ? iMapping + 1
+                               : iMapping + 4;
              Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize);
+             if (RT_UNLIKELY(cNewSize > UINT16_MAX))
+             {
+                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
+                 return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS;
+             }
+
              void *pvMappings = RTMemRealloc(pChunk->paMappings, cNewSize * sizeof(pChunk->paMappings[0]));
              if (RT_UNLIKELY(!pvMappings))
              {
-                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */);
-                 AssertRC(rc);
+                 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc);
                  return VERR_NO_MEMORY;
              }
…

          /* insert new entry */
-         pChunk->paMappings[pChunk->cMappings].MapObj = MapObj;
-         pChunk->paMappings[pChunk->cMappings].pGVM   = pGVM;
-         pChunk->cMappings++;
+         pChunk->paMappings[iMapping].MapObj = MapObj;
+         pChunk->paMappings[iMapping].pGVM   = pGVM;
+         Assert(pChunk->cMappings == iMapping);
+         pChunk->cMappings = iMapping + 1;

          *ppvR3 = RTR0MemObjAddressR3(MapObj);
…
       * it it's limits, so, no problem here.
       */
-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
      else
          rc = VERR_INTERNAL_ERROR_5;
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);

      LogFlow(("GMMR0MapUnmapChunk: returns %Rrc\n", rc));
…
      {
          /* Grab the lock. */
-         rc = RTSemFastMutexRequest(pGMM->Mtx);
-         AssertRC(rc);
+         rc = gmmR0MutexAcquire(pGMM);
          if (RT_SUCCESS(rc))
          {
…
               */
              rc = gmmR0RegisterChunk(pGMM, &pGMM->Private, MemObj, pGVM->hSelf, GMMCHUNKTYPE_NON_CONTINUOUS);
-             RTSemFastMutexRelease(pGMM->Mtx);
+             gmmR0MutexRelease(pGMM);
          }

…
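The gmmR0UnmapChunk and gmmR0MapChunk changes above manage the per-chunk mapping array with two small idioms: removal swaps the last entry into the freed slot instead of shifting, and growth happens in steps of four entries with a hard cap so the count still fits the 16-bit cMappings field (the new VERR_GMM_TOO_MANY_CHUNK_MAPPINGS path). A generic sketch of the same array management, with invented names, a stand-in status code, and assuming iprt/mem.h and iprt/err.h are included:

    /* Sketch only: a small dynamic array kept dense the same way as paMappings. */
    typedef struct MYENTRY { void *pvUser; } MYENTRY;

    static MYENTRY *g_paEntries;
    static uint16_t g_cEntries;

    static int exampleAdd(void *pvUser)
    {
        /* Grow in steps of 4 once past the first few entries. */
        if (g_cEntries <= 3 || (g_cEntries & 3) == 0)
        {
            unsigned cNew = g_cEntries <= 3 ? g_cEntries + 1U : g_cEntries + 4U;
            if (cNew > UINT16_MAX)                  /* keep the 16-bit counter honest */
                return VERR_TOO_MUCH_DATA;          /* stand-in for the real status code */
            void *pvNew = RTMemRealloc(g_paEntries, cNew * sizeof(g_paEntries[0]));
            if (!pvNew)
                return VERR_NO_MEMORY;
            g_paEntries = (MYENTRY *)pvNew;
        }
        g_paEntries[g_cEntries].pvUser = pvUser;
        g_cEntries++;
        return VINF_SUCCESS;
    }

    static void exampleRemove(unsigned i)
    {
        /* Unordered removal: move the last entry into the hole, O(1). */
        uint16_t cEntries = --g_cEntries;
        if (i < cEntries)
            g_paEntries[i] = g_paEntries[cEntries];
        g_paEntries[cEntries].pvUser = NULL;
    }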
       * Take the semaphore and do some more validations.
       */
-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…

  end:
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      return rc;
  #else
…
       * Take the semaphore and do some more validations.
       */
-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
          rc = VERR_INTERNAL_ERROR_5;

-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      return rc;
  #else
…
  }

- /**
-  * RTAvlU32Destroy callback.
-  *
-  * @returns 0
+
+ /**
+  * RTAvlGCPtrDestroy callback.
+  *
+  * @returns 0 or VERR_INTERNAL_ERROR.
   * @param   pNode   The node to destroy.
   * @param   pvGVM   The GVM handle.
…
  static DECLCALLBACK(int) gmmR0CleanupSharedModule(PAVLGCPTRNODECORE pNode, void *pvGVM)
  {
-     PGVM pGVM = (PGVM)pvGVM;
-     PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;
-     PGMM pGMM;
-     GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
+     PGVM                  pGVM   = (PGVM)pvGVM;
+     PGMMSHAREDMODULEPERVM pRecVM = (PGMMSHAREDMODULEPERVM)pNode;

      Assert(pRecVM->pGlobalModule || pRecVM->fCollision);
…
      {
          PGMMSHAREDMODULE pRec = pRecVM->pGlobalModule;
-         Assert(pRec);
+         AssertPtr(pRec);
          Assert(pRec->cUsers);

…
          if (pRec->cUsers == 0)
          {
-             for (unsigned i = 0; i < pRec->cRegions; i++)
+             for (uint32_t i = 0; i < pRec->cRegions; i++)
                  if (pRec->aRegions[i].paHCPhysPageID)
                      RTMemFree(pRec->aRegions[i].paHCPhysPageID);

              /* Remove from the tree and free memory. */
+             PGMM pGMM;
+             GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);
              RTAvlGCPtrRemove(&pGMM->pGlobalSharedModuleTree, pRec->Core.Key);
              RTMemFree(pRec);
…
      RTMemFree(pRecVM);
      return 0;
+ }
+
+
+ /**
+  * Used by GMMR0CleanupVM to clean up shared modules.
+  *
+  * This is called without taking the GMM lock so that it can be yielded as
+  * needed here.
+  *
+  * @param   pGMM    The GMM handle.
+  * @param   pGVM    The global VM handle.
+  */
+ static void gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM)
+ {
+     gmmR0MutexAcquire(pGMM);
+     GMM_CHECK_SANITY_UPON_ENTERING(pGMM);
+
+     RTAvlGCPtrDestroy(&pGVM->gmm.s.pSharedModuleTree, gmmR0CleanupSharedModule, pGVM);
+
+     gmmR0MutexRelease(pGMM);
  }

…
       * Take the semaphore and do some more validations.
       */
-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…
          rc = VERR_INTERNAL_ERROR_5;

-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      return rc;
  #else
…
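gmmR0SharedModuleCleanup above drains the per-VM module tree with RTAvlGCPtrDestroy, which unlinks every node and hands it to a callback for freeing. A small, self-contained sketch of that callback pattern with an invented node type; the RTAvl* calls and the DECLCALLBACK convention are the ones this changeset uses:

    #include <iprt/avl.h>
    #include <iprt/mem.h>

    /* Hypothetical node type: the AVL core must be the first member. */
    typedef struct MYNODE
    {
        AVLGCPTRNODECORE Core;      /* keyed by an RTGCPTR */
        uint32_t         uPayload;
    } MYNODE;

    static PAVLGCPTRNODECORE g_pMyTree;     /* an empty tree is a NULL root */

    static void myTreeAddOne(RTGCPTR GCPtrKey)
    {
        MYNODE *pNode = (MYNODE *)RTMemAllocZ(sizeof(*pNode));
        if (pNode)
        {
            pNode->Core.Key = GCPtrKey;
            if (!RTAvlGCPtrInsert(&g_pMyTree, &pNode->Core))    /* false on duplicate key */
                RTMemFree(pNode);
        }
    }

    /* Called once per node by RTAvlGCPtrDestroy; return 0 to keep going. */
    static DECLCALLBACK(int) myDestroyNode(PAVLGCPTRNODECORE pNode, void *pvUser)
    {
        NOREF(pvUser);
        RTMemFree((MYNODE *)pNode);
        return 0;
    }

    static void myTreeCleanup(void)
    {
        /* Unlinks and visits every node; afterwards the tree root is NULL again. */
        RTAvlGCPtrDestroy(&g_pMyTree, myDestroyNode, NULL /*pvUser*/);
    }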
       * Take the semaphore and do some more validations.
       */
-     int rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
      if (!GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
          rc = VERR_INTERNAL_ERROR_5;
…
      GMM_GET_VALID_INSTANCE(pGMM, VERR_INTERNAL_ERROR);

-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      return VINF_SUCCESS;
  }
…
       * Take the semaphore and do some more validations.
       */
-     rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     gmmR0MutexAcquire(pGMM);
  # endif
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
…

  # ifndef DEBUG_sandervl
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
  # endif
      return rc;
…
       * Take the semaphore and do some more validations.
       */
-     int rc = RTSemFastMutexRequest(pGMM->Mtx);
-     AssertRC(rc);
+     int rc = gmmR0MutexAcquire(pGMM);
      if (GMM_CHECK_SANITY_UPON_ENTERING(pGMM))
      {
…

  end:
-     RTSemFastMutexRelease(pGMM->Mtx);
+     gmmR0MutexRelease(pGMM);
      return rc;
  }