Changeset 37192 in vbox for trunk/src/VBox/VMM
- Timestamp: May 24, 2011, 12:06:38 PM (14 years ago)
- svn:sync-xref-src-repo-rev: 71888
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp
r37178 r37192 364 364 { 365 365 /** The mapping object. */ 366 RTR0MEMOBJ MapObj;366 RTR0MEMOBJ hMapObj; 367 367 /** The VM owning the mapping. */ 368 368 PGVM pGVM; … … 386 386 { 387 387 /** The AVL node core. 388 * The Key is the chunk ID. */388 * The Key is the chunk ID. (Giant mtx.) */ 389 389 AVLU32NODECORE Core; 390 390 /** The memory object. 391 391 * Either from RTR0MemObjAllocPhysNC or RTR0MemObjLockUser depending on 392 * what the host can dish up with. */ 393 RTR0MEMOBJ MemObj; 394 /** Pointer to the next chunk in the free list. */ 392 * what the host can dish up with. (Chunk mtx protects mapping accesses 393 * and related frees.) */ 394 RTR0MEMOBJ hMemObj; 395 /** Pointer to the next chunk in the free list. (Giant mtx.) */ 395 396 PGMMCHUNK pFreeNext; 396 /** Pointer to the previous chunk in the free list. */397 /** Pointer to the previous chunk in the free list. (Giant mtx.) */ 397 398 PGMMCHUNK pFreePrev; 398 399 /** Pointer to the free set this chunk belongs to. NULL for 399 * chunks with no free pages. */400 * chunks with no free pages. (Giant mtx.) */ 400 401 PGMMCHUNKFREESET pSet; 401 /** List node in the chunk list (GMM::ChunkList). */402 /** List node in the chunk list (GMM::ChunkList). (Giant mtx.) */ 402 403 RTLISTNODE ListNode; 403 /** Pointer to an array of mappings. */ 404 PGMMCHUNKMAP paMappings; 405 /** The number of mappings. */ 406 uint16_t cMappings; 407 /** The number of mapping operations that is in progress without owning 408 * the semaphore. */ 409 uint16_t volatile cMappingsInProgress; 410 /** The head of the list of free pages. UINT16_MAX is the NIL value. */ 404 /** Pointer to an array of mappings. (Chunk mtx.) */ 405 PGMMCHUNKMAP paMappingsX; 406 /** The number of mappings. (Chunk mtx.) */ 407 uint16_t cMappingsX; 408 /** The mapping lock this chunk is using using. UINT16_MAX if nobody is 409 * mapping or freeing anything. (Giant mtx.) */ 410 uint8_t volatile iMemLock; 411 /** Flags field reserved for future use (like eliminating enmType). 412 * (Giant mtx.) */ 413 uint8_t fFlags; 414 /** The head of the list of free pages. UINT16_MAX is the NIL value. 415 * (Giant mtx.) */ 411 416 uint16_t iFreeHead; 412 /** The number of free pages. */417 /** The number of free pages. (Giant mtx.) */ 413 418 uint16_t cFree; 414 419 /** The GVM handle of the VM that first allocated pages from this chunk, this 415 420 * is used as a preference when there are several chunks to choose from. 416 * When in bound memory mode this isn't a preference any longer. */ 421 * When in bound memory mode this isn't a preference any longer. (Giant 422 * mtx.) */ 417 423 uint16_t hGVM; 418 /** The ID of the NUMA node the memory mostly resides on. (Reserved for419 * future use.) */424 /** The ID of the NUMA node the memory mostly resides on. (Reserved for 425 * future use.) (Giant mtx.) */ 420 426 uint16_t idNumaNode; 421 /** The number of private pages. */427 /** The number of private pages. (Giant mtx.) */ 422 428 uint16_t cPrivate; 423 /** The number of shared pages. */429 /** The number of shared pages. (Giant mtx.) */ 424 430 uint16_t cShared; 425 /** Chunk type */431 /** Chunk type. (Giant mtx.) */ 426 432 GMMCHUNKTYPE enmType; 427 /** The pages. */433 /** The pages. (Giant mtx.) */ 428 434 GMMPAGE aPages[GMM_CHUNK_SIZE >> PAGE_SHIFT]; 429 435 } GMMCHUNK; … … 567 573 * The NIL id (0) is marked allocated. */ 568 574 uint32_t bmChunkId[(GMM_CHUNKID_LAST + 1 + 31) / 32]; 575 576 /** The index of the next mutex to use. 
*/ 577 uint32_t iNextChunkMtx; 578 /** Chunk locks for reducing lock contention without having to allocate 579 * one lock per chunk. */ 580 RTSEMFASTMUTEX ahChunkMtx[64]; 569 581 } GMM; 570 582 /** Pointer to the GMM instance. */ … … 573 585 /** The value of GMM::u32Magic (Katsuhiro Otomo). */ 574 586 #define GMM_MAGIC UINT32_C(0x19540414) 587 588 589 /** 590 * GMM chunk mutex state. 591 * 592 * This is returned by gmmR0ChunkMutexAcquire and is used by the other 593 * gmmR0ChunkMutex* methods. 594 */ 595 typedef struct GMMR0CHUNKMTXSTATE 596 { 597 PGMM pGMM; 598 /** The index of the chunk mutex. */ 599 uint8_t iChunkMtx; 600 /** The relevant flags (GMMR0CHUNK_MTX_XXX). */ 601 uint8_t fFlags; 602 } GMMR0CHUNKMTXSTATE; 603 /** Pointer to a chunk mutex state. */ 604 typedef GMMR0CHUNKMTXSTATE *PGMMR0CHUNKMTXSTATE; 605 606 /** @name GMMR0CHUNK_MTX_XXX 607 * @{ */ 608 #define GMMR0CHUNK_MTX_INVALID UINT32_C(0) 609 #define GMMR0CHUNK_MTX_KEEP_GIANT UINT32_C(1) 610 #define GMMR0CHUNK_MTX_RETAKE_GIANT UINT32_C(2) 611 #define GMMR0CHUNK_MTX_DROP_GIANT UINT32_C(3) 612 #define GMMR0CHUNK_MTX_END UINT32_C(4) 613 /** @} */ 575 614 576 615 … … 655 694 *******************************************************************************/ 656 695 static DECLCALLBACK(int) gmmR0TermDestroyChunk(PAVLU32NODECORE pNode, void *pvGMM); 657 static bool gmmR0CleanupVMScanChunk(PGVM pGVM, PGMMCHUNK pChunk); 658 /*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM); 696 static bool gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk); 659 697 DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet); 660 698 DECLINLINE(void) gmmR0UnlinkChunk(PGMMCHUNK pChunk); … … 662 700 static void gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk); 663 701 static void gmmR0FreeSharedPage(PGMM pGMM, uint32_t idPage, PGMMPAGE pPage); 664 static int gmmR0UnmapChunk (PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);702 static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk); 665 703 static void gmmR0SharedModuleCleanup(PGMM pGMM, PGVM pGVM); 666 704 … … 698 736 if (RT_SUCCESS(rc)) 699 737 { 700 /* 701 * Check and see if RTR0MemObjAllocPhysNC works. 702 */ 703 #if 0 /* later, see #3170. */ 704 RTR0MEMOBJ MemObj; 705 rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS); 738 unsigned iMtx; 739 for (iMtx = 0; iMtx < RT_ELEMENTS(pGMM->ahChunkMtx); iMtx++) 740 { 741 rc = RTSemFastMutexCreate(&pGMM->ahChunkMtx[iMtx]); 742 if (RT_FAILURE(rc)) 743 break; 744 } 706 745 if (RT_SUCCESS(rc)) 707 746 { 708 rc = RTR0MemObjFree(MemObj, true); 709 AssertRC(rc); 710 } 711 else if (rc == VERR_NOT_SUPPORTED) 712 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true; 713 else 714 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc); 747 /* 748 * Check and see if RTR0MemObjAllocPhysNC works. 749 */ 750 #if 0 /* later, see #3170. 
*/ 751 RTR0MEMOBJ MemObj; 752 rc = RTR0MemObjAllocPhysNC(&MemObj, _64K, NIL_RTHCPHYS); 753 if (RT_SUCCESS(rc)) 754 { 755 rc = RTR0MemObjFree(MemObj, true); 756 AssertRC(rc); 757 } 758 else if (rc == VERR_NOT_SUPPORTED) 759 pGMM->fLegacyAllocationMode = pGMM->fBoundMemoryMode = true; 760 else 761 SUPR0Printf("GMMR0Init: RTR0MemObjAllocPhysNC(,64K,Any) -> %d!\n", rc); 715 762 #else 716 763 # if defined(RT_OS_WINDOWS) || (defined(RT_OS_SOLARIS) && ARCH_BITS == 64) || defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD) 717 pGMM->fLegacyAllocationMode = false;764 pGMM->fLegacyAllocationMode = false; 718 765 # if ARCH_BITS == 32 719 /* Don't reuse possibly partial chunks because of the virtual address space limitation. */ 720 pGMM->fBoundMemoryMode = true; 766 /* Don't reuse possibly partial chunks because of the virtual 767 address space limitation. */ 768 pGMM->fBoundMemoryMode = true; 721 769 # else 722 pGMM->fBoundMemoryMode = false;770 pGMM->fBoundMemoryMode = false; 723 771 # endif 724 772 # else 725 pGMM->fLegacyAllocationMode = true;726 pGMM->fBoundMemoryMode = true;773 pGMM->fLegacyAllocationMode = true; 774 pGMM->fBoundMemoryMode = true; 727 775 # endif 728 776 #endif 729 777 778 /* 779 * Query system page count and guess a reasonable cMaxPages value. 780 */ 781 pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */ 782 783 g_pGMM = pGMM; 784 LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode)); 785 return VINF_SUCCESS; 786 } 787 730 788 /* 731 * Query system page count and guess a reasonable cMaxPages value.789 * Bail out. 732 790 */ 733 pGMM->cMaxPages = UINT32_MAX; /** @todo IPRT function for query ram size and such. */ 734 735 g_pGMM = pGMM; 736 LogFlow(("GMMInit: pGMM=%p fLegacyAllocationMode=%RTbool fBoundMemoryMode=%RTbool\n", pGMM, pGMM->fLegacyAllocationMode, pGMM->fBoundMemoryMode)); 737 return VINF_SUCCESS; 738 } 739 791 while (iMtx-- > 0) 792 RTSemFastMutexDestroy(pGMM->ahChunkMtx[iMtx]); 793 } 740 794 RTSemFastMutexDestroy(pGMM->hMtx); 741 795 } … … 778 832 pGMM->hMtxCleanup = NIL_RTSEMFASTMUTEX; 779 833 780 /* free any chunks still hanging around. */834 /* Free any chunks still hanging around. */ 781 835 RTAvlU32Destroy(&pGMM->pChunks, gmmR0TermDestroyChunk, pGMM); 782 836 783 /* finally the instance data itself. */ 837 /* Destroy the chunk locks. */ 838 for (unsigned iMtx = 0; iMtx++ < RT_ELEMENTS(pGMM->ahChunkMtx); iMtx++) 839 { 840 RTSemFastMutexDestroy(pGMM->ahChunkMtx[iMtx]); 841 pGMM->ahChunkMtx[iMtx] = NIL_RTSEMFASTMUTEX; 842 } 843 844 /* Finally the instance data itself. 
*/ 784 845 RTMemFree(pGMM); 785 846 LogFlow(("GMMTerm: done\n")); … … 800 861 if (pChunk->cFree != (GMM_CHUNK_SIZE >> PAGE_SHIFT)) 801 862 SUPR0Printf("GMMR0Term: %p/%#x: cFree=%d cPrivate=%d cShared=%d cMappings=%d\n", pChunk, 802 pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappings); 803 Assert(pChunk->cMappingsInProgress == 0); 804 805 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */); 863 pChunk->Core.Key, pChunk->cFree, pChunk->cPrivate, pChunk->cShared, pChunk->cMappingsX); 864 865 int rc = RTR0MemObjFree(pChunk->hMemObj, true /* fFreeMappings */); 806 866 if (RT_FAILURE(rc)) 807 867 { 808 868 SUPR0Printf("GMMR0Term: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk, 809 pChunk->Core.Key, pChunk-> MemObj, rc, pChunk->cMappings);869 pChunk->Core.Key, pChunk->hMemObj, rc, pChunk->cMappingsX); 810 870 AssertRC(rc); 811 871 } 812 pChunk-> MemObj = NIL_RTR0MEMOBJ;813 814 RTMemFree(pChunk->paMappings );815 pChunk->paMappings = NULL;872 pChunk->hMemObj = NIL_RTR0MEMOBJ; 873 874 RTMemFree(pChunk->paMappingsX); 875 pChunk->paMappingsX = NULL; 816 876 817 877 RTMemFree(pChunk); … … 920 980 921 981 return true; 982 } 983 984 985 /** 986 * Acquires a chunk lock. 987 * 988 * The caller must own the giant lock. 989 * 990 * @returns Assert status code from RTSemFastMutexRequest. 991 * @param pMtxState The chunk mutex state info. (Avoids 992 * passing the same flags and stuff around 993 * for subsequent release and drop-giant 994 * calls.) 995 * @param pGMM Pointer to the GMM instance. 996 * @param pChunk Pointer to the chunk. 997 * @param fFlags Flags regarding the giant lock, GMMR0CHUNK_MTX_XXX. 998 */ 999 static int gmmR0ChunkMutexAcquire(PGMMR0CHUNKMTXSTATE pMtxState, PGMM pGMM, PGMMCHUNK pChunk, uint32_t fFlags) 1000 { 1001 Assert(fFlags > GMMR0CHUNK_MTX_INVALID && fFlags < GMMR0CHUNK_MTX_END); 1002 pMtxState->pGMM = pGMM; 1003 pMtxState->fFlags = (uint8_t)fFlags; 1004 1005 /* 1006 * Get the lock index. 1007 */ 1008 Assert(pGMM->hMtxOwner == RTThreadNativeSelf()); 1009 uint32_t iChunkMtx = pChunk->iMemLock; 1010 if (iChunkMtx == UINT8_MAX) 1011 { 1012 /** @todo skip mutexes that are currently owned. */ 1013 iChunkMtx = pGMM->iNextChunkMtx++; 1014 iChunkMtx %= RT_ELEMENTS(pGMM->ahChunkMtx); 1015 pChunk->iMemLock = iChunkMtx; 1016 } 1017 AssertCompile(RT_ELEMENTS(pGMM->ahChunkMtx) < UINT8_MAX); 1018 pMtxState->iChunkMtx = (uint8_t)iChunkMtx; 1019 1020 /* 1021 * Drop the giant? 1022 */ 1023 if (fFlags != GMMR0CHUNK_MTX_KEEP_GIANT) 1024 { 1025 /** @todo GMM life cycle cleanup (we may race someone 1026 * destroying and cleaning up GMM)? */ 1027 gmmR0MutexRelease(pGMM); 1028 } 1029 1030 /* 1031 * Take the chunk mutex. 1032 */ 1033 int rc = RTSemFastMutexRequest(pGMM->ahChunkMtx[iChunkMtx]); 1034 AssertRC(rc); 1035 return rc; 1036 } 1037 1038 1039 /** 1040 * Releases the GMM giant lock. 1041 * 1042 * @returns Assert status code from RTSemFastMutexRequest. 1043 * @param pGMM Pointer to the GMM instance. 1044 * @param pChunk Pointer to the chunk if it's still 1045 * alive, NULL if it isn't. This is 1046 * inteded for later optimizations where we 1047 * will deassociate the chunk mutex if 1048 * considered safe. 
1049 */ 1050 static int gmmR0ChunkMutexRelease(PGMMR0CHUNKMTXSTATE pMtxState, PGMMCHUNK pChunk) 1051 { 1052 int rc = RTSemFastMutexRelease(pMtxState->pGMM->ahChunkMtx[pMtxState->iChunkMtx]); 1053 AssertRC(rc); 1054 if (pMtxState->fFlags == GMMR0CHUNK_MTX_RETAKE_GIANT) 1055 rc = gmmR0MutexAcquire(pMtxState->pGMM); 1056 pMtxState->pGMM = NULL; 1057 return rc; 1058 } 1059 1060 1061 /** 1062 * Drops the giant GMM lock we kept in gmmR0ChunkMutexAcquire while keeping the 1063 * chunk locked. 1064 * 1065 * This only works if gmmR0ChunkMutexAcquire was called with 1066 * GMMR0CHUNK_MTX_KEEP_GIANT. Release will NOT retake the giant 1067 * when dropped this way, the behavior will be like if 1068 * GMMR0CHUNK_MTX_DROP_GIANT was used. 1069 * 1070 * @returns VBox status code (assuming success is ok). 1071 * @param pMtxState Pointer to the chunk mutex state. 1072 */ 1073 static int gmmR0ChunkMutexDropGiant(PGMMR0CHUNKMTXSTATE pMtxState) 1074 { 1075 AssertReturn(pMtxState->fFlags == GMMR0CHUNK_MTX_KEEP_GIANT, VERR_INTERNAL_ERROR_2); 1076 Assert(pMtxState->pGMM->hMtxOwner == RTThreadNativeSelf()); 1077 pMtxState->fFlags = GMMR0CHUNK_MTX_DROP_GIANT; 1078 /** @todo GMM life cycle cleanup (we may race someone 1079 * destroying and cleaning up GMM)? */ 1080 return gmmR0MutexRelease(pMtxState->pGMM); 922 1081 } 923 1082 … … 963 1122 Assert(pGMM->cRegisteredVMs); 964 1123 pGMM->cRegisteredVMs--; 965 #if 0 /* disabled so it won't hide bugs. */ 966 if (!pGMM->cRegisteredVMs) 967 { 968 RTAvlU32Destroy(&pGMM->pChunks, gmmR0CleanupVMDestroyChunk, pGMM); 969 970 for (unsigned i = 0; i < RT_ELEMENTS(pGMM->ChunkTLB.aEntries); i++) 1124 1125 /* 1126 * Walk the entire pool looking for pages that belong to this VM 1127 * and left over mappings. (This'll only catch private pages, 1128 * shared pages will be 'left behind'.) 1129 */ 1130 uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */ 1131 1132 unsigned iCountDown = 64; 1133 bool fRedoFromStart; 1134 PGMMCHUNK pChunk; 1135 do 1136 { 1137 fRedoFromStart = false; 1138 RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode) 971 1139 { 972 pGMM->ChunkTLB.aEntries[i].idChunk = NIL_GMM_CHUNKID; 973 pGMM->ChunkTLB.aEntries[i].pChunk = NULL; 974 } 975 976 memset(&pGMM->Private, 0, sizeof(pGMM->Private)); 977 memset(&pGMM->Shared, 0, sizeof(pGMM->Shared)); 978 979 memset(&pGMM->bmChunkId[0], 0, sizeof(pGMM->bmChunkId)); 980 ASMBitSet(&pGMM->bmChunkId[0], NIL_GMM_CHUNKID); 981 982 pGMM->cReservedPages = 0; 983 pGMM->cOverCommittedPages = 0; 984 pGMM->cAllocatedPages = 0; 985 pGMM->cSharedPages = 0; 986 pGMM->cDuplicatePages = 0; 987 pGMM->cLeftBehindSharedPages = 0; 988 pGMM->cChunks = 0; 989 pGMM->cBalloonedPages = 0; 990 } 991 else 992 #endif 993 { 994 /* 995 * Walk the entire pool looking for pages that belong to this VM 996 * and left over mappings. (This'll only catch private pages, 997 * shared pages will be 'left behind'.) 
998 */ 999 uint64_t cPrivatePages = pGVM->gmm.s.cPrivatePages; /* save */ 1000 1001 unsigned iCountDown = 64; 1002 bool fRedoFromStart; 1003 PGMMCHUNK pChunk; 1004 do 1005 { 1006 fRedoFromStart = false; 1007 RTListForEachReverse(&pGMM->ChunkList, pChunk, GMMCHUNK, ListNode) 1140 uint32_t const cFreeChunksOld = pGMM->cFreedChunks; 1141 if (gmmR0CleanupVMScanChunk(pGMM, pGVM, pChunk)) 1008 1142 { 1009 if ( !gmmR0CleanupVMScanChunk(pGVM, pChunk) 1010 || iCountDown != 0) 1143 gmmR0MutexAcquire(pGMM); 1144 uLockNanoTS = RTTimeSystemNanoTS(); 1145 } 1146 else 1147 { 1148 if (!iCountDown) 1149 gmmR0MutexYield(pGMM, &uLockNanoTS); 1150 else 1011 1151 iCountDown--; 1012 else1013 {1014 iCountDown = 64;1015 uint32_t const cFreeChunksOld = pGMM->cFreedChunks;1016 fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS)1017 && pGMM->cFreedChunks != cFreeChunksOld;1018 if (fRedoFromStart)1019 break;1020 }1021 1152 } 1022 } while (fRedoFromStart); 1023 1024 if (pGVM->gmm.s.cPrivatePages) 1025 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages); 1026 1027 pGMM->cAllocatedPages -= cPrivatePages; 1028 1029 /* 1030 * Free empty chunks. 1031 */ 1032 do 1033 { 1034 iCountDown = 10240; 1035 pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1]; 1036 while (pChunk) 1037 { 1038 PGMMCHUNK pNext = pChunk->pFreeNext; 1039 if ( pChunk->cFree == GMM_CHUNK_NUM_PAGES 1040 && ( !pGMM->fBoundMemoryMode 1041 || pChunk->hGVM == pGVM->hSelf)) 1042 { 1043 gmmR0FreeChunk(pGMM, pGVM, pChunk); 1044 iCountDown = 1; 1045 } 1046 pChunk = pNext; 1047 1048 if (--iCountDown == 0) 1049 { 1050 uint64_t const idGenerationOld = pGMM->Private.idGeneration; 1051 fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS) 1052 && pGMM->Private.idGeneration != idGenerationOld; 1053 if (fRedoFromStart) 1054 break; 1055 iCountDown = 10240; 1056 } 1057 } 1058 } while (fRedoFromStart); 1059 1060 /* 1061 * Account for shared pages that weren't freed. 1062 */ 1063 if (pGVM->gmm.s.cSharedPages) 1064 { 1065 Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages); 1066 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages); 1067 pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages; 1068 } 1069 1070 /* 1071 * Clean up balloon statistics in case the VM process crashed. 1072 */ 1073 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages); 1074 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages; 1075 1076 /* 1077 * Update the over-commitment management statistics. 1078 */ 1079 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages 1080 + pGVM->gmm.s.Reserved.cFixedPages 1081 + pGVM->gmm.s.Reserved.cShadowPages; 1082 switch (pGVM->gmm.s.enmPolicy) 1083 { 1084 case GMMOCPOLICY_NO_OC: 1085 break; 1086 default: 1087 /** @todo Update GMM->cOverCommittedPages */ 1153 if (pGMM->cFreedChunks != cFreeChunksOld) 1088 1154 break; 1089 1155 } 1156 } while (fRedoFromStart); 1157 1158 if (pGVM->gmm.s.cPrivatePages) 1159 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x has %#x private pages that cannot be found!\n", pGVM->hSelf, pGVM->gmm.s.cPrivatePages); 1160 1161 pGMM->cAllocatedPages -= cPrivatePages; 1162 1163 /* 1164 * Free empty chunks. 
1165 */ 1166 do 1167 { 1168 iCountDown = 10240; 1169 pChunk = pGMM->Private.apLists[RT_ELEMENTS(pGMM->Private.apLists) - 1]; 1170 while (pChunk) 1171 { 1172 PGMMCHUNK pNext = pChunk->pFreeNext; 1173 if ( pChunk->cFree == GMM_CHUNK_NUM_PAGES 1174 && ( !pGMM->fBoundMemoryMode 1175 || pChunk->hGVM == pGVM->hSelf)) 1176 { 1177 gmmR0FreeChunk(pGMM, pGVM, pChunk); 1178 iCountDown = 1; 1179 } 1180 pChunk = pNext; 1181 1182 if (--iCountDown == 0) 1183 { 1184 uint64_t const idGenerationOld = pGMM->Private.idGeneration; 1185 fRedoFromStart = gmmR0MutexYield(pGMM, &uLockNanoTS) 1186 && pGMM->Private.idGeneration != idGenerationOld; 1187 if (fRedoFromStart) 1188 break; 1189 iCountDown = 10240; 1190 } 1191 } 1192 } while (fRedoFromStart); 1193 1194 /* 1195 * Account for shared pages that weren't freed. 1196 */ 1197 if (pGVM->gmm.s.cSharedPages) 1198 { 1199 Assert(pGMM->cSharedPages >= pGVM->gmm.s.cSharedPages); 1200 SUPR0Printf("GMMR0CleanupVM: hGVM=%#x left %#x shared pages behind!\n", pGVM->hSelf, pGVM->gmm.s.cSharedPages); 1201 pGMM->cLeftBehindSharedPages += pGVM->gmm.s.cSharedPages; 1202 } 1203 1204 /* 1205 * Clean up balloon statistics in case the VM process crashed. 1206 */ 1207 Assert(pGMM->cBalloonedPages >= pGVM->gmm.s.cBalloonedPages); 1208 pGMM->cBalloonedPages -= pGVM->gmm.s.cBalloonedPages; 1209 1210 /* 1211 * Update the over-commitment management statistics. 1212 */ 1213 pGMM->cReservedPages -= pGVM->gmm.s.Reserved.cBasePages 1214 + pGVM->gmm.s.Reserved.cFixedPages 1215 + pGVM->gmm.s.Reserved.cShadowPages; 1216 switch (pGVM->gmm.s.enmPolicy) 1217 { 1218 case GMMOCPOLICY_NO_OC: 1219 break; 1220 default: 1221 /** @todo Update GMM->cOverCommittedPages */ 1222 break; 1090 1223 } 1091 1224 } … … 1107 1240 * Scan one chunk for private pages belonging to the specified VM. 1108 1241 * 1109 * @returns @c true if a mapping was found (and freed), @c false if not. 1242 * @note This function is ugly since may drop the ownership of the giant GMM 1243 * mutex! 1244 * 1245 * @returns @c true if we've dropped the giant mutex, @c false if we didn't. 1246 * @param pGMM Pointer to the GMM instance. 1110 1247 * @param pGVM The global VM handle. 1111 1248 * @param pChunk The chunk to scan. 1112 1249 */ 1113 static bool gmmR0CleanupVMScanChunk(PG VM pGVM, PGMMCHUNK pChunk)1250 static bool gmmR0CleanupVMScanChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk) 1114 1251 { 1115 1252 /* … … 1176 1313 1177 1314 /* 1315 * If not in bound memory mode, we should reset the hGVM field 1316 * if it has our handle in it. 1317 */ 1318 if (pChunk->hGVM == pGVM->hSelf) 1319 { 1320 if (!g_pGMM->fBoundMemoryMode) 1321 pChunk->hGVM = NIL_GVM_HANDLE; 1322 else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES) 1323 { 1324 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n", 1325 pChunk, pChunk->Core.Key, pChunk->cFree); 1326 AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree)); 1327 1328 gmmR0UnlinkChunk(pChunk); 1329 pChunk->cFree = GMM_CHUNK_NUM_PAGES; 1330 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private); 1331 } 1332 } 1333 1334 /* 1178 1335 * Look for a mapping belonging to the terminating VM. 
1179 1336 */ 1180 unsigned cMappings = pChunk->cMappings; 1181 bool fMappingFreed = true; 1337 GMMR0CHUNKMTXSTATE MtxState; 1338 gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT); 1339 unsigned cMappings = pChunk->cMappingsX; 1182 1340 for (unsigned i = 0; i < cMappings; i++) 1183 if (pChunk->paMappings[i].pGVM == pGVM) 1184 { 1185 RTR0MEMOBJ MemObj = pChunk->paMappings[i].MapObj; 1341 if (pChunk->paMappingsX[i].pGVM == pGVM) 1342 { 1343 gmmR0ChunkMutexDropGiant(&MtxState); 1344 1345 RTR0MEMOBJ hMemObj = pChunk->paMappingsX[i].hMapObj; 1186 1346 1187 1347 cMappings--; 1188 1348 if (i < cMappings) 1189 pChunk->paMappings[i] = pChunk->paMappings[cMappings]; 1190 pChunk->paMappings[cMappings].pGVM = NULL; 1191 pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ; 1192 Assert(pChunk->cMappings - 1U == cMappings); 1193 pChunk->cMappings = cMappings; 1194 1195 /** @todo Leave the GMM mutex when doing this, it's expensive. */ 1196 int rc = RTR0MemObjFree(MemObj, false /* fFreeMappings (NA) */); 1349 pChunk->paMappingsX[i] = pChunk->paMappingsX[cMappings]; 1350 pChunk->paMappingsX[cMappings].pGVM = NULL; 1351 pChunk->paMappingsX[cMappings].hMapObj = NIL_RTR0MEMOBJ; 1352 Assert(pChunk->cMappingsX - 1U == cMappings); 1353 pChunk->cMappingsX = cMappings; 1354 1355 int rc = RTR0MemObjFree(hMemObj, false /* fFreeMappings (NA) */); 1197 1356 if (RT_FAILURE(rc)) 1198 1357 { 1199 1358 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", 1200 pChunk, pChunk->Core.Key, i, MemObj, rc);1359 pChunk, pChunk->Core.Key, i, hMemObj, rc); 1201 1360 AssertRC(rc); 1202 1361 } 1203 fMappingFreed = true; 1204 break; 1205 } 1206 1207 /* 1208 * If not in bound memory mode, we should reset the hGVM field 1209 * if it has our handle in it. 1210 */ 1211 if (pChunk->hGVM == pGVM->hSelf) 1212 { 1213 if (!g_pGMM->fBoundMemoryMode) 1214 pChunk->hGVM = NIL_GVM_HANDLE; 1215 else if (pChunk->cFree != GMM_CHUNK_NUM_PAGES) 1216 { 1217 SUPR0Printf("gmmR0CleanupVMScanChunk: %p/%#x: cFree=%#x - it should be 0 in bound mode!\n", 1218 pChunk, pChunk->Core.Key, pChunk->cFree); 1219 AssertMsgFailed(("%p/%#x: cFree=%#x - it should be 0 in bound mode!\n", pChunk, pChunk->Core.Key, pChunk->cFree)); 1220 1221 gmmR0UnlinkChunk(pChunk); 1222 pChunk->cFree = GMM_CHUNK_NUM_PAGES; 1223 gmmR0LinkChunk(pChunk, pChunk->cShared ? &g_pGMM->Shared : &g_pGMM->Private); 1224 } 1225 } 1226 1227 return fMappingFreed; 1228 } 1229 1230 1231 /** 1232 * RTAvlU32Destroy callback for GMMR0CleanupVM. 1233 * 1234 * @returns 0 1235 * @param pNode The node (allocation chunk) to destroy. 1236 * @param pvGVM Pointer to the shared VM structure. 
1237 */ 1238 /*static*/ DECLCALLBACK(int) gmmR0CleanupVMDestroyChunk(PAVLU32NODECORE pNode, void *pvGVM) 1239 { 1240 PGMMCHUNK pChunk = (PGMMCHUNK)pNode; 1241 PGVM pGVM = (PGVM)pvGVM; 1242 1243 for (unsigned i = 0; i < pChunk->cMappings; i++) 1244 { 1245 if (pChunk->paMappings[i].pGVM != pGVM) 1246 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: pGVM=%p expected %p\n", pChunk, 1247 pChunk->Core.Key, i, pChunk->paMappings[i].pGVM, pGVM); 1248 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */); 1249 if (RT_FAILURE(rc)) 1250 { 1251 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: mapping #%x: RTRMemObjFree(%p,false) -> %d \n", pChunk, 1252 pChunk->Core.Key, i, pChunk->paMappings[i].MapObj, rc); 1253 AssertRC(rc); 1254 } 1255 } 1256 1257 int rc = RTR0MemObjFree(pChunk->MemObj, true /* fFreeMappings */); 1258 if (RT_FAILURE(rc)) 1259 { 1260 SUPR0Printf("gmmR0CleanupVMDestroyChunk: %p/%#x: RTRMemObjFree(%p,true) -> %d (cMappings=%d)\n", pChunk, 1261 pChunk->Core.Key, pChunk->MemObj, rc, pChunk->cMappings); 1262 AssertRC(rc); 1263 } 1264 pChunk->MemObj = NIL_RTR0MEMOBJ; 1265 1266 RTMemFree(pChunk->paMappings); 1267 pChunk->paMappings = NULL; 1268 1269 RTMemFree(pChunk); 1270 return 0; 1362 gmmR0ChunkMutexRelease(&MtxState, pChunk); 1363 return true; 1364 } 1365 1366 gmmR0ChunkMutexRelease(&MtxState, pChunk); 1367 return false; 1271 1368 } 1272 1369 … … 1606 1703 PGMMCHUNK pChunk = gmmR0GetChunk(pGMM, idPage >> GMM_CHUNKID_SHIFT); 1607 1704 if (RT_LIKELY(pChunk)) 1608 return RTR0MemObjGetPagePhysAddr(pChunk-> MemObj, idPage & GMM_PAGEID_IDX_MASK);1705 return RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, idPage & GMM_PAGEID_IDX_MASK); 1609 1706 return NIL_RTHCPHYS; 1610 1707 } … … 1764 1861 * Initialize it. 1765 1862 */ 1766 pChunk-> MemObj= MemObj;1767 pChunk->cFree = GMM_CHUNK_NUM_PAGES;1768 pChunk->hGVM = hGVM;1863 pChunk->hMemObj = MemObj; 1864 pChunk->cFree = GMM_CHUNK_NUM_PAGES; 1865 pChunk->hGVM = hGVM; 1769 1866 /*pChunk->iFreeHead = 0;*/ 1770 pChunk->idNumaNode = GMM_CHUNK_NUMA_ID_UNKNOWN;1771 pChunk->enmType = enmChunkType;1867 pChunk->idNumaNode = GMM_CHUNK_NUMA_ID_UNKNOWN; 1868 pChunk->enmType = enmChunkType; 1772 1869 for (unsigned iPage = 0; iPage < RT_ELEMENTS(pChunk->aPages) - 1; iPage++) 1773 1870 { … … 1776 1873 } 1777 1874 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.u2State = GMM_PAGE_STATE_FREE; 1778 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX;1875 pChunk->aPages[RT_ELEMENTS(pChunk->aPages) - 1].Free.iNext = UINT16_MAX; 1779 1876 1780 1877 /* … … 1990 2087 1991 2088 /* update the page descriptor. */ 1992 pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk-> MemObj, iPage);2089 pPageDesc->HCPhysGCPhys = RTR0MemObjGetPagePhysAddr(pChunk->hMemObj, iPage); 1993 2090 Assert(pPageDesc->HCPhysGCPhys != NIL_RTHCPHYS); 1994 2091 pPageDesc->idPage = (pChunk->Core.Key << GMM_CHUNKID_SHIFT) | iPage; … … 2688 2785 Assert(pChunk->Core.Key != NIL_GMM_CHUNKID); 2689 2786 2787 GMMR0CHUNKMTXSTATE MtxState; 2788 gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT); 2789 2690 2790 /* 2691 2791 * Cleanup hack! Unmap the chunk from the callers address space. 2692 2792 */ 2693 if ( pChunk->cMappings 2793 if ( pChunk->cMappingsX 2794 && !pGMM->fLegacyAllocationMode 2694 2795 && pGVM) 2695 gmmR0UnmapChunk (pGMM, pGVM, pChunk);2796 gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk); 2696 2797 2697 2798 /* … … 2700 2801 * it won't be a likely candidate for allocations. 
2701 2802 */ 2702 if ( pChunk->cMappings 2703 || pChunk->cMappingsInProgress) 2803 if (pChunk->cMappingsX) 2704 2804 { 2705 2805 /** @todo R0 -> VM request */ 2706 2806 /* The chunk can be mapped by more than one VM if fBoundMemoryMode is false! */ 2707 Log(("gmmR0FreeChunk: chunk still has %d/%d mappings; don't free!\n", pChunk->cMappings , pChunk->cMappingsInProgress));2807 Log(("gmmR0FreeChunk: chunk still has %d/%d mappings; don't free!\n", pChunk->cMappingsX)); 2708 2808 } 2709 2809 else … … 2712 2812 * Try free the memory object. 2713 2813 */ 2714 int rc = RTR0MemObjFree(pChunk->MemObj, false /* fFreeMappings */); 2814 /** @todo drop the giant lock here! */ 2815 int rc = RTR0MemObjFree(pChunk->hMemObj, false /* fFreeMappings */); 2715 2816 if (RT_SUCCESS(rc)) 2716 2817 { 2717 pChunk-> MemObj = NIL_RTR0MEMOBJ;2818 pChunk->hMemObj = NIL_RTR0MEMOBJ; 2718 2819 2719 2820 /* … … 2743 2844 pChunk->Core.Key = NIL_GMM_CHUNKID; 2744 2845 2745 RTMemFree(pChunk->paMappings );2746 pChunk->paMappings = NULL;2846 RTMemFree(pChunk->paMappingsX); 2847 pChunk->paMappingsX = NULL; 2747 2848 2748 2849 RTMemFree(pChunk); 2850 pChunk = NULL; /* (for gmmR0ChunkMutexRelease) */ 2749 2851 2750 2852 pGMM->cFreedChunks++; … … 2753 2855 AssertRC(rc); 2754 2856 } 2857 2858 gmmR0ChunkMutexRelease(&MtxState, pChunk); 2755 2859 } 2756 2860 … … 2806 2910 if (RT_UNLIKELY( pChunk->cFree == GMM_CHUNK_NUM_PAGES 2807 2911 && pChunk->pFreeNext 2808 && pChunk->pFreePrev 2912 && pChunk->pFreePrev /** @todo this is probably misfiring, see reset... */ 2809 2913 && !pGMM->fLegacyAllocationMode)) 2810 2914 gmmR0FreeChunk(pGMM, NULL, pChunk); … … 3361 3465 3362 3466 /** 3363 * Unmaps a chunk previously mapped into the address space of the current process. 3467 * Worker for gmmR0UnmapChunk and gmmr0FreeChunk. 3468 * 3469 * Don't call this in legacy allocation mode! 3364 3470 * 3365 3471 * @returns VBox status code. … … 3368 3474 * @param pChunk Pointer to the chunk to be unmapped. 3369 3475 */ 3370 static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk) 3371 { 3372 if (!pGMM->fLegacyAllocationMode) 3373 { 3374 /* 3375 * Find the mapping and try unmapping it. 3376 */ 3377 uint32_t cMappings = pChunk->cMappings; 3378 for (uint32_t i = 0; i < cMappings; i++) 3379 { 3380 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ); 3381 if (pChunk->paMappings[i].pGVM == pGVM) 3476 static int gmmR0UnmapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk) 3477 { 3478 Assert(!pGMM->fLegacyAllocationMode); 3479 3480 /* 3481 * Find the mapping and try unmapping it. 3482 */ 3483 uint32_t cMappings = pChunk->cMappingsX; 3484 for (uint32_t i = 0; i < cMappings; i++) 3485 { 3486 Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ); 3487 if (pChunk->paMappingsX[i].pGVM == pGVM) 3488 { 3489 /* unmap */ 3490 int rc = RTR0MemObjFree(pChunk->paMappingsX[i].hMapObj, false /* fFreeMappings (NA) */); 3491 if (RT_SUCCESS(rc)) 3382 3492 { 3383 /* unmap */ 3384 int rc = RTR0MemObjFree(pChunk->paMappings[i].MapObj, false /* fFreeMappings (NA) */); 3385 if (RT_SUCCESS(rc)) 3386 { 3387 /* update the record. */ 3388 cMappings--; 3389 if (i < cMappings) 3390 pChunk->paMappings[i] = pChunk->paMappings[cMappings]; 3391 pChunk->paMappings[cMappings].MapObj = NIL_RTR0MEMOBJ; 3392 pChunk->paMappings[cMappings].pGVM = NULL; 3393 Assert(pChunk->cMappings - 1U == cMappings); 3394 pChunk->cMappings = cMappings; 3395 } 3396 return rc; 3493 /* update the record. 
*/ 3494 cMappings--; 3495 if (i < cMappings) 3496 pChunk->paMappingsX[i] = pChunk->paMappingsX[cMappings]; 3497 pChunk->paMappingsX[cMappings].hMapObj = NIL_RTR0MEMOBJ; 3498 pChunk->paMappingsX[cMappings].pGVM = NULL; 3499 Assert(pChunk->cMappingsX - 1U == cMappings); 3500 pChunk->cMappingsX = cMappings; 3397 3501 } 3398 } 3399 }3400 else if (pChunk->hGVM == pGVM->hSelf)3401 return VINF_SUCCESS;3502 3503 return rc; 3504 } 3505 } 3402 3506 3403 3507 Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x\n", pChunk->Core.Key, pGVM, pGVM->hSelf)); 3404 3508 return VERR_GMM_CHUNK_NOT_MAPPED; 3509 } 3510 3511 3512 /** 3513 * Unmaps a chunk previously mapped into the address space of the current process. 3514 * 3515 * @returns VBox status code. 3516 * @param pGMM Pointer to the GMM instance data. 3517 * @param pGVM Pointer to the Global VM structure. 3518 * @param pChunk Pointer to the chunk to be unmapped. 3519 */ 3520 static int gmmR0UnmapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem) 3521 { 3522 if (!pGMM->fLegacyAllocationMode) 3523 { 3524 /* 3525 * Lock the chunk and if possible leave the giant GMM lock. 3526 */ 3527 GMMR0CHUNKMTXSTATE MtxState; 3528 int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, 3529 fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT); 3530 if (RT_SUCCESS(rc)) 3531 { 3532 rc = gmmR0UnmapChunkLocked(pGMM, pGVM, pChunk); 3533 gmmR0ChunkMutexRelease(&MtxState, pChunk); 3534 } 3535 return rc; 3536 } 3537 3538 if (pChunk->hGVM == pGVM->hSelf) 3539 return VINF_SUCCESS; 3540 3541 Log(("gmmR0UnmapChunk: Chunk %#x is not mapped into pGVM=%p/%#x (legacy)\n", pChunk->Core.Key, pGVM, pGVM->hSelf)); 3542 return VERR_GMM_CHUNK_NOT_MAPPED; 3543 } 3544 3545 3546 /** 3547 * Worker for gmmR0MapChunk. 3548 * 3549 * @returns VBox status code. 3550 * @param pGMM Pointer to the GMM instance data. 3551 * @param pGVM Pointer to the Global VM structure. 3552 * @param pChunk Pointer to the chunk to be mapped. 3553 * @param ppvR3 Where to store the ring-3 address of the mapping. 3554 * In the VERR_GMM_CHUNK_ALREADY_MAPPED case, this will be 3555 * contain the address of the existing mapping. 3556 */ 3557 static int gmmR0MapChunkLocked(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3) 3558 { 3559 /* 3560 * If we're in legacy mode this is simple. 3561 */ 3562 if (pGMM->fLegacyAllocationMode) 3563 { 3564 if (pChunk->hGVM != pGVM->hSelf) 3565 { 3566 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3)); 3567 return VERR_GMM_CHUNK_NOT_FOUND; 3568 } 3569 3570 *ppvR3 = RTR0MemObjAddressR3(pChunk->hMemObj); 3571 return VINF_SUCCESS; 3572 } 3573 3574 /* 3575 * Check to see if the chunk is already mapped. 3576 */ 3577 for (uint32_t i = 0; i < pChunk->cMappingsX; i++) 3578 { 3579 Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ); 3580 if (pChunk->paMappingsX[i].pGVM == pGVM) 3581 { 3582 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappingsX[i].hMapObj); 3583 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3)); 3584 #ifdef VBOX_WITH_PAGE_SHARING 3585 /* The ring-3 chunk cache can be out of sync; don't fail. */ 3586 return VINF_SUCCESS; 3587 #else 3588 return VERR_GMM_CHUNK_ALREADY_MAPPED; 3589 #endif 3590 } 3591 } 3592 3593 /* 3594 * Do the mapping. 
3595 */ 3596 RTR0MEMOBJ hMapObj; 3597 int rc = RTR0MemObjMapUser(&hMapObj, pChunk->hMemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 3598 if (RT_SUCCESS(rc)) 3599 { 3600 /* reallocate the array? assumes few users per chunk (usually one). */ 3601 unsigned iMapping = pChunk->cMappingsX; 3602 if ( iMapping <= 3 3603 || (iMapping & 3) == 0) 3604 { 3605 unsigned cNewSize = iMapping <= 3 3606 ? iMapping + 1 3607 : iMapping + 4; 3608 Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize); 3609 if (RT_UNLIKELY(cNewSize > UINT16_MAX)) 3610 { 3611 rc = RTR0MemObjFree(hMapObj, false /* fFreeMappings (NA) */); AssertRC(rc); 3612 return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS; 3613 } 3614 3615 void *pvMappings = RTMemRealloc(pChunk->paMappingsX, cNewSize * sizeof(pChunk->paMappingsX[0])); 3616 if (RT_UNLIKELY(!pvMappings)) 3617 { 3618 rc = RTR0MemObjFree(hMapObj, false /* fFreeMappings (NA) */); AssertRC(rc); 3619 return VERR_NO_MEMORY; 3620 } 3621 pChunk->paMappingsX = (PGMMCHUNKMAP)pvMappings; 3622 } 3623 3624 /* insert new entry */ 3625 pChunk->paMappingsX[iMapping].hMapObj = hMapObj; 3626 pChunk->paMappingsX[iMapping].pGVM = pGVM; 3627 Assert(pChunk->cMappingsX == iMapping); 3628 pChunk->cMappingsX = iMapping + 1; 3629 3630 *ppvR3 = RTR0MemObjAddressR3(hMapObj); 3631 } 3632 3633 return rc; 3405 3634 } 3406 3635 … … 3421 3650 static int gmmR0MapChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem, PRTR3PTR ppvR3) 3422 3651 { 3423 Assert(pGMM->hMtxOwner == RTThreadNativeSelf()); 3424 3425 /* 3426 * If we're in legacy mode this is simple. 3427 */ 3428 if (pGMM->fLegacyAllocationMode) 3429 { 3430 if (pChunk->hGVM != pGVM->hSelf) 3431 { 3432 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3)); 3433 return VERR_GMM_CHUNK_NOT_FOUND; 3434 } 3435 3436 *ppvR3 = RTR0MemObjAddressR3(pChunk->MemObj); 3437 return VINF_SUCCESS; 3438 } 3439 3440 /* 3441 * Check to see if the chunk is already mapped. 3442 */ 3443 for (uint32_t i = 0; i < pChunk->cMappings; i++) 3444 { 3445 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ); 3446 if (pChunk->paMappings[i].pGVM == pGVM) 3447 { 3448 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj); 3449 Log(("gmmR0MapChunk: chunk %#x is already mapped at %p!\n", pChunk->Core.Key, *ppvR3)); 3450 #ifdef VBOX_WITH_PAGE_SHARING 3451 /* The ring-3 chunk cache can be out of sync; don't fail. */ 3452 return VINF_SUCCESS; 3453 #else 3454 return VERR_GMM_CHUNK_ALREADY_MAPPED; 3455 #endif 3456 } 3457 } 3458 3459 /* 3460 * Do the mapping. Leave the semaphore when possible since mapping memory 3461 * into the user process can be very expensive. 3462 * 3463 * ASSUMES that all mappers will hold the PGM lock and therefore prevent 3464 * other threads from mapping the memory into the same process. 
3465 */ 3466 RTR0MEMOBJ MapObj; 3467 int rc; 3468 if ( !fRelaxedSem 3469 || pChunk->cFree == GMM_CHUNK_NUM_PAGES) 3470 rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 3471 else 3472 { 3473 pChunk->cMappingsInProgress++; 3474 gmmR0MutexRelease(pGMM); 3475 Assert(PGMIsLockOwner(pGVM->pVM)); 3476 3477 rc = RTR0MemObjMapUser(&MapObj, pChunk->MemObj, (RTR3PTR)-1, 0, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS); 3478 3479 int rc2 = gmmR0MutexAcquire(pGMM); AssertRC(rc2); 3480 pChunk->cMappingsInProgress--; 3481 } 3652 /* 3653 * Take the chunk lock and leave the giant GMM lock when possible, then 3654 * call the worker function. 3655 */ 3656 GMMR0CHUNKMTXSTATE MtxState; 3657 int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, 3658 fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT); 3482 3659 if (RT_SUCCESS(rc)) 3483 3660 { 3484 /* reallocate the array? assumes few users per chunk (usually one). */ 3485 unsigned iMapping = pChunk->cMappings; 3486 if ( iMapping <= 3 3487 || (iMapping & 3) == 0) 3488 { 3489 unsigned cNewSize = iMapping <= 3 3490 ? iMapping + 1 3491 : iMapping + 4; 3492 Assert(cNewSize < 4 || RT_ALIGN_32(cNewSize, 4) == cNewSize); 3493 if (RT_UNLIKELY(cNewSize > UINT16_MAX)) 3494 { 3495 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc); 3496 return VERR_GMM_TOO_MANY_CHUNK_MAPPINGS; 3497 } 3498 3499 void *pvMappings = RTMemRealloc(pChunk->paMappings, cNewSize * sizeof(pChunk->paMappings[0])); 3500 if (RT_UNLIKELY(!pvMappings)) 3501 { 3502 rc = RTR0MemObjFree(MapObj, false /* fFreeMappings (NA) */); AssertRC(rc); 3503 return VERR_NO_MEMORY; 3504 } 3505 pChunk->paMappings = (PGMMCHUNKMAP)pvMappings; 3506 } 3507 3508 /* insert new entry */ 3509 pChunk->paMappings[iMapping].MapObj = MapObj; 3510 pChunk->paMappings[iMapping].pGVM = pGVM; 3511 Assert(pChunk->cMappings == iMapping); 3512 pChunk->cMappings = iMapping + 1; 3513 3514 *ppvR3 = RTR0MemObjAddressR3(MapObj); 3661 rc = gmmR0MapChunkLocked(pGMM, pGVM, pChunk, ppvR3); 3662 gmmR0ChunkMutexRelease(&MtxState, pChunk); 3515 3663 } 3516 3664 … … 3519 3667 3520 3668 3669 3521 3670 /** 3522 3671 * Check if a chunk is mapped into the specified VM 3523 3672 * 3524 3673 * @returns mapped yes/no 3674 * @param pGMM Pointer to the GMM instance. 3525 3675 * @param pGVM Pointer to the Global VM structure. 3526 3676 * @param pChunk Pointer to the chunk to be mapped. 3527 3677 * @param ppvR3 Where to store the ring-3 address of the mapping. 
3528 3678 */ 3529 static int gmmR0IsChunkMapped(PG VM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3)3530 { 3531 /*3532 * Check to see if the chunk is already mapped.3533 */3534 for (uint32_t i = 0; i < pChunk->cMappings; i++)3535 {3536 Assert(pChunk->paMappings[i].pGVM && pChunk->paMappings[i].MapObj != NIL_RTR0MEMOBJ);3537 if (pChunk->paMappings[i].pGVM == pGVM)3538 {3539 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappings[i].MapObj);3679 static int gmmR0IsChunkMapped(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, PRTR3PTR ppvR3) 3680 { 3681 GMMR0CHUNKMTXSTATE MtxState; 3682 gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk, GMMR0CHUNK_MTX_KEEP_GIANT); 3683 for (uint32_t i = 0; i < pChunk->cMappingsX; i++) 3684 { 3685 Assert(pChunk->paMappingsX[i].pGVM && pChunk->paMappingsX[i].hMapObj != NIL_RTR0MEMOBJ); 3686 if (pChunk->paMappingsX[i].pGVM == pGVM) 3687 { 3688 *ppvR3 = RTR0MemObjAddressR3(pChunk->paMappingsX[i].hMapObj); 3689 gmmR0ChunkMutexRelease(&MtxState, pChunk); 3540 3690 return true; 3541 3691 } 3542 3692 } 3543 3693 *ppvR3 = NULL; 3694 gmmR0ChunkMutexRelease(&MtxState, pChunk); 3544 3695 return false; 3545 3696 } … … 3613 3764 } 3614 3765 } 3766 /** @todo split this operation, the bail out might (theoretcially) not be 3767 * entirely safe. */ 3615 3768 3616 3769 if ( idChunkUnmap != NIL_GMM_CHUNKID … … 3619 3772 PGMMCHUNK pUnmap = gmmR0GetChunk(pGMM, idChunkUnmap); 3620 3773 if (RT_LIKELY(pUnmap)) 3621 rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap );3774 rc = gmmR0UnmapChunk(pGMM, pGVM, pUnmap, true /*fRelaxedSem*/); 3622 3775 else 3623 3776 { … … 3627 3780 3628 3781 if (RT_FAILURE(rc) && pMap) 3629 gmmR0UnmapChunk(pGMM, pGVM, pMap );3782 gmmR0UnmapChunk(pGMM, pGVM, pMap, false /*fRelaxedSem*/); 3630 3783 } 3631 3784 … … 4174 4327 if (pChunk) 4175 4328 { 4176 if (!gmmR0IsChunkMapped(pG VM, pChunk, (PRTR3PTR)&pbChunk))4329 if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk)) 4177 4330 { 4178 4331 Log(("GMMR0SharedModuleCheckPage: Invalid idPage=%#x #3\n", pPageDesc->uHCPhysPageId)); … … 4196 4349 4197 4350 /* Get the virtual address of the physical page; map the chunk into the VM process if not already done. */ 4198 if (!gmmR0IsChunkMapped(pG VM, pChunk, (PRTR3PTR)&pbChunk))4351 if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk)) 4199 4352 { 4200 4353 Log(("Map chunk into process!\n")); … … 4496 4649 4497 4650 /* Only take chunks not mapped into this VM process; not entirely correct. */ 4498 if (!gmmR0IsChunkMapped(pG VM, pChunk, (PRTR3PTR)&pbChunk))4651 if (!gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk)) 4499 4652 { 4500 4653 int rc = gmmR0MapChunk(pGMM, pGVM, pChunk, false /*fRelaxedSem*/, (PRTR3PTR)&pbChunk); … … 4518 4671 } 4519 4672 } 4520 gmmR0UnmapChunk(pGMM, pGVM, pChunk );4673 gmmR0UnmapChunk(pGMM, pGVM, pChunk, false /*fRelaxedSem*/); 4521 4674 } 4522 4675 } … … 4559 4712 if (pChunk) 4560 4713 { 4561 if (gmmR0IsChunkMapped(pG VM, pChunk, (PRTR3PTR)&pbChunk))4714 if (gmmR0IsChunkMapped(pGMM, pGVM, pChunk, (PRTR3PTR)&pbChunk)) 4562 4715 { 4563 4716 uint8_t *pbSourcePage = pbChunk + ((pReq->idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
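
The core of this changeset is the new per-chunk locking scheme: a small pool of RTSEMFASTMUTEX handles in GMM (ahChunkMtx), the GMMR0CHUNKMTXSTATE tracker, and the GMMR0CHUNK_MTX_XXX flags that control what happens to the giant GMM mutex while a chunk is locked. The following is a minimal sketch of the usage pattern, modelled on gmmR0UnmapChunk above; the worker function and its body are hypothetical, while the types, flags and gmmR0ChunkMutex* helpers are the ones introduced by this changeset. Note that GMMR0CHUNK_MTX_KEEP_GIANT keeps the giant lock held, with gmmR0ChunkMutexDropGiant available to let it go later if the chunk work turns out to be expensive.

    /* Illustrative sketch only: gmmR0ExampleChunkWorker and its body are
     * hypothetical; GMMR0CHUNKMTXSTATE, GMMR0CHUNK_MTX_XXX and the
     * gmmR0ChunkMutex* helpers are those introduced in the diff above. */
    static int gmmR0ExampleChunkWorker(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem)
    {
        /* The caller enters owning the giant GMM mutex.  Lock the chunk and,
           when fRelaxedSem permits it, let go of the giant lock while working. */
        GMMR0CHUNKMTXSTATE MtxState;
        int rc = gmmR0ChunkMutexAcquire(&MtxState, pGMM, pChunk,
                                        fRelaxedSem ? GMMR0CHUNK_MTX_RETAKE_GIANT : GMMR0CHUNK_MTX_KEEP_GIANT);
        if (RT_SUCCESS(rc))
        {
            /* ... access pChunk->paMappingsX / pChunk->cMappingsX here; these
               fields are protected by the chunk mutex, not the giant lock ... */
            NOREF(pGVM);

            /* Release the chunk mutex.  With RETAKE_GIANT the helper re-acquires
               the giant GMM mutex before returning, so the caller's locking state
               is the same as on entry. */
            rc = gmmR0ChunkMutexRelease(&MtxState, pChunk);
        }
        return rc;
    }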