Changeset 22757 in vbox
- Timestamp: Sep 3, 2009, 5:22:53 PM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
Legend:
- Unchanged context lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/src/VBox/VMM/PDMAsyncCompletionFile.cpp
r22309 → r22757

 }

-static int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
+int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
 {
     int rc;
…
               || (enmTransfer == PDMACTASKFILETRANSFER_WRITE));

-    pTaskFile->cbTransferLeft = cbTransfer;
+    ASMAtomicWriteS32(&pTaskFile->cbTransferLeft, cbTransfer);
+    ASMAtomicWriteBool(&pTaskFile->fCompleted, false);

     for (unsigned i = 0; i < cSegments; i++)
…
     AssertMsg(!cbTransfer, ("Incomplete transfer %u bytes left\n", cbTransfer));

+    if (   ASMAtomicReadS32(&pTaskFile->cbTransferLeft) == 0
+        && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
+        pdmR3AsyncCompletionCompleteTask(pTask);
+
     return VINF_SUCCESS;
 }
…
  * @param   ppAioMgr    Where to store the pointer to the new async I/O manager on success.
  */
-static int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr)
+int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr)
 {
     int rc = VINF_SUCCESS;
…
     }

+    pEpFile->cbFile = cbSize;
+
     RTFileClose(File);
 }
…
     PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEndpoint;

-    return RTFileGetSize(pEpFile->File, pcbSize);
+    *pcbSize = ASMAtomicReadU64(&pEpFile->cbFile);
+
+    return VINF_SUCCESS;
 }
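The hunks above switch the task bookkeeping to atomic operations: cbTransferLeft is now written and read atomically, and the fCompleted flag guards pdmR3AsyncCompletionCompleteTask so the task is reported as finished exactly once even when updates race. For reference, a minimal standalone sketch of this "last decrement completes, an exchange guards the callback" pattern, written against C11 <stdatomic.h> rather than the IPRT ASMAtomic* helpers; every name in it (DEMOTASK, demoChunkDone, ...) is illustrative and not part of the VirtualBox sources:

    /* Each finished chunk subtracts from an atomic "bytes left" counter; the single
     * caller that both reaches zero and wins the flag exchange reports completion. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct DEMOTASK
    {
        atomic_int  cbTransferLeft;   /* bytes not yet transferred */
        atomic_bool fCompleted;       /* flipped to true exactly once */
    } DEMOTASK;

    static void demoTaskInit(DEMOTASK *pTask, int cbTransfer)
    {
        atomic_store(&pTask->cbTransferLeft, cbTransfer);
        atomic_store(&pTask->fCompleted, false);
    }

    /* Called (possibly from several threads) when a chunk of cb bytes has finished. */
    static void demoChunkDone(DEMOTASK *pTask, int cb)
    {
        /* fetch_sub returns the previous value; previous == cb means we just hit zero. */
        if (atomic_fetch_sub(&pTask->cbTransferLeft, cb) == cb)
        {
            bool fExpected = false;
            /* Only one caller can flip false -> true, so the report runs once. */
            if (atomic_compare_exchange_strong(&pTask->fCompleted, &fExpected, true))
                printf("task complete\n");
        }
    }

    int main(void)
    {
        DEMOTASK Task;
        demoTaskInit(&Task, 4096);
        demoChunkDone(&Task, 1024);   /* nothing reported yet */
        demoChunkDone(&Task, 3072);   /* prints "task complete" exactly once */
        return 0;
    }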
trunk/src/VBox/VMM/PDMAsyncCompletionFileCache.cpp
r22310 → r22757

 #include "PDMAsyncCompletionFileInternal.h"

+#ifdef VBOX_STRICT
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) \
+    do \
+    { \
+        AssertMsg(RTCritSectIsOwner(&pCache->CritSect), \
+                  ("Thread does not own critical section\n"));\
+    } while(0);
+#else
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while(0);
+#endif
+
 /*******************************************************************************
 *   Internal Functions                                                         *
 *******************************************************************************/
 static void pdmacFileCacheTaskCompleted(PPDMACTASKFILE pTask, void *pvUser);
+
+DECLINLINE(void) pdmacFileEpCacheEntryRelease(PPDMACFILECACHEENTRY pEntry)
+{
+    AssertMsg(pEntry->cRefs > 0, ("Trying to release a not referenced entry\n"));
+    ASMAtomicDecU32(&pEntry->cRefs);
+}
+
+DECLINLINE(void) pdmacFileEpCacheEntryRef(PPDMACFILECACHEENTRY pEntry)
+{
+    ASMAtomicIncU32(&pEntry->cRefs);
+}
…
     size_t cbEvicted = 0;

+    PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+
     AssertMsg(cbData > 0, ("Evicting 0 bytes not possible\n"));
     AssertMsg(   !pGhostListDst
…
         /* We can't evict pages which are currently in progress */
-        if (!(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS))
+        if (   !(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
+            && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
         {
-            LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
-            if (pCurr->pbData)
+            /* Ok eviction candidate. Grab the endpoint semaphore and check again
+             * because somebody else might have raced us. */
+            PPDMACFILEENDPOINTCACHE pEndpointCache = &pCurr->pEndpoint->DataCache;
+            RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+
+            if (   !(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
+                && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
             {
-                RTMemPageFree(pCurr->pbData);
-                pCurr->pbData = NULL;
-            }
-
-            cbEvicted += pCurr->cbData;
-
-            if (pGhostListDst)
-            {
-                pdmacFileCacheEntryAddToList(pGhostListDst, pCurr);
-            }
-            else
-            {
-                /* Delete the entry from the AVL tree it is assigned to. */
-                STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
-                RTAvlrFileOffsetRemove(pCurr->pEndpoint->DataCache.pTree, pCurr->Core.Key);
-                STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
-
-                pdmacFileCacheEntryRemoveFromList(pCurr);
-                pCache->cbCached -= pCurr->cbData;
-                RTMemFree(pCurr);
+                LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));
+                if (pCurr->pbData)
+                {
+                    RTMemPageFree(pCurr->pbData);
+                    pCurr->pbData = NULL;
+                }
+
+                cbEvicted += pCurr->cbData;
+
+                if (pGhostListDst)
+                {
+                    pdmacFileCacheEntryAddToList(pGhostListDst, pCurr);
+                }
+                else
+                {
+                    /* Delete the entry from the AVL tree it is assigned to. */
+                    STAM_PROFILE_ADV_START(&pCache->StatTreeRemove, Cache);
+                    RTAvlrFileOffsetRemove(pCurr->pEndpoint->DataCache.pTree, pCurr->Core.Key);
+                    STAM_PROFILE_ADV_STOP(&pCache->StatTreeRemove, Cache);
+
+                    pdmacFileCacheEntryRemoveFromList(pCurr);
+                    pCache->cbCached -= pCurr->cbData;
+                    RTMemFree(pCurr);
+                }
             }
+            RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
         }
         else
…
 static size_t pdmacFileCacheReplace(PPDMACFILECACHEGLOBAL pCache, size_t cbData, PPDMACFILELRULIST pEntryList)
 {
+    PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+
     if (   (pCache->LruRecentlyUsed.cbCached)
         && (   (pCache->LruRecentlyUsed.cbCached > pCache->uAdaptVal)
…
     size_t cbRemoved = ~0;
+
+    PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);

     if ((pCache->LruRecentlyUsed.cbCached + pCache->LruRecentlyGhost.cbCached) >= pCache->cbMax)
…
     int32_t uUpdateVal = 0;

+    PDMACFILECACHE_IS_CRITSECT_OWNER(pCache);
+
     /* Update parameters */
     if (pEntry->pList == &pCache->LruRecentlyGhost)
…
     PPDMACFILECACHEENTRY pEntry = (PPDMACFILECACHEENTRY)pvUser;
     PPDMACFILECACHEGLOBAL pCache = pEntry->pCache;
-
-    RTCritSectEnter(&pCache->CritSect);
-
+    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pEntry->pEndpoint;
+
+    /* Reference the entry now as we are clearing the I/O in progres flag
+     * which protects the entry till now. */
+    pdmacFileEpCacheEntryRef(pEntry);
+
+    RTSemRWRequestWrite(pEndpoint->DataCache.SemRWEntries, RT_INDEFINITE_WAIT);
     pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IO_IN_PROGRESS;
…
         pdmacFileCacheWriteToEndpoint(pEntry);

-    RTCritSectLeave(&pCache->CritSect);
+    RTSemRWReleaseWrite(pEndpoint->DataCache.SemRWEntries);
+
+    /* Dereference so that it isn't protected anymore except we issued anyother write for it. */
+    pdmacFileEpCacheEntryRelease(pEntry);
 }
…
     PPDMACFILEENDPOINTCACHE pEndpointCache = &pEndpoint->DataCache;

-    pEndpointCache->pTree  = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
     pEndpointCache->pCache = &pClassFile->Cache;

-    return VINF_SUCCESS;
+    int rc = RTSemRWCreate(&pEndpointCache->SemRWEntries);
+    if (RT_SUCCESS(rc))
+    {
+        pEndpointCache->pTree = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
+        if (!pEndpointCache->pTree)
+        {
+            rc = VERR_NO_MEMORY;
+            RTSemRWDestroy(pEndpointCache->SemRWEntries);
+        }
+    }
+
+    return rc;
 }
…
     /* Make sure nobody is accessing the cache while we delete the tree. */
+    RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
     RTCritSectEnter(&pCache->CritSect);
     RTAvlrFileOffsetDestroy(pEndpointCache->pTree, pdmacFileEpCacheEntryDestroy, pCache);
     RTCritSectLeave(&pCache->CritSect);
+    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+
+    RTSemRWDestroy(pEndpointCache->SemRWEntries);
 }
+
+static PPDMACFILECACHEENTRY pdmacFileEpCacheGetCacheEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off)
+{
+    PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
+    PPDMACFILECACHEENTRY pEntry = NULL;
+
+    STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+
+    RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+    pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetRangeGet(pEndpointCache->pTree, off);
+    if (pEntry)
+        pdmacFileEpCacheEntryRef(pEntry);
+    RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
+
+    STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+
+    return pEntry;
+}
+
+static PPDMACFILECACHEENTRY pdmacFileEpCacheGetCacheBestFitEntryByOffset(PPDMACFILEENDPOINTCACHE pEndpointCache, RTFOFF off)
+{
+    PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
+    PPDMACFILECACHEENTRY pEntry = NULL;
+
+    STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
+
+    RTSemRWRequestRead(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+    pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, true);
+    if (pEntry)
+        pdmacFileEpCacheEntryRef(pEntry);
+    RTSemRWReleaseRead(pEndpointCache->SemRWEntries);
+
+    STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+
+    return pEntry;
+}
+
+static void pdmacFileEpCacheInsertEntry(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
+{
+    PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
+
+    STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
+    RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+    bool fInserted = RTAvlrFileOffsetInsert(pEndpointCache->pTree, &pEntry->Core);
+    AssertMsg(fInserted, ("Node was not inserted into tree\n"));
+    STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
+    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+}
…
     ASMAtomicWriteBool(&pTask->fCompleted, true);

-    RTCritSectEnter(&pCache->CritSect);
-
     int iSegCurr = 0;
     uint8_t *pbSegBuf = (uint8_t *)paSegments[iSegCurr].pvSeg;
…
         size_t cbToRead;

-        STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
-        pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetRangeGet(pEndpointCache->pTree, off);
-        STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+        pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);

         /*
…
             if (   (pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
                 && !(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY))
             {
-                /* Entry didn't completed yet. Append to the list */
-                while (cbToRead)
+                RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+                /* Check again. The completion callback might have raced us. */
+
+                if (   (pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
+                    && !(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY))
                 {
-                    PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
-
-                    pSeg->pTask      = pTask;
-                    pSeg->uBufOffset = OffDiff;
-                    pSeg->cbTransfer = RT_MIN(cbToRead, cbSegLeft);
-                    pSeg->pvBuf      = pbSegBuf;
-                    pSeg->fWrite     = false;
-
-                    ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
-
-                    pSeg->pNext = pEntry->pHead;
-                    pEntry->pHead = pSeg;
-
-                    off      += pSeg->cbTransfer;
-                    cbToRead -= pSeg->cbTransfer;
-                    OffDiff  += pSeg->cbTransfer;
+                    /* Entry didn't completed yet. Append to the list */
+                    while (cbToRead)
+                    {
+                        PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
+
+                        pSeg->pTask      = pTask;
+                        pSeg->uBufOffset = OffDiff;
+                        pSeg->cbTransfer = RT_MIN(cbToRead, cbSegLeft);
+                        pSeg->pvBuf      = pbSegBuf;
+                        pSeg->fWrite     = false;
+
+                        ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
+
+                        pSeg->pNext = pEntry->pHead;
+                        pEntry->pHead = pSeg;
+
+                        off      += pSeg->cbTransfer;
+                        cbToRead -= pSeg->cbTransfer;
+                        OffDiff  += pSeg->cbTransfer;
+                    }
+                    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+                }
+                else
+                {
+                    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+
+                    /* Read as much as we can from the entry. */
+                    while (cbToRead)
+                    {
+                        size_t cbCopy = RT_MIN(cbSegLeft, cbToRead);
+
+                        memcpy(pbSegBuf, pEntry->pbData + OffDiff, cbCopy);
+
+                        ADVANCE_SEGMENT_BUFFER(cbCopy);
+
+                        cbToRead -= cbCopy;
+                        off      += cbCopy;
+                        OffDiff  += cbCopy;
+                        ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
+                    }
                 }
             }
…
             /* Move this entry to the top position */
+            RTCritSectEnter(&pCache->CritSect);
             pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+            RTCritSectLeave(&pCache->CritSect);
         }
         else
         {
+            RTCritSectEnter(&pCache->CritSect);
             pdmacFileCacheUpdate(pCache, pEntry);
             pdmacFileCacheReplace(pCache, pEntry->cbData, pEntry->pList);

             /* Move the entry to T2 and fetch it to the cache. */
             pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+            RTCritSectLeave(&pCache->CritSect);

             pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
…
                 pdmacFileCacheReadFromEndpoint(pEntry);
             }
+            pdmacFileEpCacheEntryRelease(pEntry);
         }
         else
         {
             /* No entry found for this offset. Get best fit entry and fetch the data to the cache. */
-            STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
-            PPDMACFILECACHEENTRY pEntryBestFit = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, true);
-            STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+            PPDMACFILECACHEENTRY pEntryBestFit = pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off);

             LogFlow(("%sbest fit entry for off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
…
             if (pEntryBestFit && ((off + (RTFOFF)cbRead) > pEntryBestFit->Core.Key))
+            {
                 cbToRead = pEntryBestFit->Core.Key - off;
+                pdmacFileEpCacheEntryRelease(pEntryBestFit);
+            }
             else
                 cbToRead = cbRead;
…
             STAM_COUNTER_INC(&pCache->cPartialHits);

+            RTCritSectEnter(&pCache->CritSect);
             size_t cbRemoved = pdmacFileCacheEvict(pCache, cbToRead);
+            RTCritSectLeave(&pCache->CritSect);

             if (cbRemoved >= cbToRead)
…
                 pEntryNew->pCache  = pCache;
                 pEntryNew->fFlags  = 0;
+                pEntryNew->cRefs   = 1; /* We are using it now. */
                 pEntryNew->pList   = NULL;
                 pEntryNew->cbData  = cbToRead;
…
                 pEntryNew->pbData  = (uint8_t *)RTMemPageAlloc(cbToRead);
                 AssertPtr(pEntryNew->pbData);
+
+                RTCritSectEnter(&pCache->CritSect);
                 pdmacFileCacheEntryAddToList(&pCache->LruRecentlyUsed, pEntryNew);
-
-                STAM_PROFILE_ADV_START(&pCache->StatTreeInsert, Cache);
-                bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->DataCache.pTree, &pEntryNew->Core);
-                AssertMsg(fInserted, ("Node was not inserted into tree\n"));
-                STAM_PROFILE_ADV_STOP(&pCache->StatTreeInsert, Cache);
-
+                RTCritSectLeave(&pCache->CritSect);
+
+                pdmacFileEpCacheInsertEntry(pEndpointCache, pEntryNew);
                 uint32_t uBufOffset = 0;
…
                 pdmacFileCacheReadFromEndpoint(pEntryNew);
+                pdmacFileEpCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
             }
             else
…
         pdmR3AsyncCompletionCompleteTask(&pTask->Core);

-    RTCritSectLeave(&pCache->CritSect);
-
     return rc;
 }
…
     ASMAtomicWriteBool(&pTask->fCompleted, true);

-    RTCritSectEnter(&pCache->CritSect);
-
     int iSegCurr = 0;
     uint8_t *pbSegBuf = (uint8_t *)paSegments[iSegCurr].pvSeg;
…
         size_t cbToWrite;

-        STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
-        pEntry = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetRangeGet(pEndpointCache->pTree, off);
-        STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+        pEntry = pdmacFileEpCacheGetCacheEntryByOffset(pEndpointCache, off);

         if (pEntry)
…
             if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
             {
-                AssertMsg(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
-                          ("Entry is dirty but not in progress\n"));
-
-                /* The data isn't written to the file yet */
-                while (cbToWrite)
+                RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+                /* Check again. The completion callback might have raced us. */
+
+                if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
                 {
-                    PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
-
-                    pSeg->pTask      = pTask;
-                    pSeg->uBufOffset = OffDiff;
-                    pSeg->cbTransfer = RT_MIN(cbToWrite, cbSegLeft);
-                    pSeg->pvBuf      = pbSegBuf;
-                    pSeg->fWrite     = true;
-
-                    ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
-
-                    pSeg->pNext = pEntry->pHead;
-                    pEntry->pHead = pSeg;
-
-                    off       += pSeg->cbTransfer;
-                    OffDiff   += pSeg->cbTransfer;
-                    cbToWrite -= pSeg->cbTransfer;
+                    AssertMsg(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
+                              ("Entry is dirty but not in progress\n"));
+
+                    /* The data isn't written to the file yet */
+                    while (cbToWrite)
+                    {
+                        PPDMACFILETASKSEG pSeg = (PPDMACFILETASKSEG)RTMemAllocZ(sizeof(PDMACFILETASKSEG));
+
+                        pSeg->pTask      = pTask;
+                        pSeg->uBufOffset = OffDiff;
+                        pSeg->cbTransfer = RT_MIN(cbToWrite, cbSegLeft);
+                        pSeg->pvBuf      = pbSegBuf;
+                        pSeg->fWrite     = true;
+
+                        ADVANCE_SEGMENT_BUFFER(pSeg->cbTransfer);
+
+                        pSeg->pNext = pEntry->pHead;
+                        pEntry->pHead = pSeg;
+
+                        off       += pSeg->cbTransfer;
+                        OffDiff   += pSeg->cbTransfer;
+                        cbToWrite -= pSeg->cbTransfer;
+                    }
+                    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+                }
+                else
+                {
+                    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+
+                    AssertMsg(!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS),
+                              ("Entry is not dirty but in progress\n"));
+
+                    /* Write as much as we can into the entry and update the file. */
+                    while (cbToWrite)
+                    {
+                        size_t cbCopy = RT_MIN(cbSegLeft, cbToWrite);
+
+                        memcpy(pEntry->pbData + OffDiff, pbSegBuf, cbCopy);
+
+                        ADVANCE_SEGMENT_BUFFER(cbCopy);
+
+                        cbToWrite -= cbCopy;
+                        off       += cbCopy;
+                        OffDiff   += cbCopy;
+                        ASMAtomicSubS32(&pTask->cbTransferLeft, cbCopy);
+                    }
+
+                    pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
+                    pdmacFileCacheWriteToEndpoint(pEntry);
                 }
             }
…
             /* Move this entry to the top position */
+            RTCritSectEnter(&pCache->CritSect);
             pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+            RTCritSectLeave(&pCache->CritSect);
         }
         else
         {
+            RTCritSectEnter(&pCache->CritSect);
             pdmacFileCacheUpdate(pCache, pEntry);
             pdmacFileCacheReplace(pCache, pEntry->cbData, pEntry->pList);

             /* Move the entry to T2 and fetch it to the cache. */
             pdmacFileCacheEntryAddToList(&pCache->LruFrequentlyUsed, pEntry);
+            RTCritSectLeave(&pCache->CritSect);

             pEntry->pbData = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
…
                 pdmacFileCacheReadFromEndpoint(pEntry);
             }
+
+            /* Release the reference. If it is still needed the I/O in progress flag should protect it now. */
+            pdmacFileEpCacheEntryRelease(pEntry);
         }
         else
         {
             /*
              * No entry found. Write directly into file.
              */
-            STAM_PROFILE_ADV_START(&pCache->StatTreeGet, Cache);
-            PPDMACFILECACHEENTRY pEntryBestFit = (PPDMACFILECACHEENTRY)RTAvlrFileOffsetGetBestFit(pEndpointCache->pTree, off, true);
-            STAM_PROFILE_ADV_STOP(&pCache->StatTreeGet, Cache);
+            PPDMACFILECACHEENTRY pEntryBestFit = pdmacFileEpCacheGetCacheBestFitEntryByOffset(pEndpointCache, off);

             LogFlow(("%sbest fit entry for off=%RTfoff (BestFit=%RTfoff BestFitEnd=%RTfoff BestFitSize=%u)\n",
…
             if (pEntryBestFit && ((off + (RTFOFF)cbWrite) > pEntryBestFit->Core.Key))
+            {
                 cbToWrite = pEntryBestFit->Core.Key - off;
+                pdmacFileEpCacheEntryRelease(pEntryBestFit);
+            }
             else
                 cbToWrite = cbWrite;
…
         pdmR3AsyncCompletionCompleteTask(&pTask->Core);

-    RTCritSectLeave(&pCache->CritSect);
-
     return VINF_SUCCESS;
 }
trunk/src/VBox/VMM/PDMAsyncCompletionFileFailsafe.cpp
r22309 → r22757

         if (pCurr->enmTransferType == PDMACTASKFILETRANSFER_READ)
         {
+            if (RT_UNLIKELY((pCurr->Off + pCurr->DataSeg.cbSeg) > pEndpoint->cbFile))
+            {
+                ASMAtomicWriteU64(&pEndpoint->cbFile, pCurr->Off + pCurr->DataSeg.cbSeg);
+                RTFileSetSize(pEndpoint->File, pCurr->Off + pCurr->DataSeg.cbSeg);
+            }
+
             rc = RTFileReadAt(pEndpoint->File, pCurr->Off,
                               pCurr->DataSeg.pvSeg,
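The failsafe manager now tracks the file size in pEndpoint->cbFile and extends the file with RTFileSetSize before touching data that lies past the current end. A rough standalone analogue using POSIX ftruncate/pread instead of the RTFile API; demoReadAt, g_cbFile and demo.bin are made-up names for illustration:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static uint64_t g_cbFile;   /* tracked logical size of the file */

    static ssize_t demoReadAt(int fd, uint64_t off, void *pv, size_t cb)
    {
        /* Extend the file first if the request reaches past the current end. */
        if (off + cb > g_cbFile)
        {
            g_cbFile = off + cb;
            if (ftruncate(fd, (off_t)g_cbFile) != 0)
                return -1;
        }
        return pread(fd, pv, cb, (off_t)off);
    }

    int main(void)
    {
        int fd = open("demo.bin", O_RDWR | O_CREAT, 0644);
        if (fd < 0)
            return 1;

        char abBuf[16];
        ssize_t cbRead = demoReadAt(fd, 4096, abBuf, sizeof(abBuf)); /* file grows to 4112 bytes */
        printf("read %zd bytes\n", cbRead);
        close(fd);
        return 0;
    }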
trunk/src/VBox/VMM/PDMAsyncCompletionFileInternal.h
r22309 → r22757

     /** List of endpoints assigned to this manager. */
     R3PTRTYPE(PPDMASYNCCOMPLETIONENDPOINTFILE) pEndpointsHead;
+    /** Number of endpoints assigned to the manager. */
+    unsigned cEndpoints;
     /** Number of requests active currently. */
     unsigned cRequestsActive;
…
     /** Flags for this entry. Combinations of PDMACFILECACHE_* #defines */
     uint32_t fFlags;
+    /** Reference counter. Prevents eviction of the entry if > 0. */
+    volatile uint32_t cRefs;
     /** Size of the entry. */
     size_t cbData;
…
     /** AVL tree managing cache entries. */
     PAVLRFOFFTREE pTree;
-    /** Critical section protecting the tree. */
-    RTCRITSECT CritSect;
+    /** R/W semaphore protecting cached entries for this endpoint. */
+    RTSEMRW SemRWEntries;
     /** Pointer to the gobal cache data */
     PPDMACFILECACHEGLOBAL pCache;
…
     /** Size of the underlying file.
      * Updated while data is appended. */
-    uint64_t cbFile;
+    volatile uint64_t cbFile;
     /** Flag whether caching is enabled for this file. */
     bool fCaching;
…
             /** Current number of processed requests for the current update period. */
             unsigned cReqsProcessed;
+            /** Flag whether the endpoint is about to be moved to another manager. */
+            bool fMoving;
+            /** Destination I/O manager. */
+            PPDMACEPFILEMGR pAioMgrDst;
         } AioMgr;
 } PDMASYNCCOMPLETIONENDPOINTFILE;
…
 void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr);

+int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr);
+
+int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
+
 PPDMACTASKFILE pdmacFileEpGetNewTasks(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
 PPDMACTASKFILE pdmacFileTaskAlloc(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
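Among the structure changes above, cbFile becomes volatile and is accessed elsewhere in the changeset only through ASMAtomicReadU64/ASMAtomicWriteU64, presumably because a plain 64-bit load or store is not guaranteed to be a single atomic access on 32-bit hosts. A tiny C11 sketch of the same idea using <stdatomic.h>; the DEMOENDPOINT type and helper names are illustrative only:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct DEMOENDPOINT
    {
        _Atomic uint64_t cbFile;   /* plays the role of the volatile cbFile member */
    } DEMOENDPOINT;

    static void demoSetSize(DEMOENDPOINT *pEp, uint64_t cbNew)
    {
        atomic_store(&pEp->cbFile, cbNew);          /* cf. ASMAtomicWriteU64 */
    }

    static uint64_t demoGetSize(DEMOENDPOINT *pEp)
    {
        return atomic_load(&pEp->cbFile);           /* cf. ASMAtomicReadU64 */
    }

    int main(void)
    {
        DEMOENDPOINT Ep;
        atomic_init(&Ep.cbFile, 0);
        demoSetSize(&Ep, UINT64_C(2) * 1024 * 1024 * 1024);
        printf("size=%llu\n", (unsigned long long)demoGetSize(&Ep));
        return 0;
    }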
trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp
r22309 → r22757

 #define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
 #include <iprt/types.h>
+#include <iprt/asm.h>
 #include <iprt/file.h>
 #include <iprt/mem.h>
…
     RTMemFree(pAioMgr->pahReqsFree);
 }
+
+/**
+ * Sorts the endpoint list with insertion sort.
+ */
+static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
+{
+    PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;
+
+    pEpPrev = pAioMgr->pEndpointsHead;
+    pEpCurr = pEpPrev->AioMgr.pEndpointNext;
+
+    while (pEpCurr)
+    {
+        /* Remember the next element to sort because the list might change. */
+        pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;
+
+        /* Unlink the current element from the list. */
+        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
+        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
+
+        if (pPrev)
+            pPrev->AioMgr.pEndpointNext = pNext;
+        else
+            pAioMgr->pEndpointsHead = pNext;
+
+        if (pNext)
+            pNext->AioMgr.pEndpointPrev = pPrev;
+
+        /* Go back until we reached the place to insert the current endpoint into. */
+        while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
+            pEpPrev = pEpPrev->AioMgr.pEndpointPrev;
+
+        /* Link the endpoint into the list. */
+        if (pEpPrev)
+            pNext = pEpPrev->AioMgr.pEndpointNext;
+        else
+            pNext = pAioMgr->pEndpointsHead;
+
+        pEpCurr->AioMgr.pEndpointNext = pNext;
+        pEpCurr->AioMgr.pEndpointPrev = pEpPrev;
+        pNext->AioMgr.pEndpointPrev = pEpCurr;
+        if (pEpPrev)
+            pEpPrev->AioMgr.pEndpointNext = pEpCurr;
+        else
+            pAioMgr->pEndpointsHead = pEpCurr;
+
+        pEpCurr = pEpNextToSort;
+    }
+
+#ifdef DEBUG
+    /* Validate sorting alogrithm */
+    unsigned cEndpoints = 0;
+    pEpCurr = pAioMgr->pEndpointsHead;
+
+    AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
+    AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));
+
+    while (pEpCurr)
+    {
+        cEndpoints++;
+
+        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
+        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
+
+        Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
+        Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);
+
+        pEpCurr = pNext;
+    }
+
+    AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));
+#endif
+}
+
+/**
+ * Removes an endpoint from the currently assigned manager.
+ *
+ * @returns TRUE if there are still requests pending on the current manager for this endpoint.
+ *          FALSE otherwise.
+ * @param   pEndpointRemove    The endpoint to remove.
+ */
+static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
+{
+    PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
+    PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
+    PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
+
+    pAioMgr->cEndpoints--;
+
+    if (pPrev)
+        pPrev->AioMgr.pEndpointNext = pNext;
+    else
+        pAioMgr->pEndpointsHead = pNext;
+
+    if (pNext)
+        pNext->AioMgr.pEndpointPrev = pPrev;
+
+    /* Make sure that there is no request pending on this manager for the endpoint. */
+    if (!pEndpointRemove->AioMgr.cRequestsActive)
+    {
+        Assert(!pEndpointRemove->pFlushReq);
+
+        /* Reopen the file so that the new endpoint can reassociate with the file */
+        RTFileClose(pEndpointRemove->File);
+        int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
+        AssertRC(rc);
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * Creates a new I/O manager and spreads the I/O load of the endpoints
+ * between the given I/O manager and the new one.
+ *
+ * @returns nothing.
+ * @param   pAioMgr    The I/O manager with high I/O load.
+ */
+static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
+{
+    PPDMACEPFILEMGR pAioMgrNew = NULL;
+    int rc = VINF_SUCCESS;
+
+    /* Splitting can't be done with only one open endpoint. */
+    if (pAioMgr->cEndpoints > 1)
+    {
+        rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass,
+                                   &pAioMgrNew);
+        if (RT_SUCCESS(rc))
+        {
+            /* We will sort the list by request count per second. */
+            pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
+
+            /* Now move some endpoints to the new manager. */
+            unsigned cReqsHere  = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
+            unsigned cReqsOther = 0;
+            PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
+
+            while (pCurr)
+            {
+                if (cReqsHere <= cReqsOther)
+                {
+                    /*
+                     * The other manager has more requests to handle now.
+                     * We will keep the current endpoint.
+                     */
+                    Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
+                    cReqsHere += pCurr->AioMgr.cReqsPerSec;
+                    pCurr = pCurr->AioMgr.pEndpointNext;
+                }
+                else
+                {
+                    /* Move to other endpoint. */
+                    Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
+                    cReqsOther += pCurr->AioMgr.cReqsPerSec;
+
+                    PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
+
+                    pCurr = pCurr->AioMgr.pEndpointNext;
+
+                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
+
+                    if (fReqsPending)
+                    {
+                        pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
+                        pMove->AioMgr.fMoving = true;
+                        pMove->AioMgr.pAioMgrDst = pAioMgrNew;
+                    }
+                    else
+                    {
+                        pMove->AioMgr.fMoving = false;
+                        pMove->AioMgr.pAioMgrDst = NULL;
+                        pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
+                    }
+                }
+            }
+        }
+        else
+        {
+            /* Don't process further but leave a log entry about reduced performance. */
+            LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
+        }
+    }
+}
…
     RTFILEAIOREQ apReqs[20];
     unsigned cRequests = 0;
+    unsigned cMaxRequests = PDMACEPFILEMGR_REQS_MAX - pAioMgr->cRequestsActive;
     int rc = VINF_SUCCESS;
     PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
…
     /* Go through the list and queue the requests until we get a flush request */
-    while (pTaskHead && !pEndpoint->pFlushReq)
+    while (pTaskHead && !pEndpoint->pFlushReq && (cMaxRequests > 0))
     {
         PPDMACTASKFILE pCurr = pTaskHead;
…
         if (pCurr->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
+        {
+            /* Grow the file if needed. */
+            if (RT_UNLIKELY((pCurr->Off + pCurr->DataSeg.cbSeg) > pEndpoint->cbFile))
+            {
+                ASMAtomicWriteU64(&pEndpoint->cbFile, pCurr->Off + pCurr->DataSeg.cbSeg);
+                RTFileSetSize(pEndpoint->File, pCurr->Off + pCurr->DataSeg.cbSeg);
+            }
+
             rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                           pCurr->Off, pvBuf, pCurr->DataSeg.cbSeg, pCurr);
+        }
         else
             rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
…
         apReqs[cRequests] = hReq;
         pEndpoint->AioMgr.cReqsProcessed++;
+        cMaxRequests--;
         cRequests++;
         if (cRequests == RT_ELEMENTS(apReqs))
…
         pAioMgr->cRequestsActive += cRequests;
         rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, apReqs, cRequests);
-        if (RT_FAILURE(rc))
-        {
-            /* Not enough ressources on this context anymore. */
-            /* @todo implement */
-            AssertMsgFailed(("Implement\n"));
-        }
+        AssertMsgReturn(RT_SUCCESS(rc), ("Could not submit %u requests %Rrc\n", cRequests, rc), rc);
+    }
+
+    if (RT_UNLIKELY(!cMaxRequests && pTaskHead && !pEndpoint->pFlushReq))
+    {
+        /*
+         * The I/O manager has no room left for more requests
+         * but there are still requests to process.
+         * Create a new I/O manager and let it handle some endpoints.
+         */
+
+        /* Add the rest of the tasks to the pending list first */
+        if (!pEndpoint->AioMgr.pReqsPendingHead)
+        {
+            Assert(!pEndpoint->AioMgr.pReqsPendingTail);
+            pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
+        }
+        else
+        {
+            Assert(pEndpoint->AioMgr.pReqsPendingTail);
+            pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
+        }
+
+        /* Update the tail. */
+        while (pTaskHead->pNext)
+            pTaskHead = pTaskHead->pNext;
+
+        pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
+
+        pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
     }
…
             rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
             fNotifyWaiter = true;
+            pAioMgr->cEndpoints++;
             break;
         }
…
             AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

-            PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
-            PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
-
             pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
-
-            if (pPrev)
-                pPrev->AioMgr.pEndpointNext = pNext;
-            else
-                pAioMgr->pEndpointsHead = pNext;
-
-            if (pNext)
-                pNext->AioMgr.pEndpointPrev = pPrev;
-
-            /* Make sure that there is no request pending on this manager for the endpoint. */
-            if (!pEndpointRemove->AioMgr.cRequestsActive)
-            {
-                Assert(!pEndpointRemove->pFlushReq);
-
-                /* Reopen the file so that the new endpoint can reassociate with the file */
-                RTFileClose(pEndpointRemove->File);
-                rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
-                AssertRC(rc);
-            }
+            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
             break;
         }
…
             pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
-
-            PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointClose->AioMgr.pEndpointPrev;
-            PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointClose->AioMgr.pEndpointNext;
-
-            if (pPrev)
-                pPrev->AioMgr.pEndpointNext = pNext;
-            else
-                pAioMgr->pEndpointsHead = pNext;
-
-            if (pNext)
-                pNext->AioMgr.pEndpointPrev = pPrev;
-
-            if (!pEndpointClose->AioMgr.cRequestsActive)
-            {
-                Assert(!pEndpointClose->pFlushReq);
-
-                /* Reopen the file to deassociate it from the endpoint. */
-                RTFileClose(pEndpointClose->File);
-                rc = RTFileOpen(&pEndpointClose->File, pEndpointClose->Core.pszUri, pEndpointClose->fFlags);
-                AssertRC(rc);
-                fNotifyWaiter = true;
-            }
+            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
             break;
         }
…
         RTFILEAIOREQ apReqs[20];
         uint32_t cReqsCompleted = 0;
-
-        rc = RTFileAioCtxWait(pAioMgr->hAioCtx, 1, RT_INDEFINITE_WAIT, apReqs,
+        size_t cReqsWait;
+
+        if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
+            cReqsWait = RT_ELEMENTS(apReqs);
+        else
+            cReqsWait = pAioMgr->cRequestsActive;
+
+        rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
+                              cReqsWait,
+                              RT_INDEFINITE_WAIT, apReqs,
                               RT_ELEMENTS(apReqs), &cReqsCompleted);
-        CHECK_RC(pAioMgr, rc);
+        if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
+            CHECK_RC(pAioMgr, rc);

         for (uint32_t i = 0; i < cReqsCompleted; i++)
…
             else if (!pEndpoint->AioMgr.cRequestsActive)
             {
-                Assert(pAioMgr->fBlockingEventPending);
-                ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
-
-                /* Release the waiting thread. */
-                rc = RTSemEventSignal(pAioMgr->EventSemBlock);
+                /* Reopen the file so that the new endpoint can reassociate with the file */
+                RTFileClose(pEndpoint->File);
+                rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
                 AssertRC(rc);
+
+                if (pEndpoint->AioMgr.fMoving)
+                {
+                    pEndpoint->AioMgr.fMoving = false;
+                    pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
+                }
+                else
+                {
+                    Assert(pAioMgr->fBlockingEventPending);
+                    ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
+
+                    /* Release the waiting thread. */
+                    rc = RTSemEventSignal(pAioMgr->EventSemBlock);
+                    AssertRC(rc);
+                }
             }
         }
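pdmacFileAioMgrNormalEndpointsSortByLoad() above orders the doubly linked endpoint list by cReqsPerSec with an insertion sort before the load balancer splits the endpoints between two managers. A compact standalone sketch of the same sort on a plain doubly linked list; DEMOEP and demoSortByLoad are illustrative names, not VirtualBox symbols:

    #include <stdio.h>

    typedef struct DEMOEP
    {
        unsigned        cReqsPerSec;
        struct DEMOEP  *pPrev;
        struct DEMOEP  *pNext;
    } DEMOEP;

    /* Sorts the doubly linked list headed by *ppHead so that cReqsPerSec decreases. */
    static void demoSortByLoad(DEMOEP **ppHead)
    {
        DEMOEP *pCurr = *ppHead ? (*ppHead)->pNext : NULL;   /* first node is trivially sorted */

        while (pCurr)
        {
            DEMOEP *pNextToSort = pCurr->pNext;

            /* Unlink pCurr (it always has a predecessor here). */
            pCurr->pPrev->pNext = pNextToSort;
            if (pNextToSort)
                pNextToSort->pPrev = pCurr->pPrev;

            /* Walk back to the insertion point: the first node with a load >= ours. */
            DEMOEP *pIns = pCurr->pPrev;
            while (pIns && pIns->cReqsPerSec < pCurr->cReqsPerSec)
                pIns = pIns->pPrev;

            /* Re-link pCurr after pIns, or at the head if pIns is NULL. */
            DEMOEP *pAfter = pIns ? pIns->pNext : *ppHead;
            pCurr->pNext = pAfter;
            pCurr->pPrev = pIns;
            if (pAfter)
                pAfter->pPrev = pCurr;
            if (pIns)
                pIns->pNext = pCurr;
            else
                *ppHead = pCurr;

            pCurr = pNextToSort;
        }
    }

    int main(void)
    {
        DEMOEP aEps[4] = { { 10 }, { 40 }, { 20 }, { 30 } };
        DEMOEP *pHead = &aEps[0];
        for (unsigned i = 0; i < 4; i++)
        {
            aEps[i].pPrev = i > 0 ? &aEps[i - 1] : NULL;
            aEps[i].pNext = i < 3 ? &aEps[i + 1] : NULL;
        }

        demoSortByLoad(&pHead);
        for (DEMOEP *p = pHead; p; p = p->pNext)
            printf("%u ", p->cReqsPerSec);   /* prints: 40 30 20 10 */
        printf("\n");
        return 0;
    }

Insertion sort is quadratic in the worst case, but the endpoint list per manager is short and the DEBUG-only validation pass in the changeset keeps the cost negligible.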