Changeset 22309 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 17, 2009 8:59:28 PM
- Location: trunk/src/VBox/VMM
- Files: 1 added, 7 edited
trunk/src/VBox/VMM/Makefile.kmk
Changes from r22140 to r22309: the new source file PDMAsyncCompletionFileCache.cpp is appended to the async completion source list, after PDMAsyncCompletionFileNormal.cpp, inside the existing conditional block.
trunk/src/VBox/VMM/PDMAsyncCompletion.cpp
Changes from r21496 to r22309:

- A new static helper pdmR3AsyncCompletionFindEndpointWithUri() walks the endpoint list of an endpoint class and returns the endpoint whose pszUri matches the given URI (compared with RTStrCmp), or NULL if there is none.
- PDMR3AsyncCompletionEpCreateForFile() now first searches for an already opened endpoint for the same file. If one is found, its cUsers count is incremented and the existing endpoint is returned immediately. Otherwise a new endpoint is allocated as before, and in addition pszUri is set to RTStrDup(pszFilename) and cUsers is initialized to 1 (see the sketch after this list); if duplicating the URI fails, the function cleans up the task cache and the endpoint allocation and returns VERR_NO_MEMORY.
- PDMR3AsyncCompletionEpClose() decrements cUsers and only destroys the endpoint when the last user closes it. The teardown itself is unchanged apart from also freeing pszUri: close the endpoint via pfnEpClose, free the cached tasks, drop the template reference, and unlink the endpoint from the class list under the class critical section before freeing it.
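The shared-endpoint logic added above boils down to a small reference-counting pattern: look the endpoint up by URI, bump a user count on a hit, and only tear it down when the last user closes it. The following is a minimal, self-contained C sketch of that pattern using simplified stand-in types (Endpoint, EndpointClass) and plain libc calls rather than the real PDM structures and IPRT/MM APIs; it ignores the task cache, locking and error paths of the actual code.

    #define _POSIX_C_SOURCE 200809L
    #include <stdlib.h>
    #include <string.h>

    typedef struct Endpoint
    {
        struct Endpoint *pNext;
        char            *pszUri;   /* identifies the file backing this endpoint */
        unsigned         cUsers;   /* number of users sharing this endpoint */
    } Endpoint;

    typedef struct EndpointClass
    {
        Endpoint *pEndpointsHead;
    } EndpointClass;

    /* Return an already opened endpoint for the given URI, or NULL. */
    static Endpoint *findEndpointWithUri(EndpointClass *pClass, const char *pszUri)
    {
        for (Endpoint *pCur = pClass->pEndpointsHead; pCur; pCur = pCur->pNext)
            if (!strcmp(pCur->pszUri, pszUri))
                return pCur;
        return NULL;
    }

    /* Open: reuse an existing endpoint if possible, otherwise create one with cUsers = 1. */
    static Endpoint *endpointOpen(EndpointClass *pClass, const char *pszUri)
    {
        Endpoint *pEndpoint = findEndpointWithUri(pClass, pszUri);
        if (pEndpoint)
        {
            pEndpoint->cUsers++;
            return pEndpoint;
        }

        pEndpoint = (Endpoint *)calloc(1, sizeof(*pEndpoint));
        if (!pEndpoint)
            return NULL;
        pEndpoint->pszUri = strdup(pszUri);
        if (!pEndpoint->pszUri)
        {
            free(pEndpoint);
            return NULL;
        }
        pEndpoint->cUsers = 1;
        pEndpoint->pNext  = pClass->pEndpointsHead;
        pClass->pEndpointsHead = pEndpoint;
        return pEndpoint;
    }

    /* Close: only the last user unlinks and frees the endpoint. */
    static void endpointClose(EndpointClass *pClass, Endpoint *pEndpoint)
    {
        if (--pEndpoint->cUsers)
            return;

        for (Endpoint **ppCur = &pClass->pEndpointsHead; *ppCur; ppCur = &(*ppCur)->pNext)
            if (*ppCur == pEndpoint)
            {
                *ppCur = pEndpoint->pNext;
                break;
            }

        free(pEndpoint->pszUri);
        free(pEndpoint);
    }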
trunk/src/VBox/VMM/PDMAsyncCompletionFile.cpp
Changes from r21496 to r22309:

- The per-request "segment" objects are replaced by per-request tasks: pdmacFileSegmentFree() and pdmacFileSegmentAlloc() become pdmacFileTaskFree() and pdmacFileTaskAlloc(), operating on PDMACTASKFILE instead of PDMACTASKFILESEG, and the per-endpoint segment cache fields become the task cache (pTasksFreeHead, pTasksFreeTail, cTasksCached, with the class limit cTasksCacheMax). Comments and log messages are updated accordingly.
- pdmacFileEpGetNewTasks() now returns a PPDMACTASKFILE list: it atomically exchanges the endpoint's pTasksNewHead with NULL and reverses the list so the tasks are processed in FIFO order.
- pdmacFileAioMgrShutdown() no longer toggles the old fShutdown flag; shutdown is driven through the new I/O manager state handling instead.
- pdmacFileEpAddTask() is no longer static and pushes PDMACTASKFILE entries onto the endpoint's pTasksNewHead with an ASMAtomicCmpXchgPtr loop.
- A new completion callback pdmacFileEpTaskCompleted() finishes the parent PDMASYNCCOMPLETIONTASKFILE: flush tasks complete it immediately, while data transfers atomically subtract the segment size from cbTransferLeft and complete the parent exactly once, guarded by the fCompleted flag, when the counter reaches zero (see the sketch after this list).
- pdmacFileEpTaskInitiate() no longer builds a segment chain hanging off the task. Instead it stores the total size in cbTransferLeft and allocates one PDMACTASKFILE per data segment, filling in the endpoint, transfer type, offset, data segment, user pointer and the pdmacFileEpTaskCompleted callback, queueing each task to the I/O manager and advancing the offset; an assertion checks that the whole transfer was covered.
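The new completion accounting can be illustrated with a short C11 sketch: each finished per-segment task atomically subtracts its size from a remaining-byte counter on the parent request, and whichever subtraction reaches zero completes the parent exactly once. The types and the printf notification below are simplified stand-ins; the changeset itself uses ASMAtomicSubU32, ASMAtomicXchgBool and pdmR3AsyncCompletionCompleteTask on PDMASYNCCOMPLETIONTASKFILE.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct ParentTask
    {
        atomic_uint cbTransferLeft;   /* bytes still outstanding for this request */
        atomic_bool fCompleted;       /* makes sure the completion fires exactly once */
    } ParentTask;

    /* Called once for every finished per-segment I/O task. */
    static void segmentCompleted(ParentTask *pParent, size_t cbSeg)
    {
        unsigned cbOld = atomic_fetch_sub(&pParent->cbTransferLeft, (unsigned)cbSeg);

        /* The subtraction that takes the counter to zero completes the parent request. */
        if (   cbOld == (unsigned)cbSeg
            && !atomic_exchange(&pParent->fCompleted, true))
            printf("parent request completed\n");   /* notify the owner here */
    }

    int main(void)
    {
        ParentTask Task;
        atomic_init(&Task.cbTransferLeft, 4096u + 512u);
        atomic_init(&Task.fCompleted, false);

        segmentCompleted(&Task, 4096);   /* first segment finishes */
        segmentCompleted(&Task, 512);    /* last segment finishes -> parent completes */
        return 0;
    }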
- pdmacFileAioMgrCreate() only calls pdmacFileAioMgrNormalInit() for non-failsafe managers and sets the new manager to the PDMACEPFILEMGRSTATE_RUNNING state before spawning its thread; pdmacFileAioMgrDestroy() drops the assertion that only failsafe managers may be destroyed.
- The endpoint class initializer now also sets up the global file cache with pdmacFileCacheInit() and deletes the critical section again if that fails.
- Endpoint creation: for non-failsafe managers the file flags gain RTFILE_O_ASYNC_IO | RTFILE_O_WRITE_THROUGH. The file is briefly opened read-only to query its size; only when the size is a multiple of 512 is RTFILE_O_WRITE_THROUGH replaced by RTFILE_O_NO_CACHE, because hosts such as Windows, Linux and Solaris require sector-aligned transfers for unbuffered I/O; otherwise write-through is kept so the host cache never holds dirty buffers (illustrated below). The separate pszFilename copy is gone (the core endpoint now carries the URI), the task cache is initialized, the per-endpoint cache is set up with pdmacFileEpCacheInit() (on failure a release log entry is written and caching is disabled), the endpoint state is set to ACTIVE, and the endpoint is assigned to an I/O manager.
- Endpoint close: the explicit pdmacFileAioMgrRemoveEndpoint() call is dropped, the cached PDMACTASKFILE entries are freed, and the per-endpoint cache is destroyed with pdmacFileEpCacheDestroy() when caching was enabled.
- Read and write requests are routed through pdmacFileEpCacheRead() and pdmacFileEpCacheWrite() when caching is enabled; writes and flushes on a read-only endpoint return VERR_NOT_SUPPORTED. Flush now allocates a PDMACTASKFILE of type PDMACTASKFILETRANSFER_FLUSH, sets the completion callback and user data, and queues it to the I/O manager.
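The cache-bypass decision described above (write-through by default, unbuffered I/O only for sector-aligned files) can be sketched as follows. The OPENFLAG_* names and the use of POSIX stat() are purely illustrative stand-ins; the real code probes the size with RTFileOpen/RTFileGetSize and toggles RTFILE_O_WRITE_THROUGH versus RTFILE_O_NO_CACHE.

    #include <sys/stat.h>

    enum
    {
        OPENFLAG_ASYNC_IO      = 1,   /* hypothetical stand-ins for the RTFILE_O_* flags */
        OPENFLAG_WRITE_THROUGH = 2,
        OPENFLAG_NO_CACHE      = 4
    };

    /* Decide how the non-failsafe (async) I/O manager should open a file. */
    static unsigned chooseOpenFlags(const char *pszFilename)
    {
        unsigned fFlags = OPENFLAG_ASYNC_IO | OPENFLAG_WRITE_THROUGH;

        struct stat st;
        if (stat(pszFilename, &st) == 0 && (st.st_size % 512) == 0)
        {
            /* Sector-aligned size: bypass the host cache entirely. */
            fFlags &= ~(unsigned)OPENFLAG_WRITE_THROUGH;
            fFlags |= OPENFLAG_NO_CACHE;
        }
        /* Otherwise keep write-through: the host cache is used but never holds dirty data. */
        return fFlags;
    }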
trunk/src/VBox/VMM/PDMAsyncCompletionFileFailsafe.cpp
Changes from r21496 to r22309:

- The request loop now works directly on the PDMACTASKFILE list returned by pdmacFileEpGetNewTasks(). The old ordered-task-ID assertion and the per-task segment chain are gone: each task carries a single data segment, which is served with one synchronous RTFileReadAt() or RTFileWriteAt() at the task's offset. After the transfer the task's completion callback is invoked and the task is returned to the cache with pdmacFileTaskFree() (a rough sketch of this flow follows below).
- The failsafe manager thread loops while the manager state is RUNNING or SUSPENDING instead of testing the removed fShutdown flag.
- The blocking event handler now records endpoint state transitions: endpoints are marked ACTIVE when added, REMOVING when removed and CLOSING when closed on this manager.
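For orientation, here is a rough, self-contained sketch of the failsafe flow: one synchronous transfer per task, then complete and free. It substitutes POSIX pread/pwrite/fsync and malloc-style freeing for the IPRT RTFileReadAt/RTFileWriteAt calls and the PDM task cache, so it only mirrors the control flow, not the actual API.

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    typedef enum { XFER_READ, XFER_WRITE, XFER_FLUSH } XferType;

    typedef struct Task
    {
        struct Task *pNext;
        XferType     enmType;
        off_t        off;
        void        *pvBuf;
        size_t       cbBuf;
        void       (*pfnCompleted)(struct Task *pTask, void *pvUser);
        void        *pvUser;
    } Task;

    /* Serve every task in the list synchronously, then complete and release it. */
    static void processTaskList(int fd, Task *pTask)
    {
        while (pTask)
        {
            Task   *pCurr = pTask;
            ssize_t rc    = 0;

            switch (pCurr->enmType)
            {
                case XFER_FLUSH: rc = fsync(fd);                                          break;
                case XFER_READ:  rc = pread(fd, pCurr->pvBuf, pCurr->cbBuf, pCurr->off);  break;
                case XFER_WRITE: rc = pwrite(fd, pCurr->pvBuf, pCurr->cbBuf, pCurr->off); break;
            }
            if (rc < 0)
                perror("transfer failed");

            pCurr->pfnCompleted(pCurr, pCurr->pvUser);   /* notify the owner */
            pTask = pCurr->pNext;
            free(pCurr);                                  /* the real code returns it to a task cache */
        }
    }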
trunk/src/VBox/VMM/PDMAsyncCompletionFileInternal.h
Changes from r21496 to r22309:

- New includes: VBox/cfgm.h, VBox/stam.h, iprt/critsect.h and iprt/avl.h.
- The request segment pointer type PPDMACTASKFILESEG becomes PPDMACTASKFILE, and forward declarations for the cache types (PPDMACFILELRULIST, PPDMACFILECACHEGLOBAL) are added.
- The blocking event enumeration gains PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME.
- A new PDMACEPFILEMGRSTATE enumeration describes the I/O manager state (INVALID, RUNNING, FAULT, SUSPENDING, SHUTDOWN). PDMACEPFILEMGR loses the fShutdown flag and gains enmState as well as an array of cached async I/O request handles (pahReqsFree) with the indices iFreeEntryNext and iFreeReqNext and the array size cReqEntries.
- New cache types: PDMACFILETASKSEG describes a request segment waiting for a cache entry (owning task, buffer offset, transfer size, buffer pointer, write flag). PDMACFILECACHEENTRY is an AVL node holding the entry's LRU links, owning list, cache, endpoint, flags, size, data buffer and a wait list of task segments; flag bits mark I/O in progress, locked and dirty entries, combined in the PDMACFILECACHE_NOT_EVICTABLE mask. PDMACFILELRULIST is a doubly linked list with a cached-byte counter. PDMACFILECACHEGLOBAL holds the maximum and current cache size, a critical section, the adaption parameter and four LRU lists - recently used (T1), frequently used (T2) and the two ghost lists for recently evicted entries (B1, B2) - plus optional statistics counters (hits, partial hits, misses, bytes read and written, AVL tree timing); the list handling is sketched after this list.
- PDMACFILEENDPOINTCACHE bundles the per-endpoint AVL tree, its critical section and a pointer to the global cache.
- The endpoint class data renames cSegmentsCacheMax to cTasksCacheMax and embeds the global cache structure.
- A new PDMASYNCCOMPLETIONENDPOINTFILESTATE enumeration describes the endpoint state (INVALID, ACTIVE, CLOSING, REMOVING, MIGRATING).
- PDMASYNCCOMPLETIONENDPOINTFILE drops pszFilename and fRemovedOrClosed and gains enmState, the file size cbFile, the fReadonly flag, the per-endpoint DataCache and the task cache fields retyped to PPDMACTASKFILE (pTasksFreeHead, pTasksFreeTail, cTasksCached); the new-task list and the pending flush request are now PPDMACTASKFILE as well, and per-manager load statistics (cReqsPerSec, cReqsProcessed) are added.
- The old PDMACTASKFILESEG request segment structure is replaced by PDMACTASKFILE, which carries the list link, owning endpoint, transfer type, start offset, a single data segment, bounce buffer information, a completion callback of the new FNPDMACTASKCOMPLETED type and a user pointer.
- PDMASYNCCOMPLETIONTASKFILE drops the transfer-type/segment union in favour of the volatile cbTransferLeft counter and the fCompleted flag.
- New prototypes are added for the renamed task helpers (pdmacFileTaskAlloc/Free, pdmacFileEpGetNewTasks, pdmacFileEpAddTask, pdmacFileEpTaskCompleted) and the cache (pdmacFileCacheInit/Destroy, pdmacFileEpCacheInit/Destroy, pdmacFileEpCacheRead/Write).
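The four LRU lists plus the adaption parameter in PDMACFILECACHEGLOBAL have the shape of an ARC-style (adaptive replacement) cache. The eviction policy itself lives in the new PDMAsyncCompletionFileCache.cpp, which is not shown in this diff, so the following is only a small self-contained C sketch of the list bookkeeping such a cache needs - unlinking an entry from whichever list it is on and inserting it at the most-recently-used end of another - with simplified stand-in types.

    #include <stddef.h>
    #include <stdint.h>

    typedef struct CacheEntry
    {
        struct CacheEntry   *pPrev;
        struct CacheEntry   *pNext;
        struct CacheLruList *pList;    /* list the entry currently lives on */
        size_t               cbData;
    } CacheEntry;

    typedef struct CacheLruList
    {
        CacheEntry *pHead;             /* most recently used end */
        CacheEntry *pTail;             /* least recently used end */
        uint32_t    cbCached;          /* bytes accounted to this list */
    } CacheLruList;

    /* Remove an entry from whichever list it is currently on. */
    static void lruUnlink(CacheEntry *pEntry)
    {
        CacheLruList *pList = pEntry->pList;
        if (!pList)
            return;

        if (pEntry->pPrev)
            pEntry->pPrev->pNext = pEntry->pNext;
        else
            pList->pHead = pEntry->pNext;

        if (pEntry->pNext)
            pEntry->pNext->pPrev = pEntry->pPrev;
        else
            pList->pTail = pEntry->pPrev;

        pList->cbCached -= (uint32_t)pEntry->cbData;
        pEntry->pPrev = pEntry->pNext = NULL;
        pEntry->pList = NULL;
    }

    /* Move an entry to the most-recently-used end of the given list. */
    static void lruInsertHead(CacheLruList *pList, CacheEntry *pEntry)
    {
        lruUnlink(pEntry);

        pEntry->pNext = pList->pHead;
        pEntry->pPrev = NULL;
        if (pList->pHead)
            pList->pHead->pPrev = pEntry;
        else
            pList->pTail = pEntry;
        pList->pHead     = pEntry;
        pList->cbCached += (uint32_t)pEntry->cbData;
        pEntry->pList    = pList;
    }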
trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp
Changes from r21496 to r22309:

- New constants: PDMACEPFILEMGR_LOAD_UPDATE_PERIOD (1000 ms) for the I/O load statistics and PDMACEPFILEMGR_REQS_MAX (512) as the request limit per manager (still carrying a todo to find a better value).
- pdmacFileAioMgrNormalInit() now also allocates the array of cached async I/O request handles (cReqEntries = PDMACEPFILEMGR_REQS_MAX + 1) and destroys the AIO context again if the allocation fails; pdmacFileAioMgrNormalDestroy() destroys any request handles still cached in the array before freeing it.
- pdmacFileAioMgrNormalErrorHandler() takes the source position, writes release log entries about the critical error and the fallback to failsafe mode, puts the manager into the FAULT state and switches the endpoint class to failsafe mode; the remaining fallback work is still an unimplemented assertion.
- pdmacFileAioMgrNormalProcessTaskList() now processes PPDMACTASKFILE lists, asserts that the endpoint is ACTIVE and that every task belongs to it, and completes flush tasks through the task's completion callback when no requests are outstanding. For data transfers it takes a request handle from the free array when one is cached and only falls back to RTFileAioReqCreate() otherwise (the ring scheme is sketched below); the bounce-buffer handling is applied per task: if the buffer does not meet the host alignment requirements, a page-aligned bounce buffer is allocated (and filled for writes) before the request is prepared.
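The request-handle recycling introduced here is a small ring buffer: iFreeReqNext is the next cached handle to hand out and iFreeEntryNext is the next slot a completed request's handle is returned to; the array holds one slot more than the maximum number of outstanding requests so the indices only meet when the ring is empty. A minimal sketch of that scheme follows, with a generic void pointer standing in for RTFILEAIOREQ.

    #include <stdlib.h>

    typedef void *IoReqHandle;    /* stand-in for RTFILEAIOREQ */

    typedef struct ReqRing
    {
        IoReqHandle *pahReqsFree;        /* cReqEntries slots */
        unsigned     iFreeEntryNext;     /* where the next returned handle is stored */
        unsigned     iFreeReqNext;       /* next cached handle to hand out */
        unsigned     cReqEntries;        /* ring size = maximum outstanding requests + 1 */
    } ReqRing;

    static int reqRingInit(ReqRing *pRing, unsigned cReqsMax)
    {
        pRing->cReqEntries    = cReqsMax + 1;
        pRing->iFreeEntryNext = 0;
        pRing->iFreeReqNext   = 0;
        pRing->pahReqsFree    = (IoReqHandle *)calloc(pRing->cReqEntries, sizeof(IoReqHandle));
        return pRing->pahReqsFree ? 0 : -1;
    }

    /* Take a cached handle; NULL means the ring is empty and a fresh handle must be created. */
    static IoReqHandle reqRingGet(ReqRing *pRing)
    {
        if (pRing->iFreeReqNext == pRing->iFreeEntryNext)
            return NULL;
        IoReqHandle hReq = pRing->pahReqsFree[pRing->iFreeReqNext];
        pRing->pahReqsFree[pRing->iFreeReqNext] = NULL;
        pRing->iFreeReqNext = (pRing->iFreeReqNext + 1) % pRing->cReqEntries;
        return hReq;
    }

    /* Return a handle to the ring once its request has completed. */
    static void reqRingPut(ReqRing *pRing, IoReqHandle hReq)
    {
        pRing->pahReqsFree[pRing->iFreeEntryNext] = hReq;
        pRing->iFreeEntryNext = (pRing->iFreeEntryNext + 1) % pRing->cReqEntries;
    }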
- The prepared requests are still submitted in batches of up to 20 via RTFileAioCtxSubmit(); the endpoint's cReqsProcessed counter is incremented per queued request, and running out of resources on the context remains a todo.
- pdmacFileAioMgrNormalQueueReqs() asserts that the endpoint is ACTIVE.
- pdmacFileAioMgrNormalProcessBlockingEvent() no longer notifies the waiting thread by default. Adding an endpoint sets it ACTIVE, associates the file with the AIO context and notifies the waiter; removing an endpoint sets it REMOVING and only reopens the file (now via the core pszUri instead of the removed pszFilename) when no requests are active; closing an endpoint sets it CLOSING, unlinks it from the manager's endpoint list and, when idle, reopens the file and notifies the waiter. The SHUTDOWN, SUSPEND and RESUME events set the corresponding manager states, and enmBlockingEvent is reset when the waiter is released. The old fRemovedOrClosed bookkeeping is gone.
- The CHECK_RC macro passes the source position to the error handler.
- The main loop pdmacFileAioMgrNormal() runs while the manager is RUNNING or SUSPENDING, resets fWokenUp after waking, and only queues new requests while RUNNING, skipping endpoints that are not ACTIVE or have a flush pending. While requests are active it waits for completions with RTFileAioCtxWait and, for each completed request: asserts success and a complete transfer, copies the bounce buffer back for reads and frees it, returns the request handle to the free array, decrements the active counters, increments cReqsProcessed, and invokes the task's completion callback before freeing the task. When an endpoint becomes idle a pending flush is completed through its callback; ACTIVE endpoints are checked for new work, while draining non-active endpoints release the waiting thread. Blocking events are re-checked before sleeping again, and once per PDMACEPFILEMGR_LOAD_UPDATE_PERIOD the loop recomputes each endpoint's cReqsPerSec from cReqsProcessed and resets the counter (sketched below).
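The load statistics are a simple periodic update: once the 1000 ms period has elapsed, each endpoint's request counter is divided by the elapsed time and reset. The sketch below mirrors that bookkeeping with a POSIX monotonic clock and a simplified endpoint list; like the changeset it divides by elapsed milliseconds (period plus overshoot), so the value is a per-interval figure rather than a literal per-second rate.

    #define _POSIX_C_SOURCE 200809L
    #include <stdint.h>
    #include <time.h>

    #define LOAD_UPDATE_PERIOD_MS 1000u

    typedef struct Endpoint
    {
        struct Endpoint *pNext;
        unsigned         cReqsProcessed;   /* requests completed in the current period */
        unsigned         cReqsPerPeriod;   /* last computed load figure */
    } Endpoint;

    static uint64_t nowMs(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000u + (uint64_t)ts.tv_nsec / 1000000u;
    }

    /* Call regularly from the manager loop; returns the end of the next update period. */
    static uint64_t updateLoadStatistics(Endpoint *pHead, uint64_t uMillisEnd)
    {
        uint64_t uMillisCurr = nowMs();
        if (uMillisCurr <= uMillisEnd)
            return uMillisEnd;                              /* period not over yet */

        uint64_t cMillisElapsed = (uMillisCurr - uMillisEnd) + LOAD_UPDATE_PERIOD_MS;
        for (Endpoint *pCur = pHead; pCur; pCur = pCur->pNext)
        {
            pCur->cReqsPerPeriod = (unsigned)(pCur->cReqsProcessed / cMillisElapsed);
            pCur->cReqsProcessed = 0;
        }
        return nowMs() + LOAD_UPDATE_PERIOD_MS;             /* start a new period */
    }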
trunk/src/VBox/VMM/PDMAsyncCompletionInternal.h
Changes from r20374 to r22309: a comment is capitalized ("Previous endpoint in the list"), and PDMASYNCCOMPLETIONENDPOINT gains a reference count (cUsers) and the URI string describing the endpoint (pszUri), which back the endpoint sharing introduced in PDMAsyncCompletion.cpp.