VirtualBox

source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp@ 34079

Last change on this file since 34079 was 33595, checked in by vboxsync, 14 years ago

src/*: more spelling fixes (logging), thanks Timeless!

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 64.7 KB
Line 
1/* $Id: PDMAsyncCompletionFileNormal.cpp 33595 2010-10-29 10:35:00Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * Async File I/O manager.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
19#include <iprt/types.h>
20#include <iprt/asm.h>
21#include <iprt/file.h>
22#include <iprt/mem.h>
23#include <iprt/string.h>
24#include <iprt/assert.h>
25#include <VBox/log.h>
26
27#include "PDMAsyncCompletionFileInternal.h"
28
29/** The update period for the I/O load statistics in ms. */
30#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
31/** Maximum number of requests a manager will handle. */
32#define PDMACEPFILEMGR_REQS_STEP 512
33
34/*******************************************************************************
35* Internal functions *
36*******************************************************************************/
37static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
38 PPDMACEPFILEMGR pAioMgr,
39 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
40
41static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
42 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
43 PPDMACFILERANGELOCK pRangeLock);
44
45static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
46 int rc, size_t cbTransfered);
47
48int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
49{
50 int rc = VINF_SUCCESS;
51
52 pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
53
54 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
55 if (rc == VERR_OUT_OF_RANGE)
56 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
57
58 if (RT_SUCCESS(rc))
59 {
60 /* Initialize request handle array. */
61 pAioMgr->iFreeEntry = 0;
62 pAioMgr->cReqEntries = pAioMgr->cRequestsActiveMax;
63 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
64
65 if (pAioMgr->pahReqsFree)
66 {
67 /* Create the range lock memcache. */
68 rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
69 0, UINT32_MAX, NULL, NULL, NULL, 0);
70 if (RT_SUCCESS(rc))
71 return VINF_SUCCESS;
72
73 RTMemFree(pAioMgr->pahReqsFree);
74 }
75 else
76 {
77 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
78 rc = VERR_NO_MEMORY;
79 }
80 }
81
82 return rc;
83}
84
85void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
86{
87 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
88
89 while (pAioMgr->iFreeEntry > 0)
90 {
91 pAioMgr->iFreeEntry--;
92 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] != NIL_RTFILEAIOREQ);
93 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry]);
94 }
95
96 RTMemFree(pAioMgr->pahReqsFree);
97 RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
98}
99
/**
 * Sorts the endpoint list with insertion sort.
 *
 * After the sort the endpoints are ordered by AioMgr.cReqsPerSec in
 * descending order (busiest endpoint at the head). Used by the load
 * balancer before splitting the list between two managers.
 *
 * @returns nothing.
 * @param   pAioMgr    The I/O manager whose endpoint list is sorted.
 *                     The list must contain at least one endpoint
 *                     (pEndpointsHead is dereferenced unconditionally).
 */
static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
{
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;

    /* The first element is trivially sorted; start with the second. */
    pEpPrev = pAioMgr->pEndpointsHead;
    pEpCurr = pEpPrev->AioMgr.pEndpointNext;

    /* NOTE(review): pEpPrev is not reset to the predecessor of the next
     * element after each insertion; the DEBUG walk below validates the final
     * ordering, but confirm this insertion-sort variant against the intended
     * algorithm. */
    while (pEpCurr)
    {
        /* Remember the next element to sort because the list might change. */
        pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;

        /* Unlink the current element from the list. */
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;

        if (pPrev)
            pPrev->AioMgr.pEndpointNext = pNext;
        else
            pAioMgr->pEndpointsHead = pNext;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pPrev;

        /* Go back until we reached the place to insert the current endpoint into. */
        while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
            pEpPrev = pEpPrev->AioMgr.pEndpointPrev;

        /* Link the endpoint into the list. */
        if (pEpPrev)
            pNext = pEpPrev->AioMgr.pEndpointNext;
        else
            pNext = pAioMgr->pEndpointsHead;

        pEpCurr->AioMgr.pEndpointNext = pNext;
        pEpCurr->AioMgr.pEndpointPrev = pEpPrev;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pEpCurr;

        if (pEpPrev)
            pEpPrev->AioMgr.pEndpointNext = pEpCurr;
        else
            pAioMgr->pEndpointsHead = pEpCurr;

        pEpCurr = pEpNextToSort;
    }

#ifdef DEBUG
    /* Validate sorting algorithm: walk the list once, checking the ordering
     * invariant between neighbours and that no endpoint was lost. */
    unsigned cEndpoints = 0;
    pEpCurr = pAioMgr->pEndpointsHead;

    AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
    AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));

    while (pEpCurr)
    {
        cEndpoints++;

        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;

        Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
        Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);

        pEpCurr = pNext;
    }

    AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));

#endif
}
176
177/**
178 * Removes an endpoint from the currently assigned manager.
179 *
180 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
181 * FALSE otherwise.
182 * @param pEndpointRemove The endpoint to remove.
183 */
184static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
185{
186 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
187 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
188 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
189
190 pAioMgr->cEndpoints--;
191
192 if (pPrev)
193 pPrev->AioMgr.pEndpointNext = pNext;
194 else
195 pAioMgr->pEndpointsHead = pNext;
196
197 if (pNext)
198 pNext->AioMgr.pEndpointPrev = pPrev;
199
200 /* Make sure that there is no request pending on this manager for the endpoint. */
201 if (!pEndpointRemove->AioMgr.cRequestsActive)
202 {
203 Assert(!pEndpointRemove->pFlushReq);
204
205 /* Reopen the file so that the new endpoint can re-associate with the file */
206 RTFileClose(pEndpointRemove->File);
207 int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
208 AssertRC(rc);
209 return false;
210 }
211
212 return true;
213}
214
215static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
216{
217 /* Balancing doesn't make sense with only one endpoint. */
218 if (pAioMgr->cEndpoints == 1)
219 return false;
220
221 /* Doesn't make sens to move endpoints if only one produces the whole load */
222 unsigned cEndpointsWithLoad = 0;
223
224 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
225
226 while (pCurr)
227 {
228 if (pCurr->AioMgr.cReqsPerSec)
229 cEndpointsWithLoad++;
230
231 pCurr = pCurr->AioMgr.pEndpointNext;
232 }
233
234 return (cEndpointsWithLoad > 1);
235}
236
237/**
238 * Creates a new I/O manager and spreads the I/O load of the endpoints
239 * between the given I/O manager and the new one.
240 *
241 * @returns nothing.
242 * @param pAioMgr The I/O manager with high I/O load.
243 */
244static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
245{
246 PPDMACEPFILEMGR pAioMgrNew = NULL;
247 int rc = VINF_SUCCESS;
248
249 /*
250 * Check if balancing would improve the situation.
251 */
252 if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
253 {
254 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
255
256 rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
257 if (RT_SUCCESS(rc))
258 {
259 /* We will sort the list by request count per second. */
260 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
261
262 /* Now move some endpoints to the new manager. */
263 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
264 unsigned cReqsOther = 0;
265 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
266
267 while (pCurr)
268 {
269 if (cReqsHere <= cReqsOther)
270 {
271 /*
272 * The other manager has more requests to handle now.
273 * We will keep the current endpoint.
274 */
275 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
276 cReqsHere += pCurr->AioMgr.cReqsPerSec;
277 pCurr = pCurr->AioMgr.pEndpointNext;
278 }
279 else
280 {
281 /* Move to other endpoint. */
282 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
283 cReqsOther += pCurr->AioMgr.cReqsPerSec;
284
285 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
286
287 pCurr = pCurr->AioMgr.pEndpointNext;
288
289 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
290
291 if (fReqsPending)
292 {
293 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
294 pMove->AioMgr.fMoving = true;
295 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
296 }
297 else
298 {
299 pMove->AioMgr.fMoving = false;
300 pMove->AioMgr.pAioMgrDst = NULL;
301 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
302 }
303 }
304 }
305 }
306 else
307 {
308 /* Don't process further but leave a log entry about reduced performance. */
309 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
310 }
311 }
312 else
313 Log(("AIOMgr: Load balancing would not improve anything\n"));
314}
315
/**
 * Increase the maximum number of active requests for the given I/O manager.
 *
 * Grows cRequestsActiveMax by PDMACEPFILEMGR_REQS_STEP, recreates the host
 * async I/O context with the new limit and enlarges the request handle
 * cache. On Windows the endpoint files are reopened first so they can be
 * associated with the new context. On failure the old maximum is restored;
 * the manager is put back into the RUNNING state either way.
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager to grow. Must be in the GROWING state
 *                     with no active requests (asserted below).
 */
static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;

    LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));

    AssertMsg(    pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING
              && !pAioMgr->cRequestsActive,
              ("Invalid state of the I/O manager\n"));

#ifdef RT_OS_WINDOWS
    /*
     * Reopen the files of all assigned endpoints first so we can assign them to the new
     * I/O context.
     */
    PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;

    while (pCurr)
    {
        RTFileClose(pCurr->File);
        rc = RTFileOpen(&pCurr->File, pCurr->Core.pszUri, pCurr->fFlags);
        AssertRC(rc);

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    /* Create the new bigger context. */
    pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;

    /* Same pattern as in pdmacFileAioMgrNormalInit: try unlimited, fall back
     * to the explicit maximum if the host refuses. */
    rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
    if (rc == VERR_OUT_OF_RANGE)
        rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);

    if (RT_SUCCESS(rc))
    {
        /* Close the old context. Safe because no requests are active (asserted
         * above). */
        rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
        AssertRC(rc);

        pAioMgr->hAioCtx = hAioCtxNew;

        /* Create a new I/O task handle array */
        uint32_t cReqEntriesNew = pAioMgr->cRequestsActiveMax + 1;
        RTFILEAIOREQ *pahReqNew = (RTFILEAIOREQ *)RTMemAllocZ(cReqEntriesNew * sizeof(RTFILEAIOREQ));

        if (pahReqNew)
        {
            /* Copy the cached request handles. */
            for (uint32_t iReq = 0; iReq < pAioMgr->cReqEntries; iReq++)
                pahReqNew[iReq] = pAioMgr->pahReqsFree[iReq];

            RTMemFree(pAioMgr->pahReqsFree);
            pAioMgr->pahReqsFree = pahReqNew;
            pAioMgr->cReqEntries = cReqEntriesNew;
            LogFlowFunc(("I/O manager increased to handle a maximum of %u requests\n",
                         pAioMgr->cRequestsActiveMax));
        }
        else
            rc = VERR_NO_MEMORY;
    }

#ifdef RT_OS_WINDOWS
    /* Assign the file to the new context. */
    pCurr = pAioMgr->pEndpointsHead;

    while (pCurr)
    {
        rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->File);
        AssertRC(rc);

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    if (RT_FAILURE(rc))
    {
        LogFlow(("Increasing size of the I/O manager failed with rc=%Rrc\n", rc));
        /* Roll the maximum back so the accounting stays consistent. */
        pAioMgr->cRequestsActiveMax -= PDMACEPFILEMGR_REQS_STEP;
    }

    pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
    LogFlowFunc(("returns rc=%Rrc\n", rc));

    return rc;
}
409
410/**
411 * Checks if a given status code is fatal.
412 * Non fatal errors can be fixed by migrating the endpoint to a
413 * failsafe manager.
414 *
415 * @returns true If the error is fatal and migrating to a failsafe manager doesn't help
416 * false If the error can be fixed by a migration. (image on NFS disk for example)
417 * @param rcReq The status code to check.
418 */
419DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
420{
421 return rcReq == VERR_DEV_IO_ERROR
422 || rcReq == VERR_FILE_IO_ERROR
423 || rcReq == VERR_DISK_IO_ERROR
424 || rcReq == VERR_DISK_FULL
425 || rcReq == VERR_FILE_TOO_BIG;
426}
427
/**
 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
 *
 * Marks this manager as faulted and switches the endpoint class over to the
 * simple (failsafe) manager type. NOTE(review): the actual migration of the
 * endpoints is not implemented yet - see the AssertMsgFailed below.
 *
 * @returns VBox status code
 * @param   pAioMgr    The I/O manager the error occurred on.
 * @param   rc         The error code.
 *                     (Call site source position is passed via RT_SRC_POS_DECL
 *                     for the release log entry.)
 */
static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
{
    LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
            pAioMgr, rc));
    LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
    LogRel(("AIOMgr: Please contact the product vendor\n"));

    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;

    /* Fault this manager and force all future managers of this class to the
     * simple (failsafe) type. */
    pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
    ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);

    AssertMsgFailed(("Implement\n"));
    return VINF_SUCCESS;
}
450
451/**
452 * Put a list of tasks in the pending request list of an endpoint.
453 */
454DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
455{
456 /* Add the rest of the tasks to the pending list */
457 if (!pEndpoint->AioMgr.pReqsPendingHead)
458 {
459 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
460 pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
461 }
462 else
463 {
464 Assert(pEndpoint->AioMgr.pReqsPendingTail);
465 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
466 }
467
468 /* Update the tail. */
469 while (pTaskHead->pNext)
470 pTaskHead = pTaskHead->pNext;
471
472 pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
473 pTaskHead->pNext = NULL;
474}
475
476/**
477 * Put one task in the pending request list of an endpoint.
478 */
479DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
480{
481 /* Add the rest of the tasks to the pending list */
482 if (!pEndpoint->AioMgr.pReqsPendingHead)
483 {
484 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
485 pEndpoint->AioMgr.pReqsPendingHead = pTask;
486 }
487 else
488 {
489 Assert(pEndpoint->AioMgr.pReqsPendingTail);
490 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
491 }
492
493 pEndpoint->AioMgr.pReqsPendingTail = pTask;
494 pTask->pNext = NULL;
495}
496
497/**
498 * Allocates a async I/O request.
499 *
500 * @returns Handle to the request.
501 * @param pAioMgr The I/O manager.
502 */
503static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
504{
505 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
506
507 /* Get a request handle. */
508 if (pAioMgr->iFreeEntry > 0)
509 {
510 pAioMgr->iFreeEntry--;
511 hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeEntry];
512 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = NIL_RTFILEAIOREQ;
513 Assert(hReq != NIL_RTFILEAIOREQ);
514 }
515 else
516 {
517 int rc = RTFileAioReqCreate(&hReq);
518 AssertRC(rc);
519 }
520
521 return hReq;
522}
523
524/**
525 * Frees a async I/O request handle.
526 *
527 * @returns nothing.
528 * @param pAioMgr The I/O manager.
529 * @param hReq The I/O request handle to free.
530 */
531static void pdmacFileAioMgrNormalRequestFree(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
532{
533 Assert(pAioMgr->iFreeEntry < pAioMgr->cReqEntries);
534 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] == NIL_RTFILEAIOREQ);
535
536 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = hReq;
537 pAioMgr->iFreeEntry++;
538}
539
540/**
541 * Wrapper around RTFIleAioCtxSubmit() which is also doing error handling.
542 */
543static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
544 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
545 PRTFILEAIOREQ pahReqs, unsigned cReqs)
546{
547 int rc;
548
549 pAioMgr->cRequestsActive += cReqs;
550 pEndpoint->AioMgr.cRequestsActive += cReqs;
551
552 LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
553 LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));
554
555 rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
556 if (RT_FAILURE(rc))
557 {
558 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
559 {
560 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
561
562 /* Append any not submitted task to the waiting list. */
563 for (size_t i = 0; i < cReqs; i++)
564 {
565 int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
566
567 if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
568 {
569 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
570
571 Assert(pTask->hReq == pahReqs[i]);
572 pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
573 pAioMgr->cRequestsActive--;
574 pEndpoint->AioMgr.cRequestsActive--;
575
576 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
577 {
578 /* Clear the pending flush */
579 Assert(pEndpoint->pFlushReq == pTask);
580 pEndpoint->pFlushReq = NULL;
581 }
582 }
583 }
584
585 pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;
586
587 /* Print an entry in the release log */
588 if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
589 {
590 pEpClass->fOutOfResourcesWarningPrinted = true;
591 LogRel(("AIOMgr: Host limits number of active IO requests to %u. Expect a performance impact.\n",
592 pAioMgr->cRequestsActive));
593 }
594
595 LogFlow(("Removed requests. I/O manager has a total of %u active requests now\n", pAioMgr->cRequestsActive));
596 LogFlow(("Endpoint has a total of %u active requests now\n", pEndpoint->AioMgr.cRequestsActive));
597 rc = VINF_SUCCESS;
598 }
599 else /* Another kind of error happened (full disk, ...) */
600 {
601 /* An error happened. Find out which one caused the error and resubmit all other tasks. */
602 for (size_t i = 0; i < cReqs; i++)
603 {
604 int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
605
606 if (rcReq == VERR_FILE_AIO_NOT_SUBMITTED)
607 {
608 /* We call ourself again to do any error handling which might come up now. */
609 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &pahReqs[i], 1);
610 AssertRC(rc);
611 }
612 else if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
613 {
614 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
615
616 pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, pahReqs[i], rcReq, 0);
617 }
618 }
619
620
621 if ( pEndpoint->pFlushReq
622 && !pAioMgr->cRequestsActive
623 && !pEndpoint->fAsyncFlushSupported)
624 {
625 /*
626 * Complete a pending flush if we don't have requests enqueued and the host doesn't support
627 * the async flush API.
628 * Happens only if this we just noticed that this is not supported
629 * and the only active request was a flush.
630 */
631 PPDMACTASKFILE pFlush = pEndpoint->pFlushReq;
632 pEndpoint->pFlushReq = NULL;
633 pFlush->pfnCompleted(pFlush, pFlush->pvUser, VINF_SUCCESS);
634 pdmacFileTaskFree(pEndpoint, pFlush);
635 }
636 }
637 }
638
639 return VINF_SUCCESS;
640}
641
/**
 * Checks whether the given file range is currently locked by an active task.
 *
 * WARNING: this is not a pure predicate - when a conflicting lock is found,
 * pTask is appended to that lock's waiting list as a side effect and will be
 * released when the lock owner completes.
 *
 * @returns true if the range is locked (pTask has been queued on the lock),
 *          false otherwise.
 * @param   pEndpoint    The endpoint whose range tree is consulted.
 * @param   offStart     Start offset of the range to check.
 * @param   cbRange      Size of the range in bytes.
 * @param   pTask        The read or write task wanting the range (asserted).
 */
static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                               RTFOFF offStart, size_t cbRange,
                                               PPDMACTASKFILE pTask)
{
    PPDMACFILERANGELOCK pRangeLock = NULL; /** < Range lock */

    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
              ("Invalid task type %d\n", pTask->enmTransferType));

    /* Exact-key lookup first; a lock starting precisely at offStart. */
    pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
    if (!pRangeLock)
    {
        /* Best-fit lookup: the nearest lock at or above offStart. */
        pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
        /* Check if we intersect with the range. */
        if (   !pRangeLock
            || !(   (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
                 && (pRangeLock->Core.KeyLast) >= offStart))
        {
            pRangeLock = NULL; /* False alarm */
        }
    }

    /* Check whether we have one of the situations explained below */
    if (   pRangeLock
#if 0 /** @todo: later. For now we will just block all requests if they interfere */
        && (   (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
            || (!pRangeLock->fReadLock)
#endif
        )
    {
        /* Add to the list. */
        pTask->pNext = NULL;

        if (!pRangeLock->pWaitingTasksHead)
        {
            Assert(!pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksHead = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        else
        {
            AssertPtr(pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksTail->pNext = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        return true;
    }

    return false;
}
693
/**
 * Locks the file range [offStart, offStart + cbRange - 1] for the given task.
 *
 * @returns VBox status code (VERR_NO_MEMORY if the lock record cannot be
 *          allocated).
 * @param   pAioMgr      The I/O manager owning the range lock memcache.
 * @param   pEndpoint    The endpoint whose range tree receives the lock.
 * @param   offStart     Start offset of the range to lock.
 * @param   cbRange      Size of the range in bytes.
 * @param   pTask        The task acquiring the lock; pTask->pRangeLock is set
 *                       on success.
 */
static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
                                          PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                          RTFOFF offStart, size_t cbRange,
                                          PPDMACTASKFILE pTask)
{
    /* NOTE(review): pdmacFileAioMgrNormalIsRangeLocked has a side effect
     * (queues pTask on a conflicting lock) and is only evaluated in strict
     * builds here - verify this AssertMsg is intended as debug-only. */
    AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask),
              ("Range is already locked offStart=%RTfoff cbRange=%u\n",
               offStart, cbRange));

    PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
    if (!pRangeLock)
        return VERR_NO_MEMORY;

    /* Init the lock. */
    pRangeLock->Core.Key          = offStart;
    pRangeLock->Core.KeyLast      = offStart + cbRange - 1;
    pRangeLock->cRefs             = 1;
    pRangeLock->fReadLock         = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
    pRangeLock->pWaitingTasksHead = NULL;
    pRangeLock->pWaitingTasksTail = NULL;

    /* NOTE(review): fInserted is only consumed by AssertMsg, so non-strict
     * builds may warn about an unused variable - confirm against build flags. */
    bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
    AssertMsg(fInserted, ("Range lock was not inserted!\n"));

    /* Let the task point to its lock. */
    pTask->pRangeLock = pRangeLock;

    return VINF_SUCCESS;
}
723
724static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
725 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
726 PPDMACFILERANGELOCK pRangeLock)
727{
728 PPDMACTASKFILE pTasksWaitingHead;
729
730 AssertPtr(pRangeLock);
731 Assert(pRangeLock->cRefs == 1);
732
733 RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
734 pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
735 pRangeLock->pWaitingTasksHead = NULL;
736 pRangeLock->pWaitingTasksTail = NULL;
737 RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
738
739 return pTasksWaitingHead;
740}
741
742static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
743 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
744 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
745{
746 int rc = VINF_SUCCESS;
747 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
748 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
749 void *pvBuf = pTask->DataSeg.pvSeg;
750
751 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
752 || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
753 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
754 pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
755
756 pTask->fPrefetch = false;
757 pTask->cbBounceBuffer = 0;
758
759 /*
760 * Before we start to setup the request we have to check whether there is a task
761 * already active which range intersects with ours. We have to defer execution
762 * of this task in two cases:
763 * - The pending task is a write and the current is either read or write
764 * - The pending task is a read and the current task is a write task.
765 *
766 * To check whether a range is currently "locked" we use the AVL tree where every pending task
767 * is stored by its file offset range. The current task will be added to the active task
768 * and will be executed when the active one completes. (The method below
769 * which checks whether a range is already used will add the task)
770 *
771 * This is necessary because of the requirement to align all requests to a 512 boundary
772 * which is enforced by the host OS (Linux and Windows atm). It is possible that
773 * we have to process unaligned tasks and need to align them using bounce buffers.
774 * While the data is fetched from the file another request might arrive writing to
775 * the same range. This will result in data corruption if both are executed concurrently.
776 */
777 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
778
779 if (!fLocked)
780 {
781 /* Get a request handle. */
782 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
783 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
784
785 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
786 {
787 /* Grow the file if needed. */
788 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
789 {
790 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
791 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
792 }
793
794 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
795 pTask->Off, pTask->DataSeg.pvSeg,
796 pTask->DataSeg.cbSeg, pTask);
797 }
798 else
799 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
800 pTask->Off, pTask->DataSeg.pvSeg,
801 pTask->DataSeg.cbSeg, pTask);
802 AssertRC(rc);
803
804 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
805 pTask->DataSeg.cbSeg,
806 pTask);
807
808 if (RT_SUCCESS(rc))
809 {
810 pTask->hReq = hReq;
811 *phReq = hReq;
812 }
813 }
814 else
815 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
816
817 return rc;
818}
819
/**
 * Prepares a non-buffered (direct I/O) read or write task for submission.
 *
 * Direct I/O requires the offset, transfer size and buffer address to meet
 * the host alignment restrictions (512 byte boundary; buffer alignment per
 * pEpClassFile->uBitmaskAlignment). Unaligned tasks are serviced through a
 * page-allocated bounce buffer; unaligned writes additionally prefetch the
 * surrounding sectors first (pTask->fPrefetch) before the data is merged in.
 *
 * @returns VBox status code.
 * @param   pAioMgr      The I/O manager.
 * @param   pEndpoint    The endpoint the task belongs to.
 * @param   pTask        The task to prepare.
 * @param   phReq        Where to store the prepared request handle on success.
 *                       Left untouched if the task was deferred or on error.
 */
static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
                                                       PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                                       PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
{
    int rc = VINF_SUCCESS;
    RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
    void *pvBuf = pTask->DataSeg.pvSeg;

    /*
     * Check if the alignment requirements are met.
     * Offset, transfer size and buffer address
     * need to be on a 512 boundary.
     */
    /* Round the offset down and the size up to 512-byte boundaries. */
    RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
    size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
    PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;

    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
              ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
               offStart, cbToTransfer, pEndpoint->cbFile));

    pTask->fPrefetch = false;

    /*
     * Before we start to setup the request we have to check whether there is a task
     * already active which range intersects with ours. We have to defer execution
     * of this task in two cases:
     *     - The pending task is a write and the current is either read or write
     *     - The pending task is a read and the current task is a write task.
     *
     * To check whether a range is currently "locked" we use the AVL tree where every pending task
     * is stored by its file offset range. The current task will be added to the active task
     * and will be executed when the active one completes. (The method below
     * which checks whether a range is already used will add the task)
     *
     * This is necessary because of the requirement to align all requests to a 512 boundary
     * which is enforced by the host OS (Linux and Windows atm). It is possible that
     * we have to process unaligned tasks and need to align them using bounce buffers.
     * While the data is fetched from the file another request might arrive writing to
     * the same range. This will result in data corruption if both are executed concurrently.
     */
    bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);

    if (!fLocked)
    {
        /* Get a request handle. */
        hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
        AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));

        /* A bounce buffer is needed when the size, the offset or the buffer
         * address violates the alignment restrictions. */
        if (   RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
            || RT_UNLIKELY(offStart != pTask->Off)
            || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
        {
            LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
                     pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));

            /* Create bounce buffer. */
            pTask->cbBounceBuffer = cbToTransfer;

            AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
                      pTask->Off, offStart));
            pTask->offBounceBuffer = pTask->Off - offStart;

            /** @todo: I think we need something like a RTMemAllocAligned method here.
             * Current assumption is that the maximum alignment is 4096byte
             * (GPT disk on Windows)
             * so we can use RTMemPageAlloc here.
             */
            pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
            if (RT_LIKELY(pTask->pvBounceBuffer))
            {
                pvBuf = pTask->pvBounceBuffer;

                if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
                {
                    if (   RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
                        || RT_UNLIKELY(offStart != pTask->Off))
                    {
                        /* We have to fill the buffer first before we can update the data. */
                        LogFlow(("Prefetching data for task %#p\n", pTask));
                        pTask->fPrefetch = true;
                        enmTransferType = PDMACTASKFILETRANSFER_READ;
                    }
                    else
                        memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
                }
            }
            else
                rc = VERR_NO_MEMORY;
        }
        else
            pTask->cbBounceBuffer = 0;

        if (RT_SUCCESS(rc))
        {
            AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
                      ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));

            if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
            {
                /* Grow the file if needed. */
                if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                {
                    ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                    RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
                }

                rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                              offStart, pvBuf, cbToTransfer, pTask);
            }
            else
                rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
                                             offStart, pvBuf, cbToTransfer, pTask);
            AssertRC(rc);

            rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask);

            if (RT_SUCCESS(rc))
            {
                pTask->hReq = hReq;
                *phReq = hReq;
            }
            else
            {
                /* Cleanup */
                /* NOTE(review): hReq is not returned to the free cache on this
                 * path - looks like a leak; verify against later revisions. */
                if (pTask->cbBounceBuffer)
                    RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
            }
        }
    }
    else
        LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));

    return rc;
}
957
958static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
959 PPDMACEPFILEMGR pAioMgr,
960 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
961{
962 RTFILEAIOREQ apReqs[20];
963 unsigned cRequests = 0;
964 unsigned cMaxRequests = pAioMgr->cRequestsActiveMax - pAioMgr->cRequestsActive;
965 int rc = VINF_SUCCESS;
966
967 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
968 ("Trying to process request lists of a non active endpoint!\n"));
969
970 /* Go through the list and queue the requests until we get a flush request */
971 while ( pTaskHead
972 && !pEndpoint->pFlushReq
973 && (pAioMgr->cRequestsActive + cRequests < pAioMgr->cRequestsActiveMax)
974 && RT_SUCCESS(rc))
975 {
976 RTMSINTERVAL msWhenNext;
977 PPDMACTASKFILE pCurr = pTaskHead;
978
979 if (!pdmacEpIsTransferAllowed(&pEndpoint->Core, (uint32_t)pCurr->DataSeg.cbSeg, &msWhenNext))
980 {
981 pAioMgr->fBwLimitReached = true;
982 break;
983 }
984
985 pTaskHead = pTaskHead->pNext;
986
987 pCurr->pNext = NULL;
988
989 AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
990 ("Endpoints do not match\n"));
991
992 switch (pCurr->enmTransferType)
993 {
994 case PDMACTASKFILETRANSFER_FLUSH:
995 {
996 /* If there is no data transfer request this flush request finished immediately. */
997 if (pEndpoint->fAsyncFlushSupported)
998 {
999 /* Issue a flush to the host. */
1000 RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
1001 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
1002
1003 LogFlow(("Flush request %#p\n", hReq));
1004
1005 rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->File, pCurr);
1006 if (RT_FAILURE(rc))
1007 {
1008 pEndpoint->fAsyncFlushSupported = false;
1009 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1010 rc = VINF_SUCCESS; /* Fake success */
1011 }
1012 else
1013 {
1014 pCurr->hReq = hReq;
1015 apReqs[cRequests] = hReq;
1016 pEndpoint->AioMgr.cReqsProcessed++;
1017 cRequests++;
1018 }
1019 }
1020
1021 if ( !pEndpoint->AioMgr.cRequestsActive
1022 && !pEndpoint->fAsyncFlushSupported)
1023 {
1024 pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
1025 pdmacFileTaskFree(pEndpoint, pCurr);
1026 }
1027 else
1028 {
1029 Assert(!pEndpoint->pFlushReq);
1030 pEndpoint->pFlushReq = pCurr;
1031 }
1032 break;
1033 }
1034 case PDMACTASKFILETRANSFER_READ:
1035 case PDMACTASKFILETRANSFER_WRITE:
1036 {
1037 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
1038
1039 if (pCurr->hReq == NIL_RTFILEAIOREQ)
1040 {
1041 if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
1042 rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1043 else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
1044 rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1045 else
1046 AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));
1047
1048 AssertRC(rc);
1049 }
1050 else
1051 {
1052 LogFlow(("Task %#p has I/O request %#p already\n", pCurr, pCurr->hReq));
1053 hReq = pCurr->hReq;
1054 }
1055
1056 LogFlow(("Read/Write request %#p\n", hReq));
1057
1058 if (hReq != NIL_RTFILEAIOREQ)
1059 {
1060 apReqs[cRequests] = hReq;
1061 cRequests++;
1062 }
1063 break;
1064 }
1065 default:
1066 AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
1067 } /* switch transfer type */
1068
1069 /* Queue the requests if the array is full. */
1070 if (cRequests == RT_ELEMENTS(apReqs))
1071 {
1072 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1073 cRequests = 0;
1074 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1075 ("Unexpected return code\n"));
1076 }
1077 }
1078
1079 if (cRequests)
1080 {
1081 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1082 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1083 ("Unexpected return code rc=%Rrc\n", rc));
1084 }
1085
1086 if (pTaskHead)
1087 {
1088 /* Add the rest of the tasks to the pending list */
1089 pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);
1090
1091 if (RT_UNLIKELY( pAioMgr->cRequestsActiveMax == pAioMgr->cRequestsActive
1092 && !pEndpoint->pFlushReq
1093 && !pAioMgr->fBwLimitReached))
1094 {
1095#if 0
1096 /*
1097 * The I/O manager has no room left for more requests
1098 * but there are still requests to process.
1099 * Create a new I/O manager and let it handle some endpoints.
1100 */
1101 pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
1102#else
1103 /* Grow the I/O manager */
1104 pAioMgr->enmState = PDMACEPFILEMGRSTATE_GROWING;
1105#endif
1106 }
1107 }
1108
1109 /* Insufficient resources are not fatal. */
1110 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
1111 rc = VINF_SUCCESS;
1112
1113 return rc;
1114}
1115
1116/**
1117 * Adds all pending requests for the given endpoint
1118 * until a flush request is encountered or there is no
1119 * request anymore.
1120 *
1121 * @returns VBox status code.
1122 * @param pAioMgr The async I/O manager for the endpoint
1123 * @param pEndpoint The endpoint to get the requests from.
1124 */
1125static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
1126 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
1127{
1128 int rc = VINF_SUCCESS;
1129 PPDMACTASKFILE pTasksHead = NULL;
1130
1131 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
1132 ("Trying to process request lists of a non active endpoint!\n"));
1133
1134 Assert(!pEndpoint->pFlushReq);
1135
1136 /* Check the pending list first */
1137 if (pEndpoint->AioMgr.pReqsPendingHead)
1138 {
1139 LogFlow(("Queuing pending requests first\n"));
1140
1141 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
1142 /*
1143 * Clear the list as the processing routine will insert them into the list
1144 * again if it gets a flush request.
1145 */
1146 pEndpoint->AioMgr.pReqsPendingHead = NULL;
1147 pEndpoint->AioMgr.pReqsPendingTail = NULL;
1148 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1149 AssertRC(rc);
1150 }
1151
1152 if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
1153 {
1154 /* Now the request queue. */
1155 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
1156 if (pTasksHead)
1157 {
1158 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1159 AssertRC(rc);
1160 }
1161 }
1162
1163 return rc;
1164}
1165
/**
 * Handles one pending blocking event posted to the I/O manager by another
 * thread (endpoint add/remove/close, shutdown, suspend, resume).
 *
 * The posting thread waits on EventSemBlock; it is only signalled when the
 * event could be completed synchronously (fNotifyWaiter). Events that need
 * outstanding requests to drain first (e.g. closing a busy endpoint) signal
 * the waiter later from the completion path.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager with a pending blocking event.
 */
static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    bool fNotifyWaiter = false;

    LogFlowFunc((": Enter\n"));

    Assert(pAioMgr->fBlockingEventPending);

    switch (pAioMgr->enmBlockingEvent)
    {
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without a endpoint to add\n"));

            pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;

            /* Link the endpoint at the head of this manager's endpoint list. */
            pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
            pEndpointNew->AioMgr.pEndpointPrev = NULL;
            if (pAioMgr->pEndpointsHead)
                pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
            pAioMgr->pEndpointsHead = pEndpointNew;

            /* Assign the completion point to this file. */
            rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
            fNotifyWaiter = true;
            pAioMgr->cEndpoints++;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

            pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
            /* Only signal the waiter now if the endpoint has no requests in
             * flight; otherwise the completion path finishes the removal. */
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = ASMAtomicReadPtrT(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, PPDMASYNCCOMPLETIONENDPOINTFILE);
            AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without a endpoint to close\n"));

            if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
            {
                LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));

                /* Make sure all tasks finished. Process the queues a last time first. */
                rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
                AssertRC(rc);

                pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
                fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
            }
            /* A close may be retried while the endpoint is already draining. */
            else if (   (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
                     && (!pEndpointClose->AioMgr.cRequestsActive))
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
            /* With requests still active the main loop signals on drain. */
            if (!pAioMgr->cRequestsActive)
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
            fNotifyWaiter = true;
            break;
        }
        default:
            AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
    }

    if (fNotifyWaiter)
    {
        /* Clear the pending flag before signalling so the poster can queue
         * the next event as soon as it wakes up. */
        ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
        pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;

        /* Release the waiting thread. */
        LogFlow(("Signalling waiter\n"));
        rc = RTSemEventSignal(pAioMgr->EventSemBlock);
        AssertRC(rc);
    }

    LogFlowFunc((": Leave\n"));
    return rc;
}
1262
1263/**
1264 * Checks all endpoints for pending events or new requests.
1265 *
1266 * @returns VBox status code.
1267 * @param pAioMgr The I/O manager handle.
1268 */
1269static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
1270{
1271 /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
1272 int rc = VINF_SUCCESS;
1273 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
1274
1275 pAioMgr->fBwLimitReached = false;
1276
1277 while (pEndpoint)
1278 {
1279 if (!pEndpoint->pFlushReq
1280 && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1281 && !pEndpoint->AioMgr.fMoving)
1282 {
1283 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
1284 if (RT_FAILURE(rc))
1285 return rc;
1286 }
1287 else if ( !pEndpoint->AioMgr.cRequestsActive
1288 && pEndpoint->enmState != PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1289 {
1290 /* Reopen the file so that the new endpoint can re-associate with the file */
1291 RTFileClose(pEndpoint->File);
1292 rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
1293 AssertRC(rc);
1294
1295 if (pEndpoint->AioMgr.fMoving)
1296 {
1297 pEndpoint->AioMgr.fMoving = false;
1298 pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1299 }
1300 else
1301 {
1302 Assert(pAioMgr->fBlockingEventPending);
1303 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1304
1305 /* Release the waiting thread. */
1306 LogFlow(("Signalling waiter\n"));
1307 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1308 AssertRC(rc);
1309 }
1310 }
1311
1312 pEndpoint = pEndpoint->AioMgr.pEndpointNext;
1313 }
1314
1315 return rc;
1316}
1317
1318/**
1319 * Wrapper around pdmacFileAioMgrNormalReqCompleteRc().
1320 */
1321static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
1322{
1323 size_t cbTransfered = 0;
1324 int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
1325
1326 pdmacFileAioMgrNormalReqCompleteRc(pAioMgr, hReq, rcReq, cbTransfered);
1327}
1328
/**
 * Completes one host I/O request: handles failures (migration to the failsafe
 * manager), restarts partial transfers, finishes bounce-buffer prefetches and
 * finally invokes the task's completion callback.
 *
 * @param   pAioMgr        The I/O manager the request was submitted through.
 * @param   hReq           The completed IPRT async I/O request handle.
 * @param   rcReq          Status code of the request.
 * @param   cbTransfered   Number of bytes actually transferred.
 */
static void pdmacFileAioMgrNormalReqCompleteRc(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq,
                                               int rcReq, size_t cbTransfered)
{
    int rc = VINF_SUCCESS;
    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
    PPDMACTASKFILE pTasksWaiting;

    LogFlowFunc(("pAioMgr=%#p hReq=%#p\n", pAioMgr, hReq));

    pEndpoint = pTask->pEndpoint;

    pTask->hReq = NIL_RTFILEAIOREQ;

    /* Book-keeping: one request less in flight, one more processed. */
    pAioMgr->cRequestsActive--;
    pEndpoint->AioMgr.cRequestsActive--;
    pEndpoint->AioMgr.cReqsProcessed++;

    /*
     * It is possible that the request failed on Linux with kernels < 2.6.23
     * if the passed buffer was allocated with remap_pfn_range or if the file
     * is on an NFS endpoint which does not support async and direct I/O at the same time.
     * The endpoint will be migrated to a failsafe manager in case a request fails.
     */
    if (RT_FAILURE(rcReq))
    {
        /* Free bounce buffers and the IPRT request. */
        pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
            pEndpoint->fAsyncFlushSupported = false;
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
            /* The other method will take over now. */
        }
        else
        {
            /* Free the lock and process pending tasks if necessary */
            pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
            rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
            AssertRC(rc);

            if (pTask->cbBounceBuffer)
                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);

            /*
             * Fatal errors are reported to the guest and non-fatal errors
             * will cause a migration to the failsafe manager in the hope
             * that the error disappears.
             */
            if (!pdmacFileAioMgrNormalRcIsFatal(rcReq))
            {
                /* Queue the request on the pending list. */
                pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
                pEndpoint->AioMgr.pReqsPendingHead = pTask;

                /* Create a new failsafe manager if necessary. */
                if (!pEndpoint->AioMgr.fMoving)
                {
                    PPDMACEPFILEMGR pAioMgrFailsafe;

                    LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
                            RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));

                    pEndpoint->AioMgr.fMoving = true;

                    rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
                                               &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
                    AssertRC(rc);

                    pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;

                    /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
                    pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
                }

                /* If this was the last request for the endpoint migrate it to the new manager. */
                if (!pEndpoint->AioMgr.cRequestsActive)
                {
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
            else
            {
                /* Fatal error: report it to the task owner unchanged. */
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);
            }
        }
    }
    else
    {
        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            /* Clear pending flush */
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Completed flush request doesn't match active one\n"));
            pEndpoint->pFlushReq = NULL;
            pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

            /* Call completion callback */
            LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, rcReq));
            pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
            pdmacFileTaskFree(pEndpoint, pTask);
        }
        else
        {
            /*
             * Restart an incomplete transfer.
             * This usually means that the request will return an error now
             * but to get the cause of the error (disk full, file too big, I/O error, ...)
             * the transfer needs to be continued.
             */
            if (RT_UNLIKELY(   cbTransfered < pTask->DataSeg.cbSeg
                            || (   pTask->cbBounceBuffer
                                && cbTransfered < pTask->cbBounceBuffer)))
            {
                RTFOFF offStart;
                size_t cbToTransfer;
                uint8_t *pbBuf = NULL;

                LogFlow(("Restarting incomplete transfer %#p (%zu bytes transferred)\n",
                         pTask, cbTransfered));
                Assert(cbTransfered % 512 == 0);

                /* Continue right after the already transferred part, either in
                 * the bounce buffer or in the caller's data segment. */
                if (pTask->cbBounceBuffer)
                {
                    AssertPtr(pTask->pvBounceBuffer);
                    offStart = (pTask->Off & ~((RTFOFF)512-1)) + cbTransfered;
                    cbToTransfer = pTask->cbBounceBuffer - cbTransfered;
                    pbBuf = (uint8_t *)pTask->pvBounceBuffer + cbTransfered;
                }
                else
                {
                    Assert(!pTask->pvBounceBuffer);
                    offStart = pTask->Off + cbTransfered;
                    cbToTransfer = pTask->DataSeg.cbSeg - cbTransfered;
                    pbBuf = (uint8_t *)pTask->DataSeg.pvSeg + cbTransfered;
                }

                if (pTask->fPrefetch || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                {
                    rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File, offStart,
                                                 pbBuf, cbToTransfer, pTask);
                }
                else
                {
                    AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE,
                              ("Invalid transfer type\n"));
                    rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File, offStart,
                                                  pbBuf, cbToTransfer, pTask);
                }

                AssertRC(rc);
                rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
                AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                          ("Unexpected return code rc=%Rrc\n", rc));
            }
            else if (pTask->fPrefetch)
            {
                /* The prefetch read for an unaligned write completed: merge the
                 * caller's data into the bounce buffer and issue the real write. */
                Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
                Assert(pTask->cbBounceBuffer);

                memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                        pTask->DataSeg.pvSeg,
                        pTask->DataSeg.cbSeg);

                /* Write it now. */
                pTask->fPrefetch = false;
                size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
                RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);

                /* Grow the file if needed. */
                if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                {
                    ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                    RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
                }

                rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                              offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
                AssertRC(rc);
                rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, &hReq, 1);
                AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                          ("Unexpected return code rc=%Rrc\n", rc));
            }
            else
            {
                /* Regular successful completion. */
                if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
                {
                    /* For reads, copy the data out of the bounce buffer at the
                     * unaligned offset the caller asked for. */
                    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                        memcpy(pTask->DataSeg.pvSeg,
                               ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                               pTask->DataSeg.cbSeg);

                    RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
                }

                pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

                /* Free the lock and process pending tasks if necessary */
                pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
                if (pTasksWaiting)
                {
                    rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
                    AssertRC(rc);
                }

                /* Call completion callback */
                LogFlow(("Task=%#p completed with %Rrc\n", pTask, rcReq));
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);

                /*
                 * If there is no request left on the endpoint but a flush request is set
                 * it completed now and we notify the owner.
                 * Furthermore we look for new requests and continue.
                 */
                if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
                {
                    /* Call completion callback */
                    pTask = pEndpoint->pFlushReq;
                    pEndpoint->pFlushReq = NULL;

                    AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));

                    pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
                    pdmacFileTaskFree(pEndpoint, pTask);
                }
                else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
                {
                    /* If the endpoint is about to be migrated do it now. */
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
        } /* Not a flush request */
    } /* request completed successfully */
}
1574
/** Helper macro for checking for error codes.
 * On failure it runs the manager's error handler and returns its status from
 * the enclosing function. NOTE: because it contains a bare 'return' it cannot
 * be do/while(0)-wrapped - only use it as a full standalone statement. */
#define CHECK_RC(pAioMgr, rc) \
    if (RT_FAILURE(rc)) \
    {\
        int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS);\
        return rc2;\
    }
1582
/**
 * The normal I/O manager using the RTFileAio* API
 *
 * Thread main loop: sleeps while idle, processes blocking events, queues new
 * endpoint requests, waits for host I/O completions and keeps per-endpoint
 * load statistics up to date.
 *
 * @returns VBox status code.
 * @param ThreadSelf Handle of the thread.
 * @param pvUser Opaque user data.
 */
int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
{
    int rc = VINF_SUCCESS;
    PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
    uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;

    while (   (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
           || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING)
           || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
    {
        if (!pAioMgr->cRequestsActive)
        {
            /* Idle: sleep until another thread wakes us. The fWokenUp flag
             * catches a wake-up posted before we start waiting. */
            ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
            if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
                rc = RTSemEventWait(pAioMgr->EventSem, RT_INDEFINITE_WAIT);
            ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
            AssertRC(rc);

            LogFlow(("Got woken up\n"));
            ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
        }

        /* Check for an external blocking event first. */
        if (pAioMgr->fBlockingEventPending)
        {
            rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
            CHECK_RC(pAioMgr, rc);
        }

        if (RT_LIKELY(   pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
                      || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
        {
            /* We got woken up because an endpoint issued new requests. Queue them. */
            rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
            CHECK_RC(pAioMgr, rc);

            while (   pAioMgr->cRequestsActive
                   || pAioMgr->fBwLimitReached)
            {
                if (pAioMgr->cRequestsActive)
                {
                    RTFILEAIOREQ apReqs[20];
                    uint32_t cReqsCompleted = 0;
                    size_t cReqsWait;

                    /* Collect at most the batch size, but never more than are
                     * actually outstanding. */
                    if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
                        cReqsWait = RT_ELEMENTS(apReqs);
                    else
                        cReqsWait = pAioMgr->cRequestsActive;

                    LogFlow(("Waiting for %d of %d tasks to complete\n", 1, cReqsWait));

                    /* Block until at least one request completes. */
                    rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
                                          1,
                                          RT_INDEFINITE_WAIT, apReqs,
                                          cReqsWait, &cReqsCompleted);
                    if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
                        CHECK_RC(pAioMgr, rc);

                    LogFlow(("%d tasks completed\n", cReqsCompleted));

                    for (uint32_t i = 0; i < cReqsCompleted; i++)
                        pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);

                    /* Check for an external blocking event before we go to sleep again. */
                    if (pAioMgr->fBlockingEventPending)
                    {
                        rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
                        CHECK_RC(pAioMgr, rc);
                    }

                    /* Update load statistics. */
                    uint64_t uMillisCurr = RTTimeMilliTS();
                    if (uMillisCurr > uMillisEnd)
                    {
                        PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;

                        /* Calculate timespan. */
                        uMillisCurr -= uMillisEnd;

                        while (pEndpointCurr)
                        {
                            pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
                            pEndpointCurr->AioMgr.cReqsProcessed = 0;
                            pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
                        }

                        /* Set new update interval */
                        uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
                    }
                }
                else
                {
                    /*
                     * Bandwidth limit reached for all endpoints.
                     * Yield and wait until we have enough resources again.
                     */
                    RTThreadYield();
                }

                /* Check endpoints for new requests. */
                if (pAioMgr->enmState != PDMACEPFILEMGRSTATE_GROWING)
                {
                    rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
                    CHECK_RC(pAioMgr, rc);
                }
            } /* while requests are active. */

            if (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
            {
                /* Out of request slots: enlarge the manager, then queue what piled up. */
                rc = pdmacFileAioMgrNormalGrow(pAioMgr);
                AssertRC(rc);
                Assert(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING);

                rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
                CHECK_RC(pAioMgr, rc);
            }
        } /* if still running */
    } /* while running */

    LogFlowFunc(("rc=%Rrc\n", rc));
    return rc;
}
1713
1714#undef CHECK_RC
1715
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette