source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp@ 29450

Last change on this file since 29450 was 29450, checked in by vboxsync, 15 years ago

AsyncCompletion: Fix parameters for RTFileAioCtxWait

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.2 KB
1/* $Id: PDMAsyncCompletionFileNormal.cpp 29450 2010-05-13 15:35:35Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * Async File I/O manager.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
19#include <iprt/types.h>
20#include <iprt/asm.h>
21#include <iprt/file.h>
22#include <iprt/mem.h>
23#include <iprt/string.h>
24#include <iprt/assert.h>
25#include <VBox/log.h>
26
27#include "PDMAsyncCompletionFileInternal.h"
28
29/** The update period for the I/O load statistics in ms. */
30#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
31/** Maximum number of requests a manager will handle. */
32#define PDMACEPFILEMGR_REQS_STEP 512
33
34/*******************************************************************************
35*   Internal functions                                                        *
36*******************************************************************************/
37static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
38 PPDMACEPFILEMGR pAioMgr,
39 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
40
41static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
42 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
43 PPDMACFILERANGELOCK pRangeLock);
44
45int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
46{
47 int rc = VINF_SUCCESS;
48
49 pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
50
51 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
52 if (rc == VERR_OUT_OF_RANGE)
53 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
54
55 if (RT_SUCCESS(rc))
56 {
57 /* Initialize request handle array. */
58 pAioMgr->iFreeEntry = 0;
59 pAioMgr->cReqEntries = pAioMgr->cRequestsActiveMax;
60 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
61
62 if (pAioMgr->pahReqsFree)
63 {
64 /* Create the range lock memcache. */
65 rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
66 0, UINT32_MAX, NULL, NULL, NULL, 0);
67 if (RT_SUCCESS(rc))
68 return VINF_SUCCESS;
69
70 RTMemFree(pAioMgr->pahReqsFree);
71 }
72 else
73 {
74 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
75 rc = VERR_NO_MEMORY;
76 }
77 }
78
79 return rc;
80}
81
82void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
83{
84 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
85
86 while (pAioMgr->iFreeEntry > 0)
87 {
88 pAioMgr->iFreeEntry--;
89 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] != NIL_RTFILEAIOREQ);
90 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry]);
91 }
92
93 RTMemFree(pAioMgr->pahReqsFree);
94 RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
95}
96
97/**
98 * Sorts the endpoint list with insertion sort.
99 */
100static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
101{
102 PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;
103
104 pEpPrev = pAioMgr->pEndpointsHead;
105 pEpCurr = pEpPrev->AioMgr.pEndpointNext;
106
107 while (pEpCurr)
108 {
109 /* Remember the next element to sort because the list might change. */
110 pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;
111
112 /* Unlink the current element from the list. */
113 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
114 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
115
116 if (pPrev)
117 pPrev->AioMgr.pEndpointNext = pNext;
118 else
119 pAioMgr->pEndpointsHead = pNext;
120
121 if (pNext)
122 pNext->AioMgr.pEndpointPrev = pPrev;
123
124 /* Go back until we reach the place to insert the current endpoint. */
125 while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
126 pEpPrev = pEpPrev->AioMgr.pEndpointPrev;
127
128 /* Link the endpoint into the list. */
129 if (pEpPrev)
130 pNext = pEpPrev->AioMgr.pEndpointNext;
131 else
132 pNext = pAioMgr->pEndpointsHead;
133
134 pEpCurr->AioMgr.pEndpointNext = pNext;
135 pEpCurr->AioMgr.pEndpointPrev = pEpPrev;
136
137 if (pNext)
138 pNext->AioMgr.pEndpointPrev = pEpCurr;
139
140 if (pEpPrev)
141 pEpPrev->AioMgr.pEndpointNext = pEpCurr;
142 else
143 pAioMgr->pEndpointsHead = pEpCurr;
144
145 pEpCurr = pEpNextToSort;
146 }
147
148#ifdef DEBUG
149 /* Validate the sorting algorithm. */
150 unsigned cEndpoints = 0;
151 pEpCurr = pAioMgr->pEndpointsHead;
152
153 AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
154 AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));
155
156 while (pEpCurr)
157 {
158 cEndpoints++;
159
160 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
161 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
162
163 Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
164 Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);
165
166 pEpCurr = pNext;
167 }
168
169 AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));
170
171#endif
172}
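/*
 * A minimal standalone sketch of the list sort above, shown on a simplified
 * node type. EPNODE and epListSortByLoad are hypothetical names for
 * illustration only; like the original, the sketch assumes a non-empty list
 * and sorts by cReqsPerSec in descending order.
 */
#if 0 /* illustrative sketch, not part of the original file */
typedef struct EPNODE
{
    struct EPNODE *pPrev;
    struct EPNODE *pNext;
    unsigned       cReqsPerSec;
} EPNODE;

static void epListSortByLoad(EPNODE **ppHead)
{
    EPNODE *pCurr = (*ppHead)->pNext; /* A one element prefix is trivially sorted. */
    while (pCurr)
    {
        EPNODE *pNextToSort = pCurr->pNext;

        /* Unlink the current node; it has a predecessor by construction. */
        pCurr->pPrev->pNext = pCurr->pNext;
        if (pCurr->pNext)
            pCurr->pNext->pPrev = pCurr->pPrev;

        /* Find the first node of the sorted prefix with a smaller load. */
        EPNODE *pPrev = NULL;
        EPNODE *pNext = *ppHead;
        while (pNext != pNextToSort && pNext->cReqsPerSec >= pCurr->cReqsPerSec)
        {
            pPrev = pNext;
            pNext = pNext->pNext;
        }

        /* Relink the node between pPrev and pNext. */
        pCurr->pPrev = pPrev;
        pCurr->pNext = pNext;
        if (pNext)
            pNext->pPrev = pCurr;
        if (pPrev)
            pPrev->pNext = pCurr;
        else
            *ppHead = pCurr;

        pCurr = pNextToSort;
    }
}
#endif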
173
174/**
175 * Removes an endpoint from the currently assigned manager.
176 *
177 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
178 * FALSE otherwise.
179 * @param pEndpointRemove The endpoint to remove.
180 */
181static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
182{
183 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
184 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
185 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
186
187 pAioMgr->cEndpoints--;
188
189 if (pPrev)
190 pPrev->AioMgr.pEndpointNext = pNext;
191 else
192 pAioMgr->pEndpointsHead = pNext;
193
194 if (pNext)
195 pNext->AioMgr.pEndpointPrev = pPrev;
196
197 /* Make sure that there is no request pending on this manager for the endpoint. */
198 if (!pEndpointRemove->AioMgr.cRequestsActive)
199 {
200 Assert(!pEndpointRemove->pFlushReq);
201
202 /* Reopen the file so that it can be associated with the context of the new I/O manager. */
203 RTFileClose(pEndpointRemove->File);
204 int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
205 AssertRC(rc);
206 return false;
207 }
208
209 return true;
210}
211
212static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
213{
214 /* Balancing doesn't make sense with only one endpoint. */
215 if (pAioMgr->cEndpoints == 1)
216 return false;
217
218 /* It doesn't make sense to move endpoints if only one produces the whole load. */
219 unsigned cEndpointsWithLoad = 0;
220
221 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
222
223 while (pCurr)
224 {
225 if (pCurr->AioMgr.cReqsPerSec)
226 cEndpointsWithLoad++;
227
228 pCurr = pCurr->AioMgr.pEndpointNext;
229 }
230
231 return (cEndpointsWithLoad > 1);
232}
233
234/**
235 * Creates a new I/O manager and spreads the I/O load of the endpoints
236 * between the given I/O manager and the new one.
237 *
238 * @returns nothing.
239 * @param pAioMgr The I/O manager with high I/O load.
240 */
241static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
242{
243 PPDMACEPFILEMGR pAioMgrNew = NULL;
244 int rc = VINF_SUCCESS;
245
246 /*
247 * Check if balancing would improve the situation.
248 */
249 if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
250 {
251 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
252
253 rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
254 if (RT_SUCCESS(rc))
255 {
256 /* We will sort the list by request count per second. */
257 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
258
259 /* Now move some endpoints to the new manager. */
260 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
261 unsigned cReqsOther = 0;
262 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
263
264 while (pCurr)
265 {
266 if (cReqsHere <= cReqsOther)
267 {
268 /*
269 * The other manager has more requests to handle now.
270 * We will keep the current endpoint.
271 */
272 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
273 cReqsHere += pCurr->AioMgr.cReqsPerSec;
274 pCurr = pCurr->AioMgr.pEndpointNext;
275 }
276 else
277 {
278 /* Move the endpoint to the other manager. */
279 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
280 cReqsOther += pCurr->AioMgr.cReqsPerSec;
281
282 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
283
284 pCurr = pCurr->AioMgr.pEndpointNext;
285
286 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
287
288 if (fReqsPending)
289 {
290 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
291 pMove->AioMgr.fMoving = true;
292 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
293 }
294 else
295 {
296 pMove->AioMgr.fMoving = false;
297 pMove->AioMgr.pAioMgrDst = NULL;
298 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
299 }
300 }
301 }
302 }
303 else
304 {
305 /* Don't process further but leave a log entry about reduced performance. */
306 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
307 }
308 }
309 else
310 Log(("AIOMgr: Load balancing would not improve anything\n"));
311}
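/*
 * The keep/move decision above amounts to a greedy two-way partition of the
 * descending-sorted load list. A small self-contained model with hypothetical
 * example values (plain C, for illustration only):
 */
#if 0 /* illustrative sketch, not part of the original file */
#include <stdio.h>

int main(void)
{
    unsigned aLoads[] = { 90, 40, 30, 20, 10 }; /* reqs/s, sorted descending. */
    unsigned cReqsHere  = aLoads[0];            /* Old manager keeps the head. */
    unsigned cReqsOther = 0;                    /* New manager starts empty. */

    for (unsigned i = 1; i < sizeof(aLoads) / sizeof(aLoads[0]); i++)
        if (cReqsHere <= cReqsOther)
            cReqsHere += aLoads[i];             /* Keep on the old manager. */
        else
            cReqsOther += aLoads[i];            /* Move to the new manager. */

    /* Prints "old=100 reqs/s new=90 reqs/s" for the values above. */
    printf("old=%u reqs/s new=%u reqs/s\n", cReqsHere, cReqsOther);
    return 0;
}
#endif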
312
313/**
314 * Increase the maximum number of active requests for the given I/O manager.
315 *
316 * @returns VBox status code.
317 * @param pAioMgr The I/O manager to grow.
318 */
319static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
320{
321 int rc = VINF_SUCCESS;
322 RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;
323
324 LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));
325
326 AssertMsg( pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING
327 && !pAioMgr->cRequestsActive,
328 ("Invalid state of the I/O manager\n"));
329
330#ifdef RT_OS_WINDOWS
331 /*
332 * Reopen the files of all assigned endpoints first so we can assign them to the new
333 * I/O context.
334 */
335 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
336
337 while (pCurr)
338 {
339 RTFileClose(pCurr->File);
340 rc = RTFileOpen(&pCurr->File, pCurr->Core.pszUri, pCurr->fFlags);
341 AssertRC(rc);
342
343 pCurr = pCurr->AioMgr.pEndpointNext;
344 }
345#endif
346
347 /* Create the new bigger context. */
348 pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;
349
350 rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
351 if (rc == VERR_OUT_OF_RANGE)
352 rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);
353
354 if (RT_SUCCESS(rc))
355 {
356 /* Close the old context. */
357 rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
358 AssertRC(rc);
359
360 pAioMgr->hAioCtx = hAioCtxNew;
361
362 /* Create a new I/O task handle array */
363 uint32_t cReqEntriesNew = pAioMgr->cRequestsActiveMax + 1;
364 RTFILEAIOREQ *pahReqNew = (RTFILEAIOREQ *)RTMemAllocZ(cReqEntriesNew * sizeof(RTFILEAIOREQ));
365
366 if (pahReqNew)
367 {
368 /* Copy the cached request handles. */
369 for (uint32_t iReq = 0; iReq < pAioMgr->cReqEntries; iReq++)
370 pahReqNew[iReq] = pAioMgr->pahReqsFree[iReq];
371
372 RTMemFree(pAioMgr->pahReqsFree);
373 pAioMgr->pahReqsFree = pahReqNew;
374 pAioMgr->cReqEntries = cReqEntriesNew;
375 LogFlowFunc(("I/O manager increased to handle a maximum of %u requests\n",
376 pAioMgr->cRequestsActiveMax));
377 }
378 else
379 rc = VERR_NO_MEMORY;
380 }
381
382#ifdef RT_OS_WINDOWS
383 /* Assign the file to the new context. */
384 pCurr = pAioMgr->pEndpointsHead;
385
386 while (pCurr)
387 {
388 rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->File);
389 AssertRC(rc);
390
391 pCurr = pCurr->AioMgr.pEndpointNext;
392 }
393#endif
394
395 if (RT_FAILURE(rc))
396 {
397 LogFlow(("Increasing size of the I/O manager failed with rc=%Rrc\n", rc));
398 pAioMgr->cRequestsActiveMax -= PDMACEPFILEMGR_REQS_STEP;
399 }
400
401 pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
402 LogFlowFunc(("returns rc=%Rrc\n", rc));
403
404 return rc;
405}
406
407/**
408 * Checks if a given status code is fatal.
409 * Non-fatal errors can be fixed by migrating the endpoint to a
410 * failsafe manager.
411 *
412 * @returns true if the error is fatal and migrating to a failsafe manager doesn't help,
413 * false if the error can be fixed by a migration (e.g. an image on an NFS disk).
414 * @param rcReq The status code to check.
415 */
416DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
417{
418 return rcReq == VERR_DEV_IO_ERROR
419 || rcReq == VERR_FILE_IO_ERROR
420 || rcReq == VERR_DISK_IO_ERROR
421 || rcReq == VERR_DISK_FULL
422 || rcReq == VERR_FILE_TOO_BIG;
423}
424
425/**
426 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
427 *
428 * @returns VBox status code
429 * @param pAioMgr The I/O manager the error occurred on.
430 * @param rc The error code.
431 */
432static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
433{
434 LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
435 pAioMgr, rc));
436 LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
437 LogRel(("AIOMgr: Please contact the product vendor\n"));
438
439 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
440
441 pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
442 ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);
443
444 AssertMsgFailed(("Implement\n"));
445 return VINF_SUCCESS;
446}
447
448/**
449 * Put a list of tasks in the pending request list of an endpoint.
450 */
451DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
452{
453 /* Append the tasks to the pending list. */
454 if (!pEndpoint->AioMgr.pReqsPendingHead)
455 {
456 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
457 pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
458 }
459 else
460 {
461 Assert(pEndpoint->AioMgr.pReqsPendingTail);
462 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
463 }
464
465 /* Update the tail. */
466 while (pTaskHead->pNext)
467 pTaskHead = pTaskHead->pNext;
468
469 pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
470 pTaskHead->pNext = NULL;
471}
472
473/**
474 * Put one task in the pending request list of an endpoint.
475 */
476DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
477{
478 /* Append the task to the pending list. */
479 if (!pEndpoint->AioMgr.pReqsPendingHead)
480 {
481 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
482 pEndpoint->AioMgr.pReqsPendingHead = pTask;
483 }
484 else
485 {
486 Assert(pEndpoint->AioMgr.pReqsPendingTail);
487 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
488 }
489
490 pEndpoint->AioMgr.pReqsPendingTail = pTask;
491 pTask->pNext = NULL;
492}
493
494/**
495 * Allocates an async I/O request.
496 *
497 * @returns Handle to the request.
498 * @param pAioMgr The I/O manager.
499 */
500static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
501{
502 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
503
504 /* Get a request handle. */
505 if (pAioMgr->iFreeEntry > 0)
506 {
507 pAioMgr->iFreeEntry--;
508 hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeEntry];
509 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = NIL_RTFILEAIOREQ;
510 Assert(hReq != NIL_RTFILEAIOREQ);
511 }
512 else
513 {
514 int rc = RTFileAioReqCreate(&hReq);
515 AssertRC(rc);
516 }
517
518 return hReq;
519}
520
521/**
522 * Frees an async I/O request handle.
523 *
524 * @returns nothing.
525 * @param pAioMgr The I/O manager.
526 * @param hReq The I/O request handle to free.
527 */
528static void pdmacFileAioMgrNormalRequestFree(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
529{
530 Assert(pAioMgr->iFreeEntry < pAioMgr->cReqEntries);
531 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] == NIL_RTFILEAIOREQ);
532
533 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = hReq;
534 pAioMgr->iFreeEntry++;
535}
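/*
 * RequestAlloc/RequestFree above implement a simple LIFO cache of request
 * handles so the hot path avoids RTFileAioReqCreate/RTFileAioReqDestroy
 * churn. The general pattern, reduced to its core (HANDLECACHE and the two
 * helpers are hypothetical names for illustration only):
 */
#if 0 /* illustrative sketch, not part of the original file */
typedef struct HANDLECACHE
{
    void   **pahFree;   /* Stack of cached handles. */
    unsigned iFree;     /* Number of cached handles / next push slot. */
    unsigned cEntries;  /* Capacity; must cover all outstanding handles. */
} HANDLECACHE;

static void *cacheAlloc(HANDLECACHE *pCache, void *(*pfnCreate)(void))
{
    if (pCache->iFree > 0)
        return pCache->pahFree[--pCache->iFree]; /* Pop a cached handle. */
    return pfnCreate();                          /* Cache empty: create one. */
}

static void cacheFree(HANDLECACHE *pCache, void *hHandle)
{
    pCache->pahFree[pCache->iFree++] = hHandle;  /* Push it back for reuse. */
}
#endif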
536
537/**
538 * Wrapper around RTFileAioCtxSubmit() which also does the error handling.
539 */
540static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
541 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
542 PRTFILEAIOREQ pahReqs, unsigned cReqs)
543{
544 int rc;
545
546 pAioMgr->cRequestsActive += cReqs;
547 pEndpoint->AioMgr.cRequestsActive += cReqs;
548
549 LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
550 LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));
551
552 rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
553 if (RT_FAILURE(rc))
554 {
555 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
556 unsigned cReqsResubmit = 0;
557 RTFILEAIOREQ ahReqsResubmit[20];
558
559 /*
560 * We ran out of resources.
561 * We need to check which requests were actually queued
562 * and put the rest back on the pending list.
563 */
564 for (size_t i = 0; i < cReqs; i++)
565 {
566 int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
567
568 if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
569 {
570 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);
571
572 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
573 {
574 /* Mark as not supported. */
575 if (rcReq != VERR_FILE_AIO_NOT_SUBMITTED)
576 {
577 LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
578 pEndpoint->fAsyncFlushSupported = false;
579 pdmacFileAioMgrNormalRequestFree(pAioMgr, pahReqs[i]);
580 rc = VINF_SUCCESS;
581 }
582 else
583 {
584 AssertMsg(rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES, ("Flush wasn't submitted but we are not out of resources\n"));
585 /* Clear the pending flush */
586 pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
587 Assert(pEndpoint->pFlushReq == pTask);
588 pEndpoint->pFlushReq = NULL;
589 }
590 }
591 else
592 {
593 AssertMsg(rcReq == VERR_FILE_AIO_NOT_SUBMITTED,
594 ("Request returned unexpected return code: rc=%Rrc\n", rcReq));
595
596 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
597 {
598 PPDMACTASKFILE pTasksWaiting;
599
600 pdmacFileAioMgrNormalRequestFree(pAioMgr, pahReqs[i]);
601
602 if (pTask->cbBounceBuffer)
603 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
604
605 pTask->fPrefetch = false;
606 pTask->cbBounceBuffer = 0;
607
608 /* Free the lock and process pending tasks if necessary. */
609 pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
610
611 pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
612 if (pTasksWaiting)
613 pdmacFileAioMgrEpAddTaskList(pEndpoint, pTasksWaiting);
614 }
615 else
616 {
617 ahReqsResubmit[cReqsResubmit] = pahReqs[i];
618 cReqsResubmit++;
619 }
620 }
621
622 pEndpoint->AioMgr.cRequestsActive--;
623 pAioMgr->cRequestsActive--;
624
625 if (cReqsResubmit == RT_ELEMENTS(ahReqsResubmit))
626 {
627 int rc2 = RTFileAioCtxSubmit(pAioMgr->hAioCtx, ahReqsResubmit, cReqsResubmit);
628 AssertRC(rc2);
629 cReqsResubmit = 0;
630 }
631 }
632
633 /* Resubmit tasks. */
634 if (cReqsResubmit)
635 {
636 int rc2 = RTFileAioCtxSubmit(pAioMgr->hAioCtx, ahReqsResubmit, cReqsResubmit);
637 AssertRC(rc2);
638 cReqsResubmit = 0;
639 }
640 else if ( pEndpoint->pFlushReq
641 && !pAioMgr->cRequestsActive
642 && !pEndpoint->fAsyncFlushSupported)
643 {
644 /*
645 * Complete a pending flush if we don't have requests enqueued and the host doesn't support
646 * the async flush API.
647 * This happens only if we just noticed that async flushes are not supported
648 * and the only active request was a flush.
649 */
650 PPDMACTASKFILE pFlush = pEndpoint->pFlushReq;
651 pEndpoint->pFlushReq = NULL;
652 pFlush->pfnCompleted(pFlush, pFlush->pvUser, VINF_SUCCESS);
653 pdmacFileTaskFree(pEndpoint, pFlush);
654 }
655 }
656
657 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
658 {
659 pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;
660
661 /* Print an entry in the release log */
662 if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
663 {
664 pEpClass->fOutOfResourcesWarningPrinted = true;
665 LogRel(("AIOMgr: Host limits number of active IO requests to %u. Expect a performance impact.\n",
666 pAioMgr->cRequestsActive));
667 }
668 }
669
670 LogFlow(("Removed requests. I/O manager has a total of %u active requests now\n", pAioMgr->cRequestsActive));
671 LogFlow(("Endpoint has a total of %u active requests now\n", pEndpoint->AioMgr.cRequestsActive));
672 }
673
674 return rc;
675}
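/*
 * The recovery above relies on RTFileAioReqGetRC() returning
 * VERR_FILE_AIO_IN_PROGRESS for requests the host actually accepted. A
 * stripped-down sketch of that pattern (the real code additionally resubmits
 * in batches, handles flushes, and puts deferred tasks back on the endpoint;
 * submitWithRecovery is a hypothetical name):
 */
#if 0 /* illustrative sketch, not part of the original file */
static int submitWithRecovery(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = RTFileAioCtxSubmit(hAioCtx, pahReqs, cReqs);
    if (RT_FAILURE(rc))
        for (size_t i = 0; i < cReqs; i++)
        {
            int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);
            if (rcReq == VERR_FILE_AIO_IN_PROGRESS)
                continue; /* This request made it to the host. */
            /* Not submitted: resubmit it later or defer the task. */
        }
    return rc;
}
#endif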
676
677static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
678 RTFOFF offStart, size_t cbRange,
679 PPDMACTASKFILE pTask)
680{
681 PPDMACFILERANGELOCK pRangeLock = NULL; /**< Range lock. */
682
683 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
684 || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
685 ("Invalid task type %d\n", pTask->enmTransferType));
686
687 pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
688 if (!pRangeLock)
689 {
690 pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
691 /* Check if we intersect with the range. */
692 if ( !pRangeLock
693 || !( (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
694 && (pRangeLock->Core.KeyLast) >= offStart))
695 {
696 pRangeLock = NULL; /* False alarm */
697 }
698 }
699
700 /* Check whether we have one of the situations explained below */
701 if ( pRangeLock
702#if 0 /** @todo: later. For now we will just block all requests if they interfere */
703 && ( (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
704 || (!pRangeLock->fReadLock)
705#endif
706 )
707 {
708 /* Add to the list. */
709 pTask->pNext = NULL;
710
711 if (!pRangeLock->pWaitingTasksHead)
712 {
713 Assert(!pRangeLock->pWaitingTasksTail);
714 pRangeLock->pWaitingTasksHead = pTask;
715 pRangeLock->pWaitingTasksTail = pTask;
716 }
717 else
718 {
719 AssertPtr(pRangeLock->pWaitingTasksTail);
720 pRangeLock->pWaitingTasksTail->pNext = pTask;
721 pRangeLock->pWaitingTasksTail = pTask;
722 }
723 return true;
724 }
725
726 return false;
727}
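/*
 * The intersection test above, as a standalone predicate: two closed
 * intervals [Key, KeyLast] and [offStart, offStart + cbRange - 1] overlap
 * iff each one starts no later than the other ends. rangesOverlap is a
 * hypothetical name used for illustration only.
 */
#if 0 /* illustrative sketch, not part of the original file */
static bool rangesOverlap(RTFOFF Key, RTFOFF KeyLast, RTFOFF offStart, size_t cbRange)
{
    return Key     <= offStart + (RTFOFF)cbRange - 1
        && KeyLast >= offStart;
}
/* Example: a lock on [512, 1023] vs. a request for [1024, 1535]:
 * 512 <= 1535 holds but 1023 >= 1024 does not, so there is no overlap. */
#endif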
728
729static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
730 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
731 RTFOFF offStart, size_t cbRange,
732 PPDMACTASKFILE pTask)
733{
734 AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask),
735 ("Range is already locked offStart=%RTfoff cbRange=%u\n",
736 offStart, cbRange));
737
738 PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
739 if (!pRangeLock)
740 return VERR_NO_MEMORY;
741
742 /* Init the lock. */
743 pRangeLock->Core.Key = offStart;
744 pRangeLock->Core.KeyLast = offStart + cbRange - 1;
745 pRangeLock->cRefs = 1;
746 pRangeLock->fReadLock = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
747 pRangeLock->pWaitingTasksHead = NULL;
748 pRangeLock->pWaitingTasksTail = NULL;
749
750 bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
751 AssertMsg(fInserted, ("Range lock was not inserted!\n"));
752
753 /* Let the task point to its lock. */
754 pTask->pRangeLock = pRangeLock;
755
756 return VINF_SUCCESS;
757}
758
759static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
760 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
761 PPDMACFILERANGELOCK pRangeLock)
762{
763 PPDMACTASKFILE pTasksWaitingHead;
764
765 AssertPtr(pRangeLock);
766 Assert(pRangeLock->cRefs == 1);
767
768 RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
769 pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
770 pRangeLock->pWaitingTasksHead = NULL;
771 pRangeLock->pWaitingTasksTail = NULL;
772 RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
773
774 return pTasksWaitingHead;
775}
776
777static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
778 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
779 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
780{
781 int rc = VINF_SUCCESS;
782 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
783 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
784 void *pvBuf = pTask->DataSeg.pvSeg;
785
786 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
787 || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
788 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
789 pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
790
791 pTask->fPrefetch = false;
792 pTask->cbBounceBuffer = 0;
793
794 /*
795 * Before we start to set up the request we have to check whether a task
796 * is already active whose range intersects with ours. We have to defer execution
797 * of this task in two cases:
798 * - The pending task is a write and the current one is either a read or a write.
799 * - The pending task is a read and the current task is a write.
800 *
801 * To check whether a range is currently "locked" we use an AVL tree where every pending task
802 * is stored by its file offset range. The current task will be appended to the waiting list
803 * of the active task and will be executed when the active one completes. (The method below
804 * which checks whether a range is already in use will add the task.)
805 *
806 * This is necessary because of the requirement to align all requests to a 512 byte boundary
807 * which is enforced by the host OS (Linux and Windows atm). It is possible that
808 * we have to process unaligned tasks and need to align them using bounce buffers.
809 * While the data is fetched from the file another request might arrive writing to
810 * the same range. This will result in data corruption if both are executed concurrently.
811 */
812 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
813
814 if (!fLocked)
815 {
816 /* Get a request handle. */
817 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
818 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
819
820 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
821 {
822 /* Grow the file if needed. */
823 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
824 {
825 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
826 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
827 }
828
829 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
830 pTask->Off, pTask->DataSeg.pvSeg,
831 pTask->DataSeg.cbSeg, pTask);
832 }
833 else
834 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
835 pTask->Off, pTask->DataSeg.pvSeg,
836 pTask->DataSeg.cbSeg, pTask);
837 AssertRC(rc);
838
839 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
840 pTask->DataSeg.cbSeg,
841 pTask);
842
843 if (RT_SUCCESS(rc))
844 *phReq = hReq;
845 }
846 else
847 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
848
849 return rc;
850}
851
852static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
853 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
854 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
855{
856 int rc = VINF_SUCCESS;
857 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
858 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
859 void *pvBuf = pTask->DataSeg.pvSeg;
860
861 /*
862 * Check if the alignment requirements are met.
863 * Offset, transfer size and buffer address
864 * need to be on a 512 boundary.
865 */
866 RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
867 size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
868 PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;
869
870 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
871 || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
872 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
873 offStart, cbToTransfer, pEndpoint->cbFile));
874
875 pTask->fPrefetch = false;
876
877 /*
878 * Before we start to set up the request we have to check whether a task
879 * is already active whose range intersects with ours. We have to defer execution
880 * of this task in two cases:
881 * - The pending task is a write and the current one is either a read or a write.
882 * - The pending task is a read and the current task is a write.
883 *
884 * To check whether a range is currently "locked" we use an AVL tree where every pending task
885 * is stored by its file offset range. The current task will be appended to the waiting list
886 * of the active task and will be executed when the active one completes. (The method below
887 * which checks whether a range is already in use will add the task.)
888 *
889 * This is necessary because of the requirement to align all requests to a 512 byte boundary
890 * which is enforced by the host OS (Linux and Windows atm). It is possible that
891 * we have to process unaligned tasks and need to align them using bounce buffers.
892 * While the data is fetched from the file another request might arrive writing to
893 * the same range. This will result in data corruption if both are executed concurrently.
894 */
895 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);
896
897 if (!fLocked)
898 {
899 /* Get a request handle. */
900 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
901 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
902
903 if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
904 || RT_UNLIKELY(offStart != pTask->Off)
905 || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
906 {
907 LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
908 pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));
909
910 /* Create bounce buffer. */
911 pTask->cbBounceBuffer = cbToTransfer;
912
913 AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
914 pTask->Off, offStart));
915 pTask->offBounceBuffer = pTask->Off - offStart;
916
917 /** @todo: I think we need something like an RTMemAllocAligned method here.
918 * Current assumption is that the maximum alignment is 4096 bytes
919 * (GPT disk on Windows)
920 * so we can use RTMemPageAlloc here.
921 */
922 pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
923 if (RT_LIKELY(pTask->pvBounceBuffer))
924 {
925 pvBuf = pTask->pvBounceBuffer;
926
927 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
928 {
929 if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
930 || RT_UNLIKELY(offStart != pTask->Off))
931 {
932 /* We have to fill the buffer first before we can update the data. */
933 LogFlow(("Prefetching data for task %#p\n", pTask));
934 pTask->fPrefetch = true;
935 enmTransferType = PDMACTASKFILETRANSFER_READ;
936 }
937 else
938 memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
939 }
940 }
941 else
942 rc = VERR_NO_MEMORY;
943 }
944 else
945 pTask->cbBounceBuffer = 0;
946
947 if (RT_SUCCESS(rc))
948 {
949 AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
950 ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));
951
952 if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
953 {
954 /* Grow the file if needed. */
955 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
956 {
957 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
958 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
959 }
960
961 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
962 offStart, pvBuf, cbToTransfer, pTask);
963 }
964 else
965 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
966 offStart, pvBuf, cbToTransfer, pTask);
967 AssertRC(rc);
968
969 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask);
970
971 if (RT_SUCCESS(rc))
972 *phReq = hReq;
973 else
974 {
975 /* Cleanup */
976 if (pTask->cbBounceBuffer)
977 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
978 }
979 }
980 }
981 else
982 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
983
984 return rc;
985}
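/*
 * Worked example of the 512 byte alignment math above, assuming an unaligned
 * request with Off = 700 and cbSeg = 100:
 *
 *   offStart        = 700 & ~(512 - 1)                   = 512
 *   cbToTransfer    = RT_ALIGN_Z(100 + (700 - 512), 512) = RT_ALIGN_Z(288, 512) = 512
 *   offBounceBuffer = 700 - 512                          = 188
 *
 * The host thus transfers the aligned range [512, 1023] through the bounce
 * buffer while the guest data lives at offset 188 inside it. For a write the
 * sector is prefetched first (fPrefetch), the 100 guest bytes are patched in,
 * and the whole aligned range is written back.
 */
#if 0 /* illustrative check, not part of the original file */
RTFOFF offStart     = 700 & ~(RTFOFF)(512 - 1);            /* 512 */
size_t cbToTransfer = RT_ALIGN_Z(100 + (700 - 512), 512);  /* 512 */
Assert(offStart == 512 && cbToTransfer == 512);
#endif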
986
987static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
988 PPDMACEPFILEMGR pAioMgr,
989 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
990{
991 RTFILEAIOREQ apReqs[20];
992 unsigned cRequests = 0;
993 unsigned cMaxRequests = pAioMgr->cRequestsActiveMax - pAioMgr->cRequestsActive;
994 int rc = VINF_SUCCESS;
995
996 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
997 ("Trying to process request lists of a non active endpoint!\n"));
998
999 /* Go through the list and queue the requests until we get a flush request */
1000 while ( pTaskHead
1001 && !pEndpoint->pFlushReq
1002 && (pAioMgr->cRequestsActive + cRequests < pAioMgr->cRequestsActiveMax)
1003 && RT_SUCCESS(rc))
1004 {
1005 PPDMACTASKFILE pCurr = pTaskHead;
1006
1007 if (!pdmacFileBwMgrIsTransferAllowed(pEndpoint->pBwMgr, (uint32_t)pCurr->DataSeg.cbSeg))
1008 {
1009 pAioMgr->fBwLimitReached = true;
1010 break;
1011 }
1012
1013 pTaskHead = pTaskHead->pNext;
1014
1015 pCurr->pNext = NULL;
1016
1017 AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
1018 ("Endpoints do not match\n"));
1019
1020 switch (pCurr->enmTransferType)
1021 {
1022 case PDMACTASKFILETRANSFER_FLUSH:
1023 {
1024 /* If there are no data transfer requests pending, this flush request finishes immediately. */
1025 if (pEndpoint->fAsyncFlushSupported)
1026 {
1027 /* Issue a flush to the host. */
1028 RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
1029 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
1030
1031 LogFlow(("Flush request %#p\n", hReq));
1032
1033 rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->File, pCurr);
1034 if (RT_FAILURE(rc))
1035 {
1036 pEndpoint->fAsyncFlushSupported = false;
1037 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1038 rc = VINF_SUCCESS; /* Fake success */
1039 }
1040 else
1041 {
1042 apReqs[cRequests] = hReq;
1043 pEndpoint->AioMgr.cReqsProcessed++;
1044 cRequests++;
1045 }
1046 }
1047
1048 if ( !pEndpoint->AioMgr.cRequestsActive
1049 && !pEndpoint->fAsyncFlushSupported)
1050 {
1051 pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
1052 pdmacFileTaskFree(pEndpoint, pCurr);
1053 }
1054 else
1055 {
1056 Assert(!pEndpoint->pFlushReq);
1057 pEndpoint->pFlushReq = pCurr;
1058 }
1059 break;
1060 }
1061 case PDMACTASKFILETRANSFER_READ:
1062 case PDMACTASKFILETRANSFER_WRITE:
1063 {
1064 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
1065
1066 if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
1067 rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1068 else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
1069 rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1070 else
1071 AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));
1072
1073 AssertRC(rc);
1074
1075 LogFlow(("Read/Write request %#p\n", hReq));
1076
1077 if (hReq != NIL_RTFILEAIOREQ)
1078 {
1079 apReqs[cRequests] = hReq;
1080 pEndpoint->AioMgr.cReqsProcessed++;
1081 cRequests++;
1082 }
1083 break;
1084 }
1085 default:
1086 AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
1087 } /* switch transfer type */
1088
1089 /* Queue the requests if the array is full. */
1090 if (cRequests == RT_ELEMENTS(apReqs))
1091 {
1092 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1093 cRequests = 0;
1094 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1095 ("Unexpected return code\n"));
1096 }
1097 }
1098
1099 if (cRequests)
1100 {
1101 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1102 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1103 ("Unexpected return code rc=%Rrc\n", rc));
1104 }
1105
1106 if (pTaskHead)
1107 {
1108 /* Add the rest of the tasks to the pending list */
1109 pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);
1110
1111 if (RT_UNLIKELY( pAioMgr->cRequestsActiveMax == pAioMgr->cRequestsActive
1112 && !pEndpoint->pFlushReq
1113 && !pAioMgr->fBwLimitReached))
1114 {
1115#if 0
1116 /*
1117 * The I/O manager has no room left for more requests
1118 * but there are still requests to process.
1119 * Create a new I/O manager and let it handle some endpoints.
1120 */
1121 pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
1122#else
1123 /* Grow the I/O manager */
1124 pAioMgr->enmState = PDMACEPFILEMGRSTATE_GROWING;
1125#endif
1126 }
1127 }
1128
1129 /* Insufficient resources are not fatal. */
1130 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
1131 rc = VINF_SUCCESS;
1132
1133 return rc;
1134}
1135
1136/**
1137 * Adds all pending requests for the given endpoint
1138 * until a flush request is encountered or there is no
1139 * request anymore.
1140 *
1141 * @returns VBox status code.
1142 * @param pAioMgr The async I/O manager for the endpoint
1143 * @param pEndpoint The endpoint to get the requests from.
1144 */
1145static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
1146 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
1147{
1148 int rc = VINF_SUCCESS;
1149 PPDMACTASKFILE pTasksHead = NULL;
1150
1151 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
1152 ("Trying to process request lists of a non active endpoint!\n"));
1153
1154 Assert(!pEndpoint->pFlushReq);
1155
1156 /* Check the pending list first */
1157 if (pEndpoint->AioMgr.pReqsPendingHead)
1158 {
1159 LogFlow(("Queuing pending requests first\n"));
1160
1161 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
1162 /*
1163 * Clear the list as the processing routine will insert them into the list
1164 * again if it gets a flush request.
1165 */
1166 pEndpoint->AioMgr.pReqsPendingHead = NULL;
1167 pEndpoint->AioMgr.pReqsPendingTail = NULL;
1168 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1169 AssertRC(rc);
1170 }
1171
1172 if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
1173 {
1174 /* Now the request queue. */
1175 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
1176 if (pTasksHead)
1177 {
1178 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1179 AssertRC(rc);
1180 }
1181 }
1182
1183 return rc;
1184}
1185
1186static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
1187{
1188 int rc = VINF_SUCCESS;
1189 bool fNotifyWaiter = false;
1190
1191 LogFlowFunc((": Enter\n"));
1192
1193 Assert(pAioMgr->fBlockingEventPending);
1194
1195 switch (pAioMgr->enmBlockingEvent)
1196 {
1197 case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
1198 {
1199 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);
1200 AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without an endpoint to add\n"));
1201
1202 pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;
1203
1204 pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
1205 pEndpointNew->AioMgr.pEndpointPrev = NULL;
1206 if (pAioMgr->pEndpointsHead)
1207 pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
1208 pAioMgr->pEndpointsHead = pEndpointNew;
1209
1210 /* Assign the completion point to this file. */
1211 rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
1212 fNotifyWaiter = true;
1213 pAioMgr->cEndpoints++;
1214 break;
1215 }
1216 case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
1217 {
1218 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint);
1219 AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without an endpoint to remove\n"));
1220
1221 pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
1222 fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
1223 break;
1224 }
1225 case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
1226 {
1227 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);
1228 AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without an endpoint to close\n"));
1229
1230 if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1231 {
1232 LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));
1233
1234 /* Make sure all tasks have finished. Process the queues one last time first. */
1235 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
1236 AssertRC(rc);
1237
1238 pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
1239 fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
1240 }
1241 else if ( (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
1242 && (!pEndpointClose->AioMgr.cRequestsActive))
1243 fNotifyWaiter = true;
1244 break;
1245 }
1246 case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
1247 {
1248 pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
1249 if (!pAioMgr->cRequestsActive)
1250 fNotifyWaiter = true;
1251 break;
1252 }
1253 case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
1254 {
1255 pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
1256 break;
1257 }
1258 case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
1259 {
1260 pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
1261 fNotifyWaiter = true;
1262 break;
1263 }
1264 default:
1265 AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
1266 }
1267
1268 if (fNotifyWaiter)
1269 {
1270 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1271 pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;
1272
1273 /* Release the waiting thread. */
1274 LogFlow(("Signalling waiter\n"));
1275 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1276 AssertRC(rc);
1277 }
1278
1279 LogFlowFunc((": Leave\n"));
1280 return rc;
1281}
1282
1283/**
1284 * Checks all endpoints for pending events or new requests.
1285 *
1286 * @returns VBox status code.
1287 * @param pAioMgr The I/O manager handle.
1288 */
1289static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
1290{
1291 /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
1292 int rc = VINF_SUCCESS;
1293 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
1294
1295 pAioMgr->fBwLimitReached = false;
1296
1297 while (pEndpoint)
1298 {
1299 if (!pEndpoint->pFlushReq
1300 && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1301 && !pEndpoint->AioMgr.fMoving)
1302 {
1303 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
1304 if (RT_FAILURE(rc))
1305 return rc;
1306 }
1307 else if ( !pEndpoint->AioMgr.cRequestsActive
1308 && pEndpoint->enmState != PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1309 {
1310 /* Reopen the file so that it can be associated with the context of the new I/O manager. */
1311 RTFileClose(pEndpoint->File);
1312 rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
1313 AssertRC(rc);
1314
1315 if (pEndpoint->AioMgr.fMoving)
1316 {
1317 pEndpoint->AioMgr.fMoving = false;
1318 pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1319 }
1320 else
1321 {
1322 Assert(pAioMgr->fBlockingEventPending);
1323 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1324
1325 /* Release the waiting thread. */
1326 LogFlow(("Signalling waiter\n"));
1327 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1328 AssertRC(rc);
1329 }
1330 }
1331
1332 pEndpoint = pEndpoint->AioMgr.pEndpointNext;
1333 }
1334
1335 return rc;
1336}
1337
1338static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
1339{
1340 int rc = VINF_SUCCESS;
1341 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
1342 size_t cbTransfered = 0;
1343 int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
1344 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
1345 PPDMACTASKFILE pTasksWaiting;
1346
1347 pEndpoint = pTask->pEndpoint;
1348
1349 /*
1350 * It is possible that the request failed on Linux with kernels < 2.6.23
1351 * if the passed buffer was allocated with remap_pfn_range or if the file
1352 * is on an NFS endpoint which does not support async and direct I/O at the same time.
1353 * The endpoint will be migrated to a failsafe manager in case a request fails.
1354 */
1355 if (RT_FAILURE(rcReq))
1356 {
1357 /* Free bounce buffers and the IPRT request. */
1358 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1359
1360 pAioMgr->cRequestsActive--;
1361 pEndpoint->AioMgr.cRequestsActive--;
1362 pEndpoint->AioMgr.cReqsProcessed++;
1363
1364 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
1365 {
1366 LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
1367 pEndpoint->fAsyncFlushSupported = false;
1368 AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
1369 /* The other method will take over now. */
1370 }
1371 else
1372 {
1373 /* Free the lock and process pending tasks if necessary. */
1374 pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
1375 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
1376 AssertRC(rc);
1377
1378 if (pTask->cbBounceBuffer)
1379 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
1380
1381 /*
1382 * Fatal errors are reported to the guest and non-fatal errors
1383 * will cause a migration to the failsafe manager in the hope
1384 * that the error disappears.
1385 */
1386 if (!pdmacFileAioMgrNormalRcIsFatal(rcReq))
1387 {
1388 /* Queue the request on the pending list. */
1389 pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
1390 pEndpoint->AioMgr.pReqsPendingHead = pTask;
1391
1392 /* Create a new failsafe manager if necessary. */
1393 if (!pEndpoint->AioMgr.fMoving)
1394 {
1395 PPDMACEPFILEMGR pAioMgrFailsafe;
1396
1397 LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
1398 RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));
1399
1400 pEndpoint->AioMgr.fMoving = true;
1401
1402 rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
1403 &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
1404 AssertRC(rc);
1405
1406 pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;
1407
1408 /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
1409 pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
1410 }
1411
1412 /* If this was the last request for the endpoint migrate it to the new manager. */
1413 if (!pEndpoint->AioMgr.cRequestsActive)
1414 {
1415 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
1416 Assert(!fReqsPending);
1417
1418 rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1419 AssertRC(rc);
1420 }
1421 }
1422 else
1423 {
1424 pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
1425 pdmacFileTaskFree(pEndpoint, pTask);
1426 }
1427 }
1428 }
1429 else
1430 {
1431 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
1432 {
1433 /* Clear pending flush */
1434 AssertMsg(pEndpoint->pFlushReq == pTask, ("Completed flush request doesn't match active one\n"));
1435 pEndpoint->pFlushReq = NULL;
1436 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1437
1438 pAioMgr->cRequestsActive--;
1439 pEndpoint->AioMgr.cRequestsActive--;
1440 pEndpoint->AioMgr.cReqsProcessed++;
1441
1442 /* Call completion callback */
1443 LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, rcReq));
1444 pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
1445 pdmacFileTaskFree(pEndpoint, pTask);
1446 }
1447 else
1448 {
1449 /*
1450 * Restart an incomplete transfer.
1451 * This usually means that the request will return an error now
1452 * but to get the cause of the error (disk full, file too big, I/O error, ...)
1453 * the transfer needs to be continued.
1454 */
1455 if (RT_UNLIKELY( cbTransfered < pTask->DataSeg.cbSeg
1456 || ( pTask->cbBounceBuffer
1457 && cbTransfered < pTask->cbBounceBuffer)))
1458 {
1459 RTFOFF offStart;
1460 size_t cbToTransfer;
1461 uint8_t *pbBuf = NULL;
1462
1463 LogFlow(("Restarting incomplete transfer %#p (%zu bytes transferred)\n",
1464 pTask, cbTransfered));
1465 Assert(cbTransfered % 512 == 0);
1466
1467 if (pTask->cbBounceBuffer)
1468 {
1469 AssertPtr(pTask->pvBounceBuffer);
1470 offStart = (pTask->Off & ~((RTFOFF)512-1)) + cbTransfered;
1471 cbToTransfer = pTask->cbBounceBuffer - cbTransfered;
1472 pbBuf = (uint8_t *)pTask->pvBounceBuffer + cbTransfered;
1473 }
1474 else
1475 {
1476 Assert(!pTask->pvBounceBuffer);
1477 offStart = pTask->Off + cbTransfered;
1478 cbToTransfer = pTask->DataSeg.cbSeg - cbTransfered;
1479 pbBuf = (uint8_t *)pTask->DataSeg.pvSeg + cbTransfered;
1480 }
1481
1482 if (pTask->fPrefetch || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
1483 {
1484 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File, offStart,
1485 pbBuf, cbToTransfer, pTask);
1486 }
1487 else
1488 {
1489 AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE,
1490 ("Invalid transfer type\n"));
1491 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File, offStart,
1492 pbBuf, cbToTransfer, pTask);
1493 }
1494
1495 AssertRC(rc);
1496 rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &hReq, 1);
1497 AssertRC(rc);
1498 }
1499 else if (pTask->fPrefetch)
1500 {
1501 Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
1502 Assert(pTask->cbBounceBuffer);
1503
1504 memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
1505 pTask->DataSeg.pvSeg,
1506 pTask->DataSeg.cbSeg);
1507
1508 /* Write it now. */
1509 pTask->fPrefetch = false;
1510 size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
1511 RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
1512
1513 /* Grow the file if needed. */
1514 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
1515 {
1516 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
1517 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
1518 }
1519
1520 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
1521 offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
1522 AssertRC(rc);
1523 rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &hReq, 1);
1524 AssertRC(rc);
1525 }
1526 else
1527 {
1528 if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
1529 {
1530 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
1531 memcpy(pTask->DataSeg.pvSeg,
1532 ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
1533 pTask->DataSeg.cbSeg);
1534
1535 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
1536 }
1537
1538 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1539
1540 pAioMgr->cRequestsActive--;
1541 pEndpoint->AioMgr.cRequestsActive--;
1542 pEndpoint->AioMgr.cReqsProcessed++;
1543
1544 /* Free the lock and process pending tasks if necessary. */
1545 pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
1546 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
1547 AssertRC(rc);
1548
1549 /* Call completion callback */
1550 LogFlow(("Task=%#p completed with %Rrc\n", pTask, rcReq));
1551 pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
1552 pdmacFileTaskFree(pEndpoint, pTask);
1553
1554 /*
1555 * If there is no request left on the endpoint but a flush request is set
1556 * it completed now and we notify the owner.
1557 * Furthermore we look for new requests and continue.
1558 */
1559 if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
1560 {
1561 /* Call completion callback */
1562 pTask = pEndpoint->pFlushReq;
1563 pEndpoint->pFlushReq = NULL;
1564
1565 AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));
1566
1567 pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
1568 pdmacFileTaskFree(pEndpoint, pTask);
1569 }
1570 else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
1571 {
1572 /* If the endpoint is about to be migrated do it now. */
1573 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
1574 Assert(!fReqsPending);
1575
1576 rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1577 AssertRC(rc);
1578 }
1579 }
1580 } /* Not a flush request */
1581 } /* request completed successfully */
1582}
1583
1584/** Helper macro for checking error codes. */
1585#define CHECK_RC(pAioMgr, rc) \
1586 if (RT_FAILURE(rc)) \
1587 {\
1588 int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS);\
1589 return rc2;\
1590 }
1591
1592/**
1593 * The normal I/O manager using the RTFileAio* API
1594 *
1595 * @returns VBox status code.
1596 * @param ThreadSelf Handle of the thread.
1597 * @param pvUser Opaque user data.
1598 */
1599int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
1600{
1601 int rc = VINF_SUCCESS;
1602 PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
1603 uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1604
1605 while ( (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
1606 || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING)
1607 || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
1608 {
1609 if (!pAioMgr->cRequestsActive)
1610 {
1611 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
1612 if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
1613 rc = RTSemEventWait(pAioMgr->EventSem, RT_INDEFINITE_WAIT);
1614 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
1615 AssertRC(rc);
1616
1617 LogFlow(("Got woken up\n"));
1618 ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
1619 }
1620
1621 /* Check for an external blocking event first. */
1622 if (pAioMgr->fBlockingEventPending)
1623 {
1624 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1625 CHECK_RC(pAioMgr, rc);
1626 }
1627
1628 if (RT_LIKELY( pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
1629 || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
1630 {
1631 /* We got woken up because an endpoint issued new requests. Queue them. */
1632 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1633 CHECK_RC(pAioMgr, rc);
1634
1635 while ( pAioMgr->cRequestsActive
1636 || pAioMgr->fBwLimitReached)
1637 {
1638 if (pAioMgr->cRequestsActive)
1639 {
1640 RTFILEAIOREQ apReqs[20];
1641 uint32_t cReqsCompleted = 0;
1642 size_t cReqsWait;
1643
1644 if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
1645 cReqsWait = RT_ELEMENTS(apReqs);
1646 else
1647 cReqsWait = pAioMgr->cRequestsActive;
1648
1649 LogFlow(("Waiting for %d of %d tasks to complete\n", 1, cReqsWait));
1650
1651 rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
1652 1,
1653 RT_INDEFINITE_WAIT, apReqs,
1654 cReqsWait, &cReqsCompleted);
1655 if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
1656 CHECK_RC(pAioMgr, rc);
1657
1658 LogFlow(("%d tasks completed\n", cReqsCompleted));
1659
1660 for (uint32_t i = 0; i < cReqsCompleted; i++)
1661 pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);
1662
1663 /* Check for an external blocking event before we go to sleep again. */
1664 if (pAioMgr->fBlockingEventPending)
1665 {
1666 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1667 CHECK_RC(pAioMgr, rc);
1668 }
1669
1670 /* Update load statistics. */
1671 uint64_t uMillisCurr = RTTimeMilliTS();
1672 if (uMillisCurr > uMillisEnd)
1673 {
1674 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;
1675
1676 /* Calculate timespan. */
1677 uMillisCurr -= uMillisEnd;
1678
1679 while (pEndpointCurr)
1680 {
1681 pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
1682 pEndpointCurr->AioMgr.cReqsProcessed = 0;
1683 pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
1684 }
1685
1686 /* Set new update interval */
1687 uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1688 }
1689 }
1690 else
1691 {
1692 /*
1693 * Bandwidth limit reached for all endpoints.
1694 * Yield and wait until we have enough resources again.
1695 */
1696 RTThreadYield();
1697 }
1698
1699 /* Check endpoints for new requests. */
1700 if (pAioMgr->enmState != PDMACEPFILEMGRSTATE_GROWING)
1701 {
1702 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1703 CHECK_RC(pAioMgr, rc);
1704 }
1705 } /* while requests are active. */
1706
1707 if (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
1708 {
1709 rc = pdmacFileAioMgrNormalGrow(pAioMgr);
1710 AssertRC(rc);
1711 Assert(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING);
1712
1713 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1714 CHECK_RC(pAioMgr, rc);
1715 }
1716 } /* if still running */
1717 } /* while running */
1718
1719 LogFlowFunc(("rc=%Rrc\n", rc));
1720 return rc;
1721}
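/*
 * Condensed shape of the manager thread above (a sketch of the control flow
 * only, omitting state checks and error paths):
 *
 *   while (RUNNING || SUSPENDING || GROWING)
 *   {
 *       if (!cRequestsActive)
 *           RTSemEventWait(EventSem);              // sleep until woken
 *       process blocking event, if one is pending; // add/remove/close/...
 *       queue new requests from all endpoints;
 *       while (cRequestsActive || fBwLimitReached)
 *       {
 *           RTFileAioCtxWait(hAioCtx, 1, RT_INDEFINITE_WAIT, ...);
 *           complete the finished requests;
 *           update per-endpoint load statistics periodically;
 *           queue more requests;
 *       }
 *       if (GROWING)
 *           grow the AIO context and the request handle cache;
 *   }
 */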
1722
1723#undef CHECK_RC
1724