VirtualBox

source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp@ 29287

Last change on this file since 29287 was 29228, checked in by vboxsync, 15 years ago

AsyncCompletion: Fake success if async flushes are not supported or we will hang on Windows

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.1 KB
Line 
1/* $Id: PDMAsyncCompletionFileNormal.cpp 29228 2010-05-07 17:08:58Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * Async File I/O manager.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
19#include <iprt/types.h>
20#include <iprt/asm.h>
21#include <iprt/file.h>
22#include <iprt/mem.h>
23#include <iprt/string.h>
24#include <iprt/assert.h>
25#include <VBox/log.h>
26
27#include "PDMAsyncCompletionFileInternal.h"
28
29/** The update period for the I/O load statistics in ms. */
30#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
31/** Maximum number of requests a manager will handle. */
32#define PDMACEPFILEMGR_REQS_STEP 512
33
34/*******************************************************************************
35* Internal functions *
36*******************************************************************************/
37static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
38 PPDMACEPFILEMGR pAioMgr,
39 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
40
41static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
42 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
43 PPDMACFILERANGELOCK pRangeLock);
44
45int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
46{
47 int rc = VINF_SUCCESS;
48
49 pAioMgr->cRequestsActiveMax = PDMACEPFILEMGR_REQS_STEP;
50
51 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
52 if (rc == VERR_OUT_OF_RANGE)
53 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, pAioMgr->cRequestsActiveMax);
54
55 if (RT_SUCCESS(rc))
56 {
57 /* Initialize request handle array. */
58 pAioMgr->iFreeEntry = 0;
59 pAioMgr->cReqEntries = pAioMgr->cRequestsActiveMax;
60 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
61
62 if (pAioMgr->pahReqsFree)
63 {
64 /* Create the range lock memcache. */
65 rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
66 0, UINT32_MAX, NULL, NULL, NULL, 0);
67 if (RT_SUCCESS(rc))
68 return VINF_SUCCESS;
69
70 RTMemFree(pAioMgr->pahReqsFree);
71 }
72 else
73 {
74 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
75 rc = VERR_NO_MEMORY;
76 }
77 }
78
79 return rc;
80}
81
82void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
83{
84 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
85
86 while (pAioMgr->iFreeEntry > 0)
87 {
88 pAioMgr->iFreeEntry--;
89 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] != NIL_RTFILEAIOREQ);
90 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry]);
91 }
92
93 RTMemFree(pAioMgr->pahReqsFree);
94 RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
95}
96
/**
 * Sorts the endpoint list with insertion sort so that endpoints with the
 * highest request rate (cReqsPerSec) end up at the head of the list
 * (non-increasing order, as verified by the DEBUG checks below).
 *
 * @returns nothing.
 * @param   pAioMgr    The I/O manager whose endpoint list is sorted.
 *                     Must have at least one endpoint.
 */
static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
{
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;

    /* Start with the second element; a one-element prefix is trivially sorted. */
    pEpPrev = pAioMgr->pEndpointsHead;
    pEpCurr = pEpPrev->AioMgr.pEndpointNext;

    while (pEpCurr)
    {
        /* Remember the next element to sort because the list might change. */
        pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;

        /* Unlink the current element from the list. */
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;

        if (pPrev)
            pPrev->AioMgr.pEndpointNext = pNext;
        else
            pAioMgr->pEndpointsHead = pNext;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pPrev;

        /* Go back until we reached the place to insert the current endpoint into. */
        /* NOTE(review): pEpPrev is not reset at the top of each iteration, so the
         * scan continues from wherever the previous insertion left it rather than
         * from the element preceding pEpCurr — confirm this is intended. */
        while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
            pEpPrev = pEpPrev->AioMgr.pEndpointPrev;

        /* Link the endpoint into the list. */
        if (pEpPrev)
            pNext = pEpPrev->AioMgr.pEndpointNext;
        else
            pNext = pAioMgr->pEndpointsHead;

        pEpCurr->AioMgr.pEndpointNext = pNext;
        pEpCurr->AioMgr.pEndpointPrev = pEpPrev;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pEpCurr;

        if (pEpPrev)
            pEpPrev->AioMgr.pEndpointNext = pEpCurr;
        else
            pAioMgr->pEndpointsHead = pEpCurr;

        pEpCurr = pEpNextToSort;
    }

#ifdef DEBUG
    /* Validate sorting algorithm: list must be in non-increasing request-rate
     * order and no endpoint may have been lost or duplicated. */
    unsigned cEndpoints = 0;
    pEpCurr = pAioMgr->pEndpointsHead;

    AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
    AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));

    while (pEpCurr)
    {
        cEndpoints++;

        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;

        Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
        Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);

        pEpCurr = pNext;
    }

    AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));

#endif
}
173
174/**
175 * Removes an endpoint from the currently assigned manager.
176 *
177 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
178 * FALSE otherwise.
179 * @param pEndpointRemove The endpoint to remove.
180 */
181static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
182{
183 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
184 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
185 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
186
187 pAioMgr->cEndpoints--;
188
189 if (pPrev)
190 pPrev->AioMgr.pEndpointNext = pNext;
191 else
192 pAioMgr->pEndpointsHead = pNext;
193
194 if (pNext)
195 pNext->AioMgr.pEndpointPrev = pPrev;
196
197 /* Make sure that there is no request pending on this manager for the endpoint. */
198 if (!pEndpointRemove->AioMgr.cRequestsActive)
199 {
200 Assert(!pEndpointRemove->pFlushReq);
201
202 /* Reopen the file so that the new endpoint can reassociate with the file */
203 RTFileClose(pEndpointRemove->File);
204 int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
205 AssertRC(rc);
206 return false;
207 }
208
209 return true;
210}
211
212static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
213{
214 /* Balancing doesn't make sense with only one endpoint. */
215 if (pAioMgr->cEndpoints == 1)
216 return false;
217
218 /* Doesn't make sens to move endpoints if only one produces the whole load */
219 unsigned cEndpointsWithLoad = 0;
220
221 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
222
223 while (pCurr)
224 {
225 if (pCurr->AioMgr.cReqsPerSec)
226 cEndpointsWithLoad++;
227
228 pCurr = pCurr->AioMgr.pEndpointNext;
229 }
230
231 return (cEndpointsWithLoad > 1);
232}
233
234/**
235 * Creates a new I/O manager and spreads the I/O load of the endpoints
236 * between the given I/O manager and the new one.
237 *
238 * @returns nothing.
239 * @param pAioMgr The I/O manager with high I/O load.
240 */
241static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
242{
243 PPDMACEPFILEMGR pAioMgrNew = NULL;
244 int rc = VINF_SUCCESS;
245
246 /*
247 * Check if balancing would improve the situation.
248 */
249 if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
250 {
251 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
252
253 rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
254 if (RT_SUCCESS(rc))
255 {
256 /* We will sort the list by request count per second. */
257 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
258
259 /* Now move some endpoints to the new manager. */
260 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
261 unsigned cReqsOther = 0;
262 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
263
264 while (pCurr)
265 {
266 if (cReqsHere <= cReqsOther)
267 {
268 /*
269 * The other manager has more requests to handle now.
270 * We will keep the current endpoint.
271 */
272 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
273 cReqsHere += pCurr->AioMgr.cReqsPerSec;
274 pCurr = pCurr->AioMgr.pEndpointNext;
275 }
276 else
277 {
278 /* Move to other endpoint. */
279 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
280 cReqsOther += pCurr->AioMgr.cReqsPerSec;
281
282 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
283
284 pCurr = pCurr->AioMgr.pEndpointNext;
285
286 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
287
288 if (fReqsPending)
289 {
290 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
291 pMove->AioMgr.fMoving = true;
292 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
293 }
294 else
295 {
296 pMove->AioMgr.fMoving = false;
297 pMove->AioMgr.pAioMgrDst = NULL;
298 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
299 }
300 }
301 }
302 }
303 else
304 {
305 /* Don't process further but leave a log entry about reduced performance. */
306 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
307 }
308 }
309 else
310 Log(("AIOMgr: Load balancing would not improve anything\n"));
311}
312
/**
 * Increase the maximum number of active requests for the given I/O manager.
 *
 * Replaces the AIO context with a bigger one and grows the request handle
 * array accordingly. On Windows the endpoint files are reopened first so they
 * can be associated with the new context.
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager to grow. Must be in the GROWING state
 *                     with no active requests (asserted below).
 */
static int pdmacFileAioMgrNormalGrow(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    RTFILEAIOCTX hAioCtxNew = NIL_RTFILEAIOCTX;

    LogFlowFunc(("pAioMgr=%#p\n", pAioMgr));

    AssertMsg(    pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING
              && !pAioMgr->cRequestsActive,
              ("Invalid state of the I/O manager\n"));

#ifdef RT_OS_WINDOWS
    /*
     * Reopen the files of all assigned endpoints first so we can assign them to the new
     * I/O context.
     */
    PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;

    while (pCurr)
    {
        RTFileClose(pCurr->File);
        rc = RTFileOpen(&pCurr->File, pCurr->Core.pszUri, pCurr->fFlags);
        AssertRC(rc);

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    /* Create the new bigger context. */
    pAioMgr->cRequestsActiveMax += PDMACEPFILEMGR_REQS_STEP;

    /* Prefer an unlimited context; fall back to the explicit maximum if the host caps it. */
    rc = RTFileAioCtxCreate(&hAioCtxNew, RTFILEAIO_UNLIMITED_REQS);
    if (rc == VERR_OUT_OF_RANGE)
        rc = RTFileAioCtxCreate(&hAioCtxNew, pAioMgr->cRequestsActiveMax);

    if (RT_SUCCESS(rc))
    {
        /* Close the old context. */
        rc = RTFileAioCtxDestroy(pAioMgr->hAioCtx);
        AssertRC(rc);

        pAioMgr->hAioCtx = hAioCtxNew;

        /* Create a new I/O task handle array */
        /* NOTE(review): one entry beyond the request maximum — presumably spare
         * room for an internal request; confirm before changing. */
        uint32_t cReqEntriesNew = pAioMgr->cRequestsActiveMax + 1;
        RTFILEAIOREQ *pahReqNew = (RTFILEAIOREQ *)RTMemAllocZ(cReqEntriesNew * sizeof(RTFILEAIOREQ));

        if (pahReqNew)
        {
            /* Copy the cached request handles. */
            for (uint32_t iReq = 0; iReq < pAioMgr->cReqEntries; iReq++)
                pahReqNew[iReq] = pAioMgr->pahReqsFree[iReq];

            RTMemFree(pAioMgr->pahReqsFree);
            pAioMgr->pahReqsFree = pahReqNew;
            pAioMgr->cReqEntries = cReqEntriesNew;
            LogFlowFunc(("I/O manager increased to handle a maximum of %u requests\n",
                         pAioMgr->cRequestsActiveMax));
        }
        else
            rc = VERR_NO_MEMORY;
    }

#ifdef RT_OS_WINDOWS
    /* Assign the file to the new context. */
    pCurr = pAioMgr->pEndpointsHead;

    while (pCurr)
    {
        rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pCurr->File);
        AssertRC(rc);

        pCurr = pCurr->AioMgr.pEndpointNext;
    }
#endif

    if (RT_FAILURE(rc))
    {
        /* Roll back the limit increase; the old array/context state remains. */
        LogFlow(("Increasing size of the I/O manager failed with rc=%Rrc\n", rc));
        pAioMgr->cRequestsActiveMax -= PDMACEPFILEMGR_REQS_STEP;
    }

    pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
    LogFlowFunc(("returns rc=%Rrc\n", rc));

    return rc;
}
406
407/**
408 * Checks if a given status code is fatal.
409 * Non fatal errors can be fixed by migrating the endpoint to a
410 * failsafe manager.
411 *
412 * @returns true If the error is fatal and migrating to a failsafe manager doesn't help
413 * false If the error can be fixed by a migration. (image on NFS disk for example)
414 * @param rcReq The status code to check.
415 */
416DECLINLINE(bool) pdmacFileAioMgrNormalRcIsFatal(int rcReq)
417{
418 return rcReq == VERR_DEV_IO_ERROR
419 || rcReq == VERR_FILE_IO_ERROR
420 || rcReq == VERR_DISK_IO_ERROR
421 || rcReq == VERR_DISK_FULL
422 || rcReq == VERR_FILE_TOO_BIG;
423}
424
/**
 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
 *
 * @returns VBox status code (currently always VINF_SUCCESS).
 * @param   pAioMgr            The I/O manager the error occurred on.
 * @param   rc                 The error code.
 * @param   RT_SRC_POS_DECL    Source position (file/line/function) of the failure.
 */
static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
{
    LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
            pAioMgr, rc));
    LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
    LogRel(("AIOMgr: Please contact the product vendor\n"));

    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;

    /* Mark this manager as failed and force all managers of this endpoint
     * class to the simple (failsafe) implementation from now on. */
    pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
    ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);

    /* NOTE(review): migrating the endpoints to the failsafe manager is not
     * implemented yet; this assertion fires whenever the fallback is hit. */
    AssertMsgFailed(("Implement\n"));
    return VINF_SUCCESS;
}
447
448/**
449 * Put a list of tasks in the pending request list of an endpoint.
450 */
451DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
452{
453 /* Add the rest of the tasks to the pending list */
454 if (!pEndpoint->AioMgr.pReqsPendingHead)
455 {
456 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
457 pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
458 }
459 else
460 {
461 Assert(pEndpoint->AioMgr.pReqsPendingTail);
462 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
463 }
464
465 /* Update the tail. */
466 while (pTaskHead->pNext)
467 pTaskHead = pTaskHead->pNext;
468
469 pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
470 pTaskHead->pNext = NULL;
471}
472
473/**
474 * Put one task in the pending request list of an endpoint.
475 */
476DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
477{
478 /* Add the rest of the tasks to the pending list */
479 if (!pEndpoint->AioMgr.pReqsPendingHead)
480 {
481 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
482 pEndpoint->AioMgr.pReqsPendingHead = pTask;
483 }
484 else
485 {
486 Assert(pEndpoint->AioMgr.pReqsPendingTail);
487 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
488 }
489
490 pEndpoint->AioMgr.pReqsPendingTail = pTask;
491 pTask->pNext = NULL;
492}
493
494/**
495 * Allocates a async I/O request.
496 *
497 * @returns Handle to the request.
498 * @param pAioMgr The I/O manager.
499 */
500static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
501{
502 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
503
504 /* Get a request handle. */
505 if (pAioMgr->iFreeEntry > 0)
506 {
507 pAioMgr->iFreeEntry--;
508 hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeEntry];
509 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = NIL_RTFILEAIOREQ;
510 Assert(hReq != NIL_RTFILEAIOREQ);
511 }
512 else
513 {
514 int rc = RTFileAioReqCreate(&hReq);
515 AssertRC(rc);
516 }
517
518 return hReq;
519}
520
521/**
522 * Frees a async I/O request handle.
523 *
524 * @returns nothing.
525 * @param pAioMgr The I/O manager.
526 * @param hReq The I/O request handle to free.
527 */
528static void pdmacFileAioMgrNormalRequestFree(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
529{
530 Assert(pAioMgr->iFreeEntry < pAioMgr->cReqEntries);
531 Assert(pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] == NIL_RTFILEAIOREQ);
532
533 pAioMgr->pahReqsFree[pAioMgr->iFreeEntry] = hReq;
534 pAioMgr->iFreeEntry++;
535}
536
/**
 * Wrapper around RTFileAioCtxSubmit() which is also doing error handling.
 *
 * On submission failure every request is inspected: accepted ones stay
 * active, rejected non-flush requests are either resubmitted or requeued on
 * the endpoint's pending list, and a rejected flush disables async flushing
 * for the endpoint (see svn r29228: fake flush success so Windows doesn't hang).
 *
 * @returns VBox status code from RTFileAioCtxSubmit(), possibly overridden
 *          with VINF_SUCCESS on the flush-not-supported path.
 * @param   pAioMgr      The I/O manager submitting the requests.
 * @param   pEndpoint    The endpoint the requests belong to.
 * @param   pahReqs      Array of prepared request handles.
 * @param   cReqs        Number of requests in the array.
 */
static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
                                            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                            PRTFILEAIOREQ pahReqs, unsigned cReqs)
{
    int rc;

    /* Account all requests as active up front; the failure path below
     * subtracts the ones which didn't make it. */
    pAioMgr->cRequestsActive += cReqs;
    pEndpoint->AioMgr.cRequestsActive += cReqs;

    LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
    LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));

    rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
    if (RT_FAILURE(rc))
    {
        PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
        unsigned cReqsResubmit = 0;
        RTFILEAIOREQ ahReqsResubmit[20];

        /*
         * We run out of resources.
         * Need to check which requests got queued
         * and put the rest on the pending list again.
         */
        for (size_t i = 0; i < cReqs; i++)
        {
            int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);

            /* VERR_FILE_AIO_IN_PROGRESS means the host accepted this request. */
            if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
            {
                PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);

                if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
                {
                    /* Mark as not supported. */
                    if (rcReq != VERR_FILE_AIO_NOT_SUBMITTED)
                    {
                        /* The flush itself was rejected: the host doesn't do
                         * async flushes, so disable them for this endpoint. */
                        LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
                        pEndpoint->fAsyncFlushSupported = false;
                        pdmacFileAioMgrNormalRequestFree(pAioMgr, pahReqs[i]);
                        rc = VINF_SUCCESS;
                    }
                    else
                    {
                        /* The flush wasn't submitted at all (resource shortage):
                         * requeue it and clear the pending-flush marker. */
                        AssertMsg(rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES, ("Flush wasn't submitted but we are not out of ressources\n"));
                        /* Clear the pending flush */
                        pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
                        Assert(pEndpoint->pFlushReq == pTask);
                        pEndpoint->pFlushReq = NULL;
                    }
                }
                else
                {
                    AssertMsg(rcReq == VERR_FILE_AIO_NOT_SUBMITTED,
                              ("Request returned unexpected return code: rc=%Rrc\n", rcReq));

                    if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
                    {
                        /* Out of host resources: undo the preparation (bounce
                         * buffer, range lock) and requeue the task. */
                        PPDMACTASKFILE pTasksWaiting;

                        pdmacFileAioMgrNormalRequestFree(pAioMgr, pahReqs[i]);

                        if (pTask->cbBounceBuffer)
                            RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);

                        pTask->fPrefetch = false;
                        pTask->cbBounceBuffer = 0;

                        /* Free the lock and process pending tasks if necessary. */
                        pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);

                        pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
                        if (pTasksWaiting)
                            pdmacFileAioMgrEpAddTaskList(pEndpoint, pTasksWaiting);
                    }
                    else
                    {
                        /* Any other rejection: collect the request for resubmission. */
                        ahReqsResubmit[cReqsResubmit] = pahReqs[i];
                        cReqsResubmit++;
                    }
                }

                /* This request is no longer active. */
                pEndpoint->AioMgr.cRequestsActive--;
                pAioMgr->cRequestsActive--;

                /* Flush the resubmit batch when the local array is full. */
                if (cReqsResubmit == RT_ELEMENTS(ahReqsResubmit))
                {
                    int rc2 = RTFileAioCtxSubmit(pAioMgr->hAioCtx, ahReqsResubmit, cReqsResubmit);
                    AssertRC(rc2);
                    cReqsResubmit = 0;
                }
            }

            /* Resubmit tasks. */
            /* NOTE(review): this runs once per loop iteration, not once after the
             * loop — confirm that per-iteration resubmission is intended. */
            if (cReqsResubmit)
            {
                int rc2 = RTFileAioCtxSubmit(pAioMgr->hAioCtx, ahReqsResubmit, cReqsResubmit);
                AssertRC(rc2);
                cReqsResubmit = 0;
            }
            else if (   pEndpoint->pFlushReq
                     && !pAioMgr->cRequestsActive
                     && !pEndpoint->fAsyncFlushSupported)
            {
                /*
                 * Complete a pending flush if we don't have requests enqueued and the host doesn't support
                 * the async flush API.
                 * Happens only if this we just noticed that this is not supported
                 * and the only active request was a flush.
                 */
                PPDMACTASKFILE pFlush = pEndpoint->pFlushReq;
                pEndpoint->pFlushReq = NULL;
                pFlush->pfnCompleted(pFlush, pFlush->pvUser, VINF_SUCCESS);
                pdmacFileTaskFree(pEndpoint, pFlush);
            }
        }

        if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
        {
            /* Clamp the maximum to what the host actually accepted. */
            pAioMgr->cRequestsActiveMax = pAioMgr->cRequestsActive;

            /* Print an entry in the release log */
            if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
            {
                pEpClass->fOutOfResourcesWarningPrinted = true;
                LogRel(("AIOMgr: Host limits number of active IO requests to %u. Expect a performance impact.\n",
                        pAioMgr->cRequestsActive));
            }
        }

        LogFlow(("Removed requests. I/O manager has a total of %u active requests now\n", pAioMgr->cRequestsActive));
        LogFlow(("Endpoint has a total of %u active requests now\n", pEndpoint->AioMgr.cRequestsActive));
    }

    return rc;
}
676
/**
 * Checks whether the given file range is locked by an active request and, if
 * so, appends the task to the lock's waiting list (it is resumed when the
 * lock is freed, see pdmacFileAioMgrNormalRangeLockFree()).
 *
 * @returns true if the range is locked and the task has been deferred,
 *          false if the range is free.
 * @param   pEndpoint    The endpoint whose lock tree is consulted.
 * @param   offStart     Start offset of the range to check.
 * @param   cbRange      Size of the range in bytes.
 * @param   pTask        The read or write task about to access the range.
 */
static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                               RTFOFF offStart, size_t cbRange,
                                               PPDMACTASKFILE pTask)
{
    PPDMACFILERANGELOCK pRangeLock = NULL; /**< Range lock */

    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
              ("Invalid task type %d\n", pTask->enmTransferType));

    /* Exact lookup first; if that misses, find the nearest range in the tree
     * and test it for an actual overlap with [offStart, offStart+cbRange-1]. */
    pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
    if (!pRangeLock)
    {
        pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
        /* Check if we intersect with the range. */
        if (   !pRangeLock
            || !(   (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
                 && (pRangeLock->Core.KeyLast) >= offStart))
        {
            pRangeLock = NULL; /* False alarm */
        }
    }

    /* Check whether we have one of the situations explained below */
    if (   pRangeLock
#if 0 /** @todo: later. For now we will just block all requests if they interfere */
        && (   (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
            || (!pRangeLock->fReadLock)
#endif
        )
    {
        /* Add to the list. */
        pTask->pNext = NULL;

        if (!pRangeLock->pWaitingTasksHead)
        {
            Assert(!pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksHead = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        else
        {
            AssertPtr(pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksTail->pNext = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        return true;
    }

    return false;
}
728
729static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
730 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
731 RTFOFF offStart, size_t cbRange,
732 PPDMACTASKFILE pTask)
733{
734 AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask),
735 ("Range is already locked offStart=%RTfoff cbRange=%u\n",
736 offStart, cbRange));
737
738 PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
739 if (!pRangeLock)
740 return VERR_NO_MEMORY;
741
742 /* Init the lock. */
743 pRangeLock->Core.Key = offStart;
744 pRangeLock->Core.KeyLast = offStart + cbRange - 1;
745 pRangeLock->cRefs = 1;
746 pRangeLock->fReadLock = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
747 pRangeLock->pWaitingTasksHead = NULL;
748 pRangeLock->pWaitingTasksTail = NULL;
749
750 bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
751 AssertMsg(fInserted, ("Range lock was not inserted!\n"));
752
753 /* Let the task point to its lock. */
754 pTask->pRangeLock = pRangeLock;
755
756 return VINF_SUCCESS;
757}
758
759static PPDMACTASKFILE pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
760 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
761 PPDMACFILERANGELOCK pRangeLock)
762{
763 PPDMACTASKFILE pTasksWaitingHead;
764
765 AssertPtr(pRangeLock);
766 Assert(pRangeLock->cRefs == 1);
767
768 RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
769 pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
770 pRangeLock->pWaitingTasksHead = NULL;
771 pRangeLock->pWaitingTasksTail = NULL;
772 RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
773
774 return pTasksWaitingHead;
775}
776
777static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
778 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
779 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
780{
781 int rc = VINF_SUCCESS;
782 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
783 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
784 void *pvBuf = pTask->DataSeg.pvSeg;
785
786 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
787 || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
788 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
789 pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
790
791 pTask->fPrefetch = false;
792 pTask->cbBounceBuffer = 0;
793
794 /*
795 * Before we start to setup the request we have to check whether there is a task
796 * already active which range intersects with ours. We have to defer execution
797 * of this task in two cases:
798 * - The pending task is a write and the current is either read or write
799 * - The pending task is a read and the current task is a write task.
800 *
801 * To check whether a range is currently "locked" we use the AVL tree where every pending task
802 * is stored by its file offset range. The current task will be added to the active task
803 * and will be executed when the active one completes. (The method below
804 * which checks whether a range is already used will add the task)
805 *
806 * This is neccessary because of the requirement to align all requests to a 512 boundary
807 * which is enforced by the host OS (Linux and Windows atm). It is possible that
808 * we have to process unaligned tasks and need to align them using bounce buffers.
809 * While the data is fetched from the file another request might arrive writing to
810 * the same range. This will result in data corruption if both are executed concurrently.
811 */
812 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
813
814 if (!fLocked)
815 {
816 /* Get a request handle. */
817 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
818 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
819
820 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
821 {
822 /* Grow the file if needed. */
823 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
824 {
825 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
826 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
827 }
828
829 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
830 pTask->Off, pTask->DataSeg.pvSeg,
831 pTask->DataSeg.cbSeg, pTask);
832 }
833 else
834 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
835 pTask->Off, pTask->DataSeg.pvSeg,
836 pTask->DataSeg.cbSeg, pTask);
837 AssertRC(rc);
838
839 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
840 pTask->DataSeg.cbSeg,
841 pTask);
842
843 if (RT_SUCCESS(rc))
844 *phReq = hReq;
845 }
846 else
847 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
848
849 return rc;
850}
851
852static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
853 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
854 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
855{
856 int rc = VINF_SUCCESS;
857 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
858 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
859 void *pvBuf = pTask->DataSeg.pvSeg;
860
861 /*
862 * Check if the alignment requirements are met.
863 * Offset, transfer size and buffer address
864 * need to be on a 512 boundary.
865 */
866 RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
867 size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
868 PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;
869
870 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
871 || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
872 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
873 offStart, cbToTransfer, pEndpoint->cbFile));
874
875 pTask->fPrefetch = false;
876
877 /*
878 * Before we start to setup the request we have to check whether there is a task
879 * already active which range intersects with ours. We have to defer execution
880 * of this task in two cases:
881 * - The pending task is a write and the current is either read or write
882 * - The pending task is a read and the current task is a write task.
883 *
884 * To check whether a range is currently "locked" we use the AVL tree where every pending task
885 * is stored by its file offset range. The current task will be added to the active task
886 * and will be executed when the active one completes. (The method below
887 * which checks whether a range is already used will add the task)
888 *
889 * This is neccessary because of the requirement to align all requests to a 512 boundary
890 * which is enforced by the host OS (Linux and Windows atm). It is possible that
891 * we have to process unaligned tasks and need to align them using bounce buffers.
892 * While the data is fetched from the file another request might arrive writing to
893 * the same range. This will result in data corruption if both are executed concurrently.
894 */
895 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);
896
897 if (!fLocked)
898 {
899 /* Get a request handle. */
900 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
901 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
902
903 if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
904 || RT_UNLIKELY(offStart != pTask->Off)
905 || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
906 {
907 LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
908 pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));
909
910 /* Create bounce buffer. */
911 pTask->cbBounceBuffer = cbToTransfer;
912
913 AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
914 pTask->Off, offStart));
915 pTask->offBounceBuffer = pTask->Off - offStart;
916
917 /** @todo: I think we need something like a RTMemAllocAligned method here.
918 * Current assumption is that the maximum alignment is 4096byte
919 * (GPT disk on Windows)
920 * so we can use RTMemPageAlloc here.
921 */
922 pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
923 if (RT_LIKELY(pTask->pvBounceBuffer))
924 {
925 pvBuf = pTask->pvBounceBuffer;
926
927 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
928 {
929 if ( RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
930 || RT_UNLIKELY(offStart != pTask->Off))
931 {
932 /* We have to fill the buffer first before we can update the data. */
933 LogFlow(("Prefetching data for task %#p\n", pTask));
934 pTask->fPrefetch = true;
935 enmTransferType = PDMACTASKFILETRANSFER_READ;
936 }
937 else
938 memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
939 }
940 }
941 else
942 rc = VERR_NO_MEMORY;
943 }
944 else
945 pTask->cbBounceBuffer = 0;
946
947 if (RT_SUCCESS(rc))
948 {
949 AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
950 ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));
951
952 if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
953 {
954 /* Grow the file if needed. */
955 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
956 {
957 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
958 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
959 }
960
961 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
962 offStart, pvBuf, cbToTransfer, pTask);
963 }
964 else
965 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
966 offStart, pvBuf, cbToTransfer, pTask);
967 AssertRC(rc);
968
969 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask);
970
971 if (RT_SUCCESS(rc))
972 *phReq = hReq;
973 else
974 {
975 /* Cleanup */
976 if (pTask->cbBounceBuffer)
977 RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
978 }
979 }
980 }
981 else
982 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
983
984 return rc;
985}
986
987static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
988 PPDMACEPFILEMGR pAioMgr,
989 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
990{
991 RTFILEAIOREQ apReqs[20];
992 unsigned cRequests = 0;
993 unsigned cMaxRequests = pAioMgr->cRequestsActiveMax - pAioMgr->cRequestsActive;
994 int rc = VINF_SUCCESS;
995
996 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
997 ("Trying to process request lists of a non active endpoint!\n"));
998
999 /* Go through the list and queue the requests until we get a flush request */
1000 while ( pTaskHead
1001 && !pEndpoint->pFlushReq
1002 && (pAioMgr->cRequestsActive + cRequests < pAioMgr->cRequestsActiveMax)
1003 && RT_SUCCESS(rc))
1004 {
1005 PPDMACTASKFILE pCurr = pTaskHead;
1006
1007 if (!pdmacFileBwMgrIsTransferAllowed(pEndpoint->pBwMgr, (uint32_t)pCurr->DataSeg.cbSeg))
1008 {
1009 pAioMgr->fBwLimitReached = true;
1010 break;
1011 }
1012
1013 pTaskHead = pTaskHead->pNext;
1014
1015 pCurr->pNext = NULL;
1016
1017 AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
1018 ("Endpoints do not match\n"));
1019
1020 switch (pCurr->enmTransferType)
1021 {
1022 case PDMACTASKFILETRANSFER_FLUSH:
1023 {
1024 /* If there is no data transfer request this flush request finished immediately. */
1025 if (pEndpoint->fAsyncFlushSupported)
1026 {
1027 /* Issue a flush to the host. */
1028 RTFILEAIOREQ hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
1029 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
1030
1031 rc = RTFileAioReqPrepareFlush(hReq, pEndpoint->File, pCurr);
1032 if (RT_FAILURE(rc))
1033 {
1034 pEndpoint->fAsyncFlushSupported = false;
1035 pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);
1036 rc = VINF_SUCCESS; /* Fake success */
1037 }
1038 else
1039 {
1040 apReqs[cRequests] = hReq;
1041 pEndpoint->AioMgr.cReqsProcessed++;
1042 cRequests++;
1043 }
1044 }
1045
1046 if ( !pEndpoint->AioMgr.cRequestsActive
1047 && !pEndpoint->fAsyncFlushSupported)
1048 {
1049 pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
1050 pdmacFileTaskFree(pEndpoint, pCurr);
1051 }
1052 else
1053 {
1054 Assert(!pEndpoint->pFlushReq);
1055 pEndpoint->pFlushReq = pCurr;
1056 }
1057 break;
1058 }
1059 case PDMACTASKFILETRANSFER_READ:
1060 case PDMACTASKFILETRANSFER_WRITE:
1061 {
1062 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
1063
1064 if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
1065 rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1066 else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
1067 rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
1068 else
1069 AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));
1070
1071 AssertRC(rc);
1072
1073 if (hReq != NIL_RTFILEAIOREQ)
1074 {
1075 apReqs[cRequests] = hReq;
1076 pEndpoint->AioMgr.cReqsProcessed++;
1077 cRequests++;
1078 }
1079 break;
1080 }
1081 default:
1082 AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
1083 } /* switch transfer type */
1084
1085 /* Queue the requests if the array is full. */
1086 if (cRequests == RT_ELEMENTS(apReqs))
1087 {
1088 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1089 cRequests = 0;
1090 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1091 ("Unexpected return code\n"));
1092 }
1093 }
1094
1095 if (cRequests)
1096 {
1097 rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
1098 AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
1099 ("Unexpected return code rc=%Rrc\n", rc));
1100 }
1101
1102 if (pTaskHead)
1103 {
1104 /* Add the rest of the tasks to the pending list */
1105 pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);
1106
1107 if (RT_UNLIKELY( pAioMgr->cRequestsActiveMax == pAioMgr->cRequestsActive
1108 && !pEndpoint->pFlushReq
1109 && !pAioMgr->fBwLimitReached))
1110 {
1111#if 0
1112 /*
1113 * The I/O manager has no room left for more requests
1114 * but there are still requests to process.
1115 * Create a new I/O manager and let it handle some endpoints.
1116 */
1117 pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
1118#else
1119 /* Grow the I/O manager */
1120 pAioMgr->enmState = PDMACEPFILEMGRSTATE_GROWING;
1121#endif
1122 }
1123 }
1124
1125 /* Insufficient resources are not fatal. */
1126 if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
1127 rc = VINF_SUCCESS;
1128
1129 return rc;
1130}
1131
1132/**
1133 * Adds all pending requests for the given endpoint
1134 * until a flush request is encountered or there is no
1135 * request anymore.
1136 *
1137 * @returns VBox status code.
1138 * @param pAioMgr The async I/O manager for the endpoint
1139 * @param pEndpoint The endpoint to get the requests from.
1140 */
1141static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
1142 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
1143{
1144 int rc = VINF_SUCCESS;
1145 PPDMACTASKFILE pTasksHead = NULL;
1146
1147 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
1148 ("Trying to process request lists of a non active endpoint!\n"));
1149
1150 Assert(!pEndpoint->pFlushReq);
1151
1152 /* Check the pending list first */
1153 if (pEndpoint->AioMgr.pReqsPendingHead)
1154 {
1155 LogFlow(("Queuing pending requests first\n"));
1156
1157 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
1158 /*
1159 * Clear the list as the processing routine will insert them into the list
1160 * again if it gets a flush request.
1161 */
1162 pEndpoint->AioMgr.pReqsPendingHead = NULL;
1163 pEndpoint->AioMgr.pReqsPendingTail = NULL;
1164 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1165 AssertRC(rc);
1166 }
1167
1168 if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
1169 {
1170 /* Now the request queue. */
1171 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
1172 if (pTasksHead)
1173 {
1174 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
1175 AssertRC(rc);
1176 }
1177 }
1178
1179 return rc;
1180}
1181
/**
 * Processes one blocking event posted to the I/O manager by another thread
 * (add/remove/close endpoint, shutdown, suspend, resume).
 *
 * The posting thread blocks on EventSemBlock; it is released (and the
 * pending flag cleared) only when fNotifyWaiter ends up true. Some cases
 * intentionally defer the notification until all active requests of the
 * affected endpoint have drained (see pdmacFileAioMgrNormalCheckEndpoints).
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager with a pending blocking event.
 */
static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    bool fNotifyWaiter = false;

    LogFlowFunc((": Enter\n"));

    Assert(pAioMgr->fBlockingEventPending);

    switch (pAioMgr->enmBlockingEvent)
    {
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without a endpoint to add\n"));

            pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;

            /* Link the endpoint at the head of this manager's endpoint list. */
            pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
            pEndpointNew->AioMgr.pEndpointPrev = NULL;
            if (pAioMgr->pEndpointsHead)
                pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
            pAioMgr->pEndpointsHead = pEndpointNew;

            /* Assign the completion point to this file. */
            rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
            fNotifyWaiter = true;
            pAioMgr->cEndpoints++;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

            pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
            /* Only notify now if no requests are still in flight; otherwise the
             * waiter is released once the endpoint drained. */
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without a endpoint to close\n"));

            if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
            {
                LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));

                /* Make sure all tasks finished. Process the queues a last time first. */
                rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
                AssertRC(rc);

                pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
                fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
            }
            else if (   (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
                     && (!pEndpointClose->AioMgr.cRequestsActive))
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
            /* Wait for active requests to drain before releasing the waiter. */
            if (!pAioMgr->cRequestsActive)
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
        {
            /* NOTE(review): the waiter is not signalled here - presumably the
             * manager loop confirms suspension elsewhere; confirm against caller. */
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
            fNotifyWaiter = true;
            break;
        }
        default:
            AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
    }

    if (fNotifyWaiter)
    {
        ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
        pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;

        /* Release the waiting thread. */
        LogFlow(("Signalling waiter\n"));
        rc = RTSemEventSignal(pAioMgr->EventSemBlock);
        AssertRC(rc);
    }

    LogFlowFunc((": Leave\n"));
    return rc;
}
1278
1279/**
1280 * Checks all endpoints for pending events or new requests.
1281 *
1282 * @returns VBox status code.
1283 * @param pAioMgr The I/O manager handle.
1284 */
1285static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
1286{
1287 /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
1288 int rc = VINF_SUCCESS;
1289 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
1290
1291 pAioMgr->fBwLimitReached = false;
1292
1293 while (pEndpoint)
1294 {
1295 if (!pEndpoint->pFlushReq
1296 && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1297 && !pEndpoint->AioMgr.fMoving)
1298 {
1299 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
1300 if (RT_FAILURE(rc))
1301 return rc;
1302 }
1303 else if ( !pEndpoint->AioMgr.cRequestsActive
1304 && pEndpoint->enmState != PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1305 {
1306 /* Reopen the file so that the new endpoint can reassociate with the file */
1307 RTFileClose(pEndpoint->File);
1308 rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
1309 AssertRC(rc);
1310
1311 if (pEndpoint->AioMgr.fMoving)
1312 {
1313 pEndpoint->AioMgr.fMoving = false;
1314 pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1315 }
1316 else
1317 {
1318 Assert(pAioMgr->fBlockingEventPending);
1319 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1320
1321 /* Release the waiting thread. */
1322 LogFlow(("Signalling waiter\n"));
1323 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1324 AssertRC(rc);
1325 }
1326 }
1327
1328 pEndpoint = pEndpoint->AioMgr.pEndpointNext;
1329 }
1330
1331 return rc;
1332}
1333
/**
 * Handles the completion of one host AIO request.
 *
 * Responsibilities, depending on the request outcome:
 *  - Failed flush: disable async flushes for the endpoint and fall back to
 *    the wait-for-all-requests flush method.
 *  - Failed read/write: free resources; non-fatal errors requeue the task
 *    and migrate the endpoint to a failsafe (simple) manager, fatal errors
 *    are reported to the task owner.
 *  - Partial transfer: resubmit the remainder of the request.
 *  - Completed prefetch read: merge the caller's data into the bounce buffer
 *    and issue the actual (aligned) write.
 *  - Normal completion: copy back bounce-buffered reads, release the range
 *    lock, complete the task and handle deferred flushes/migration.
 *
 * @param   pAioMgr    The I/O manager the request belongs to.
 * @param   hReq       The completed IPRT async I/O request handle.
 */
static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
{
    int rc = VINF_SUCCESS;
    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
    size_t cbTransfered = 0;
    int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
    PPDMACTASKFILE pTasksWaiting;

    pEndpoint = pTask->pEndpoint;

    /*
     * It is possible that the request failed on Linux with kernels < 2.6.23
     * if the passed buffer was allocated with remap_pfn_range or if the file
     * is on an NFS endpoint which does not support async and direct I/O at the same time.
     * The endpoint will be migrated to a failsafe manager in case a request fails.
     */
    if (RT_FAILURE(rcReq))
    {
        /* Free bounce buffers and the IPRT request. */
        pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

        pAioMgr->cRequestsActive--;
        pEndpoint->AioMgr.cRequestsActive--;
        pEndpoint->AioMgr.cReqsProcessed++;

        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            LogFlow(("Async flushes are not supported for this endpoint, disabling\n"));
            pEndpoint->fAsyncFlushSupported = false;
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Failed flush request doesn't match active one\n"));
            /* The other method will take over now. */
        }
        else
        {
            /* Free the lock and process pending tasks if neccessary */
            pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
            rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
            AssertRC(rc);

            if (pTask->cbBounceBuffer)
                RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);

            /*
             * Fatal errors are reported to the guest and non-fatal errors
             * will cause a migration to the failsafe manager in the hope
             * that the error disappears.
             */
            if (!pdmacFileAioMgrNormalRcIsFatal(rcReq))
            {
                /* Queue the request on the pending list. */
                pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
                pEndpoint->AioMgr.pReqsPendingHead = pTask;

                /* Create a new failsafe manager if neccessary. */
                if (!pEndpoint->AioMgr.fMoving)
                {
                    PPDMACEPFILEMGR pAioMgrFailsafe;

                    LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
                            RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));

                    pEndpoint->AioMgr.fMoving = true;

                    rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
                                               &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
                    AssertRC(rc);

                    pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;

                    /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
                    pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
                }

                /* If this was the last request for the endpoint migrate it to the new manager. */
                if (!pEndpoint->AioMgr.cRequestsActive)
                {
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
            else
            {
                /* Fatal error: report it to the task owner. */
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);
            }
        }
    }
    else
    {
        if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        {
            /* Clear pending flush */
            AssertMsg(pEndpoint->pFlushReq == pTask, ("Completed flush request doesn't match active one\n"));
            pEndpoint->pFlushReq = NULL;
            pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

            pAioMgr->cRequestsActive--;
            pEndpoint->AioMgr.cRequestsActive--;
            pEndpoint->AioMgr.cReqsProcessed++;

            /* Call completion callback */
            LogFlow(("Flush task=%#p completed with %Rrc\n", pTask, rcReq));
            pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
            pdmacFileTaskFree(pEndpoint, pTask);
        }
        else
        {
            /*
             * Restart an incomplete transfer.
             * This usually means that the request will return an error now
             * but to get the cause of the error (disk full, file too big, I/O error, ...)
             * the transfer needs to be continued.
             */
            if (RT_UNLIKELY(   cbTransfered < pTask->DataSeg.cbSeg
                            || (   pTask->cbBounceBuffer
                                && cbTransfered < pTask->cbBounceBuffer)))
            {
                RTFOFF offStart;
                size_t cbToTransfer;
                uint8_t *pbBuf = NULL;

                LogFlow(("Restarting incomplete transfer %#p (%zu bytes transfered)\n",
                         pTask, cbTransfered));
                /* Partial transfers are expected to stop on a 512 byte sector boundary. */
                Assert(cbTransfered % 512 == 0);

                if (pTask->cbBounceBuffer)
                {
                    /* Continue relative to the aligned start offset inside the bounce buffer. */
                    AssertPtr(pTask->pvBounceBuffer);
                    offStart = (pTask->Off & ~((RTFOFF)512-1)) + cbTransfered;
                    cbToTransfer = pTask->cbBounceBuffer - cbTransfered;
                    pbBuf = (uint8_t *)pTask->pvBounceBuffer + cbTransfered;
                }
                else
                {
                    /* Continue directly in the caller's segment. */
                    Assert(!pTask->pvBounceBuffer);
                    offStart = pTask->Off + cbTransfered;
                    cbToTransfer = pTask->DataSeg.cbSeg - cbTransfered;
                    pbBuf = (uint8_t *)pTask->DataSeg.pvSeg + cbTransfered;
                }

                /* A prefetch read belongs to a write task, so check fPrefetch too. */
                if (pTask->fPrefetch || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                {
                    rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File, offStart,
                                                 pbBuf, cbToTransfer, pTask);
                }
                else
                {
                    AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE,
                              ("Invalid transfer type\n"));
                    rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File, offStart,
                                                  pbBuf, cbToTransfer, pTask);
                }

                AssertRC(rc);
                rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &hReq, 1);
                AssertRC(rc);
            }
            else if (pTask->fPrefetch)
            {
                /* The prefetch read filled the bounce buffer; merge in the
                 * caller's data and issue the actual aligned write. */
                Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
                Assert(pTask->cbBounceBuffer);

                memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                       pTask->DataSeg.pvSeg,
                       pTask->DataSeg.cbSeg);

                /* Write it now. */
                pTask->fPrefetch = false;
                size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
                RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);

                /* Grow the file if needed. */
                if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                {
                    ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                    RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
                }

                rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                              offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
                AssertRC(rc);
                rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &hReq, 1);
                AssertRC(rc);
            }
            else
            {
                /* Task fully completed. */
                if (RT_SUCCESS(rc) && pTask->cbBounceBuffer)
                {
                    /* Copy bounce-buffered read data back to the caller's segment. */
                    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                        memcpy(pTask->DataSeg.pvSeg,
                               ((uint8_t *)pTask->pvBounceBuffer) + pTask->offBounceBuffer,
                               pTask->DataSeg.cbSeg);

                    RTMemPageFree(pTask->pvBounceBuffer, pTask->cbBounceBuffer);
                }

                pdmacFileAioMgrNormalRequestFree(pAioMgr, hReq);

                pAioMgr->cRequestsActive--;
                pEndpoint->AioMgr.cRequestsActive--;
                pEndpoint->AioMgr.cReqsProcessed++;

                /* Free the lock and process pending tasks if neccessary */
                pTasksWaiting = pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
                rc = pdmacFileAioMgrNormalProcessTaskList(pTasksWaiting, pAioMgr, pEndpoint);
                AssertRC(rc);

                /* Call completion callback */
                LogFlow(("Task=%#p completed with %Rrc\n", pTask, rcReq));
                pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
                pdmacFileTaskFree(pEndpoint, pTask);

                /*
                 * If there is no request left on the endpoint but a flush request is set
                 * it completed now and we notify the owner.
                 * Furthermore we look for new requests and continue.
                 */
                if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
                {
                    /* Call completion callback */
                    pTask = pEndpoint->pFlushReq;
                    pEndpoint->pFlushReq = NULL;

                    AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));

                    pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
                    pdmacFileTaskFree(pEndpoint, pTask);
                }
                else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
                {
                    /* If the endpoint is about to be migrated do it now. */
                    bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
                    Assert(!fReqsPending);

                    rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                    AssertRC(rc);
                }
            }
        } /* Not a flush request */
    } /* request completed successfully */
}
1579
/** Helper macro for checking for error codes.
 *
 * On failure the manager's error handler is invoked and the calling function
 * returns its status code. Wrapped in do/while(0) so the macro expands to a
 * single statement (no dangling-else hazard when used in unbraced if/else);
 * arguments are parenthesized to avoid precedence surprises.
 */
#define CHECK_RC(pAioMgr, rc) \
    do \
    { \
        if (RT_FAILURE((rc))) \
        { \
            int rc2 = pdmacFileAioMgrNormalErrorHandler((pAioMgr), (rc), RT_SRC_POS); \
            return rc2; \
        } \
    } while (0)
1587
1588/**
1589 * The normal I/O manager using the RTFileAio* API
1590 *
1591 * @returns VBox status code.
1592 * @param ThreadSelf Handle of the thread.
1593 * @param pvUser Opaque user data.
1594 */
1595int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
1596{
1597 int rc = VINF_SUCCESS;
1598 PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
1599 uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1600
1601 while ( (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
1602 || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING)
1603 || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
1604 {
1605 if (!pAioMgr->cRequestsActive)
1606 {
1607 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
1608 if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
1609 rc = RTSemEventWait(pAioMgr->EventSem, RT_INDEFINITE_WAIT);
1610 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
1611 AssertRC(rc);
1612
1613 LogFlow(("Got woken up\n"));
1614 ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
1615 }
1616
1617 /* Check for an external blocking event first. */
1618 if (pAioMgr->fBlockingEventPending)
1619 {
1620 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1621 CHECK_RC(pAioMgr, rc);
1622 }
1623
1624 if (RT_LIKELY( pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING
1625 || pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING))
1626 {
1627 /* We got woken up because an endpoint issued new requests. Queue them. */
1628 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1629 CHECK_RC(pAioMgr, rc);
1630
1631 while ( pAioMgr->cRequestsActive
1632 || pAioMgr->fBwLimitReached)
1633 {
1634 if (pAioMgr->cRequestsActive)
1635 {
1636 RTFILEAIOREQ apReqs[20];
1637 uint32_t cReqsCompleted = 0;
1638 size_t cReqsWait;
1639
1640 if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
1641 cReqsWait = RT_ELEMENTS(apReqs);
1642 else
1643 cReqsWait = pAioMgr->cRequestsActive;
1644
1645 LogFlow(("Waiting for %d of %d tasks to complete\n", pAioMgr->cRequestsActive, cReqsWait));
1646
1647 rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
1648 cReqsWait,
1649 RT_INDEFINITE_WAIT, apReqs,
1650 RT_ELEMENTS(apReqs), &cReqsCompleted);
1651 if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
1652 CHECK_RC(pAioMgr, rc);
1653
1654 LogFlow(("%d tasks completed\n", cReqsCompleted));
1655
1656 for (uint32_t i = 0; i < cReqsCompleted; i++)
1657 pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);
1658
1659 /* Check for an external blocking event before we go to sleep again. */
1660 if (pAioMgr->fBlockingEventPending)
1661 {
1662 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1663 CHECK_RC(pAioMgr, rc);
1664 }
1665
1666 /* Update load statistics. */
1667 uint64_t uMillisCurr = RTTimeMilliTS();
1668 if (uMillisCurr > uMillisEnd)
1669 {
1670 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;
1671
1672 /* Calculate timespan. */
1673 uMillisCurr -= uMillisEnd;
1674
1675 while (pEndpointCurr)
1676 {
1677 pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
1678 pEndpointCurr->AioMgr.cReqsProcessed = 0;
1679 pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
1680 }
1681
1682 /* Set new update interval */
1683 uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1684 }
1685 }
1686 else
1687 {
1688 /*
1689 * Bandwidth limit reached for all endpoints.
1690 * Yield and wait until we have enough resources again.
1691 */
1692 RTThreadYield();
1693 }
1694
1695 /* Check endpoints for new requests. */
1696 if (pAioMgr->enmState != PDMACEPFILEMGRSTATE_GROWING)
1697 {
1698 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1699 CHECK_RC(pAioMgr, rc);
1700 }
1701 } /* while requests are active. */
1702
1703 if (pAioMgr->enmState == PDMACEPFILEMGRSTATE_GROWING)
1704 {
1705 rc = pdmacFileAioMgrNormalGrow(pAioMgr);
1706 AssertRC(rc);
1707 Assert(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING);
1708
1709 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1710 CHECK_RC(pAioMgr, rc);
1711 }
1712 } /* if still running */
1713 } /* while running */
1714
1715 LogFlowFunc(("rc=%Rrc\n", rc));
1716 return rc;
1717}
1718
1719#undef CHECK_RC
1720
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette