VirtualBox

source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp@ 28112

Last change on this file since 28112 was 28045, checked in by vboxsync, 15 years ago

ASyncCompletion: Fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.1 KB
Line 
1/* $Id: PDMAsyncCompletionFileNormal.cpp 28045 2010-04-07 11:34:11Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * Async File I/O manager.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
23#include <iprt/types.h>
24#include <iprt/asm.h>
25#include <iprt/file.h>
26#include <iprt/mem.h>
27#include <iprt/string.h>
28#include <iprt/assert.h>
29#include <VBox/log.h>
30
31#include "PDMAsyncCompletionFileInternal.h"
32
33/** The update period for the I/O load statistics in ms. */
34#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
35/** Maximum number of requests a manager will handle. */
36#define PDMACEPFILEMGR_REQS_MAX 512 /* @todo: Find better solution wrt. the request number*/
37
38/*******************************************************************************
39* Internal functions *
40*******************************************************************************/
41static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
42 PPDMACEPFILEMGR pAioMgr,
43 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint);
44
45
46int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
47{
48 int rc = VINF_SUCCESS;
49
50 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
51 if (rc == VERR_OUT_OF_RANGE)
52 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, PDMACEPFILEMGR_REQS_MAX);
53
54 if (RT_SUCCESS(rc))
55 {
56 /* Initialize request handle array. */
57 pAioMgr->iFreeEntryNext = 0;
58 pAioMgr->iFreeReqNext = 0;
59 pAioMgr->cReqEntries = PDMACEPFILEMGR_REQS_MAX + 1;
60 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
61
62 if (pAioMgr->pahReqsFree)
63 {
64 /* Create the range lock memcache. */
65 rc = RTMemCacheCreate(&pAioMgr->hMemCacheRangeLocks, sizeof(PDMACFILERANGELOCK),
66 0, UINT32_MAX, NULL, NULL, NULL, 0);
67 if (RT_SUCCESS(rc))
68 return VINF_SUCCESS;
69
70 RTMemFree(pAioMgr->pahReqsFree);
71 }
72 else
73 {
74 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
75 rc = VERR_NO_MEMORY;
76 }
77 }
78
79 return rc;
80}
81
82void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
83{
84 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
85
86 while (pAioMgr->iFreeReqNext != pAioMgr->iFreeEntryNext)
87 {
88 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext]);
89 pAioMgr->iFreeReqNext = (pAioMgr->iFreeReqNext + 1) % pAioMgr->cReqEntries;
90 }
91
92 RTMemFree(pAioMgr->pahReqsFree);
93 RTMemCacheDestroy(pAioMgr->hMemCacheRangeLocks);
94}
95
/**
 * Sorts the endpoint list with insertion sort.
 *
 * Orders the manager's doubly linked endpoint list by descending request
 * rate (AioMgr.cReqsPerSec), so the busiest endpoint ends up at the head.
 *
 * NOTE(review): assumes a non-empty list - the head is dereferenced
 * unconditionally below (the DEBUG validation asserts this too).
 */
static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
{
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;

    /* Start sorting with the second element; a one-element prefix is trivially sorted. */
    pEpPrev = pAioMgr->pEndpointsHead;
    pEpCurr = pEpPrev->AioMgr.pEndpointNext;

    while (pEpCurr)
    {
        /* Remember the next element to sort because the list might change. */
        pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;

        /* Unlink the current element from the list. */
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;

        if (pPrev)
            pPrev->AioMgr.pEndpointNext = pNext;
        else
            pAioMgr->pEndpointsHead = pNext;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pPrev;

        /* Go back until we reached the place to insert the current endpoint into. */
        while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
            pEpPrev = pEpPrev->AioMgr.pEndpointPrev;

        /* Link the endpoint into the list (after pEpPrev, or at the head if pEpPrev is NULL). */
        if (pEpPrev)
            pNext = pEpPrev->AioMgr.pEndpointNext;
        else
            pNext = pAioMgr->pEndpointsHead;

        pEpCurr->AioMgr.pEndpointNext = pNext;
        pEpCurr->AioMgr.pEndpointPrev = pEpPrev;

        if (pNext)
            pNext->AioMgr.pEndpointPrev = pEpCurr;

        if (pEpPrev)
            pEpPrev->AioMgr.pEndpointNext = pEpCurr;
        else
            pAioMgr->pEndpointsHead = pEpCurr;

        /* Continue with the element remembered above. */
        pEpCurr = pEpNextToSort;
    }

#ifdef DEBUG
    /* Validate sorting algorithm: count the nodes and verify the ordering invariant. */
    unsigned cEndpoints = 0;
    pEpCurr = pAioMgr->pEndpointsHead;

    AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
    AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));

    while (pEpCurr)
    {
        cEndpoints++;

        PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
        PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;

        Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
        Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);

        pEpCurr = pNext;
    }

    AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));

#endif
}
172
173/**
174 * Removes an endpoint from the currently assigned manager.
175 *
176 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
177 * FALSE otherwise.
178 * @param pEndpointRemove The endpoint to remove.
179 */
180static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
181{
182 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
183 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
184 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
185
186 pAioMgr->cEndpoints--;
187
188 if (pPrev)
189 pPrev->AioMgr.pEndpointNext = pNext;
190 else
191 pAioMgr->pEndpointsHead = pNext;
192
193 if (pNext)
194 pNext->AioMgr.pEndpointPrev = pPrev;
195
196 /* Make sure that there is no request pending on this manager for the endpoint. */
197 if (!pEndpointRemove->AioMgr.cRequestsActive)
198 {
199 Assert(!pEndpointRemove->pFlushReq);
200
201 /* Reopen the file so that the new endpoint can reassociate with the file */
202 RTFileClose(pEndpointRemove->File);
203 int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
204 AssertRC(rc);
205 return false;
206 }
207
208 return true;
209}
210
211static bool pdmacFileAioMgrNormalIsBalancePossible(PPDMACEPFILEMGR pAioMgr)
212{
213 /* Balancing doesn't make sense with only one endpoint. */
214 if (pAioMgr->cEndpoints == 1)
215 return false;
216
217 /* Doesn't make sens to move endpoints if only one produces the whole load */
218 unsigned cEndpointsWithLoad = 0;
219
220 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead;
221
222 while (pCurr)
223 {
224 if (pCurr->AioMgr.cReqsPerSec)
225 cEndpointsWithLoad++;
226
227 pCurr = pCurr->AioMgr.pEndpointNext;
228 }
229
230 return (cEndpointsWithLoad > 1);
231}
232
233/**
234 * Creates a new I/O manager and spreads the I/O load of the endpoints
235 * between the given I/O manager and the new one.
236 *
237 * @returns nothing.
238 * @param pAioMgr The I/O manager with high I/O load.
239 */
240static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
241{
242 PPDMACEPFILEMGR pAioMgrNew = NULL;
243 int rc = VINF_SUCCESS;
244
245 /*
246 * Check if balancing would improve the situation.
247 */
248 if (pdmacFileAioMgrNormalIsBalancePossible(pAioMgr))
249 {
250 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
251
252 rc = pdmacFileAioMgrCreate(pEpClassFile, &pAioMgrNew, PDMACEPFILEMGRTYPE_ASYNC);
253 if (RT_SUCCESS(rc))
254 {
255 /* We will sort the list by request count per second. */
256 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
257
258 /* Now move some endpoints to the new manager. */
259 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
260 unsigned cReqsOther = 0;
261 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
262
263 while (pCurr)
264 {
265 if (cReqsHere <= cReqsOther)
266 {
267 /*
268 * The other manager has more requests to handle now.
269 * We will keep the current endpoint.
270 */
271 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
272 cReqsHere += pCurr->AioMgr.cReqsPerSec;
273 pCurr = pCurr->AioMgr.pEndpointNext;
274 }
275 else
276 {
277 /* Move to other endpoint. */
278 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr, pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
279 cReqsOther += pCurr->AioMgr.cReqsPerSec;
280
281 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
282
283 pCurr = pCurr->AioMgr.pEndpointNext;
284
285 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
286
287 if (fReqsPending)
288 {
289 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
290 pMove->AioMgr.fMoving = true;
291 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
292 }
293 else
294 {
295 pMove->AioMgr.fMoving = false;
296 pMove->AioMgr.pAioMgrDst = NULL;
297 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
298 }
299 }
300 }
301 }
302 else
303 {
304 /* Don't process further but leave a log entry about reduced performance. */
305 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
306 }
307 }
308 else
309 Log(("AIOMgr: Load balancing would not improve anything\n"));
310}
311
312/**
313 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
314 *
315 * @returns VBox status code
316 * @param pAioMgr The I/O manager the error ocurred on.
317 * @param rc The error code.
318 */
319static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
320{
321 LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
322 pAioMgr, rc));
323 LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
324 LogRel(("AIOMgr: Please contact the product vendor\n"));
325
326 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
327
328 pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
329 ASMAtomicWriteU32((volatile uint32_t *)&pEpClassFile->enmMgrTypeOverride, PDMACEPFILEMGRTYPE_SIMPLE);
330
331 AssertMsgFailed(("Implement\n"));
332 return VINF_SUCCESS;
333}
334
335/**
336 * Put a list of tasks in the pending request list of an endpoint.
337 */
338DECLINLINE(void) pdmacFileAioMgrEpAddTaskList(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTaskHead)
339{
340 /* Add the rest of the tasks to the pending list */
341 if (!pEndpoint->AioMgr.pReqsPendingHead)
342 {
343 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
344 pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
345 }
346 else
347 {
348 Assert(pEndpoint->AioMgr.pReqsPendingTail);
349 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
350 }
351
352 /* Update the tail. */
353 while (pTaskHead->pNext)
354 pTaskHead = pTaskHead->pNext;
355
356 pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
357}
358
359/**
360 * Put one task in the pending request list of an endpoint.
361 */
362DECLINLINE(void) pdmacFileAioMgrEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
363{
364 /* Add the rest of the tasks to the pending list */
365 if (!pEndpoint->AioMgr.pReqsPendingHead)
366 {
367 Assert(!pEndpoint->AioMgr.pReqsPendingTail);
368 pEndpoint->AioMgr.pReqsPendingHead = pTask;
369 }
370 else
371 {
372 Assert(pEndpoint->AioMgr.pReqsPendingTail);
373 pEndpoint->AioMgr.pReqsPendingTail->pNext = pTask;
374 }
375
376 pEndpoint->AioMgr.pReqsPendingTail = pTask;
377}
378
/**
 * Wrapper around RTFileAioCtxSubmit() which is also doing error handling.
 *
 * Accounts all requests as active on both the manager and the endpoint
 * before submitting.  If the host runs out of AIO resources, every request
 * that was not accepted is moved back to the endpoint's pending task list
 * and its accounting is rolled back.
 *
 * @returns VBox status code (the RTFileAioCtxSubmit() result).
 * @param   pAioMgr    The I/O manager submitting the requests.
 * @param   pEndpoint  The endpoint the requests belong to.
 * @param   pahReqs    Array of prepared request handles to submit.
 * @param   cReqs      Number of requests in the array.
 */
static int pdmacFileAioMgrNormalReqsEnqueue(PPDMACEPFILEMGR pAioMgr,
                                            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                            PRTFILEAIOREQ pahReqs, unsigned cReqs)
{
    int rc;

    /* Account all requests as active up front; rolled back below for any
     * request the host refused to accept. */
    pAioMgr->cRequestsActive += cReqs;
    pEndpoint->AioMgr.cRequestsActive += cReqs;

    LogFlow(("Enqueuing %d requests. I/O manager has a total of %d active requests now\n", cReqs, pAioMgr->cRequestsActive));
    LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));

    rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, pahReqs, cReqs);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
        {
            PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;

            /*
             * We run out of resources.
             * Need to check which requests got queued
             * and put the rest on the pending list again.
             */
            if (RT_UNLIKELY(!pEpClass->fOutOfResourcesWarningPrinted))
            {
                pEpClass->fOutOfResourcesWarningPrinted = true;
                LogRel(("AIOMgr: The operating system doesn't have enough resources "
                        "to handle the I/O load of the VM. Expect reduced I/O performance\n"));
            }

            for (size_t i = 0; i < cReqs; i++)
            {
                int rcReq = RTFileAioReqGetRC(pahReqs[i], NULL);

                /* VERR_FILE_AIO_IN_PROGRESS means the request was accepted;
                 * anything else must be a not-submitted request to requeue. */
                if (rcReq != VERR_FILE_AIO_IN_PROGRESS)
                {
                    AssertMsg(rcReq == VERR_FILE_AIO_NOT_SUBMITTED,
                              ("Request returned unexpected return code: rc=%Rrc\n", rcReq));

                    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(pahReqs[i]);

                    /* Put the entry on the free array */
                    pAioMgr->pahReqsFree[pAioMgr->iFreeEntryNext] = pahReqs[i];
                    pAioMgr->iFreeEntryNext = (pAioMgr->iFreeEntryNext + 1) % pAioMgr->cReqEntries;

                    /* Defer the task and undo the active-request accounting for it. */
                    pdmacFileAioMgrEpAddTask(pEndpoint, pTask);
                    pAioMgr->cRequestsActive--;
                    pEndpoint->AioMgr.cRequestsActive--;
                }
            }
            LogFlow(("Removed requests. I/O manager has a total of %d active requests now\n", pAioMgr->cRequestsActive));
            LogFlow(("Endpoint has a total of %d active requests now\n", pEndpoint->AioMgr.cRequestsActive));
        }
        else
            AssertMsgFailed(("Unexpected return code rc=%Rrc\n", rc));
    }

    return rc;
}
442
443/**
444 * Allocates a async I/O request.
445 *
446 * @returns Handle to the request.
447 * @param pAioMgr The I/O manager.
448 */
449static RTFILEAIOREQ pdmacFileAioMgrNormalRequestAlloc(PPDMACEPFILEMGR pAioMgr)
450{
451 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
452
453 /* Get a request handle. */
454 if (pAioMgr->iFreeReqNext != pAioMgr->iFreeEntryNext)
455 {
456 hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext];
457 pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext] = NIL_RTFILEAIOREQ;
458 pAioMgr->iFreeReqNext = (pAioMgr->iFreeReqNext + 1) % pAioMgr->cReqEntries;
459 }
460 else
461 {
462 int rc = RTFileAioReqCreate(&hReq);
463 AssertRC(rc);
464 }
465
466 return hReq;
467}
468
/**
 * Checks whether the file range [offStart, offStart + cbRange - 1] of the
 * endpoint is currently locked by an in-flight request.
 *
 * If the range is locked, the task is appended to the lock's waiting list
 * and will be processed when the lock is released.
 *
 * @returns true if the range is locked and the task was deferred,
 *          false if the range is free.
 * @param   pEndpoint  The endpoint whose range lock tree is consulted.
 * @param   offStart   Start offset of the range to check.
 * @param   cbRange    Size of the range in bytes.
 * @param   pTask      The read/write task wanting to access the range.
 */
static bool pdmacFileAioMgrNormalIsRangeLocked(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                               RTFOFF offStart, size_t cbRange,
                                               PPDMACTASKFILE pTask)
{
    PPDMACFILERANGELOCK pRangeLock = NULL; /** < Range lock */

    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || pTask->enmTransferType == PDMACTASKFILETRANSFER_READ,
               ("Invalid task type %d\n", pTask->enmTransferType));

    /* First try an exact/containing match, then the best fit to detect overlap. */
    pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetRangeGet(pEndpoint->AioMgr.pTreeRangesLocked, offStart);
    if (!pRangeLock)
    {
        pRangeLock = (PPDMACFILERANGELOCK)RTAvlrFileOffsetGetBestFit(pEndpoint->AioMgr.pTreeRangesLocked, offStart, true);
        /* Check if we intersect with the range. */
        if (   !pRangeLock
            || !(   (pRangeLock->Core.Key) <= (offStart + (RTFOFF)cbRange - 1)
                 && (pRangeLock->Core.KeyLast) >= offStart))
        {
            pRangeLock = NULL; /* False alarm */
        }
    }

    /* Check whether we have one of the situations explained below */
    /* NOTE: the #if 0 block removes the read/write discrimination from the
     * condition, so currently ANY overlapping lock defers the task. */
    if (   pRangeLock
#if 0 /** @todo: later. For now we will just block all requests if they interfere */
        && (   (pRangeLock->fReadLock && pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
            || (!pRangeLock->fReadLock)
#endif
        )
    {
        /* Add to the list. */
        pTask->pNext = NULL;

        if (!pRangeLock->pWaitingTasksHead)
        {
            Assert(!pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksHead = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        else
        {
            AssertPtr(pRangeLock->pWaitingTasksTail);
            pRangeLock->pWaitingTasksTail->pNext = pTask;
            pRangeLock->pWaitingTasksTail = pTask;
        }
        return true;
    }

    return false;
}
520
521static int pdmacFileAioMgrNormalRangeLock(PPDMACEPFILEMGR pAioMgr,
522 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
523 RTFOFF offStart, size_t cbRange,
524 PPDMACTASKFILE pTask)
525{
526 AssertMsg(!pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbRange, pTask),
527 ("Range is already locked offStart=%RTfoff cbRange=%u\n",
528 offStart, cbRange));
529
530 PPDMACFILERANGELOCK pRangeLock = (PPDMACFILERANGELOCK)RTMemCacheAlloc(pAioMgr->hMemCacheRangeLocks);
531 if (!pRangeLock)
532 return VERR_NO_MEMORY;
533
534 /* Init the lock. */
535 pRangeLock->Core.Key = offStart;
536 pRangeLock->Core.KeyLast = offStart + cbRange - 1;
537 pRangeLock->cRefs = 1;
538 pRangeLock->fReadLock = pTask->enmTransferType == PDMACTASKFILETRANSFER_READ;
539 pRangeLock->pWaitingTasksHead = NULL;
540 pRangeLock->pWaitingTasksTail = NULL;
541
542 bool fInserted = RTAvlrFileOffsetInsert(pEndpoint->AioMgr.pTreeRangesLocked, &pRangeLock->Core);
543 AssertMsg(fInserted, ("Range lock was not inserted!\n"));
544
545 /* Let the task point to its lock. */
546 pTask->pRangeLock = pRangeLock;
547
548 return VINF_SUCCESS;
549}
550
551static int pdmacFileAioMgrNormalRangeLockFree(PPDMACEPFILEMGR pAioMgr,
552 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
553 PPDMACFILERANGELOCK pRangeLock)
554{
555 PPDMACTASKFILE pTasksWaitingHead;
556
557 AssertPtr(pRangeLock);
558 Assert(pRangeLock->cRefs == 1);
559
560 RTAvlrFileOffsetRemove(pEndpoint->AioMgr.pTreeRangesLocked, pRangeLock->Core.Key);
561 pTasksWaitingHead = pRangeLock->pWaitingTasksHead;
562 pRangeLock->pWaitingTasksHead = NULL;
563 pRangeLock->pWaitingTasksTail = NULL;
564 RTMemCacheFree(pAioMgr->hMemCacheRangeLocks, pRangeLock);
565
566 return pdmacFileAioMgrNormalProcessTaskList(pTasksWaitingHead, pAioMgr, pEndpoint);
567}
568
569static int pdmacFileAioMgrNormalTaskPrepareBuffered(PPDMACEPFILEMGR pAioMgr,
570 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
571 PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
572{
573 int rc = VINF_SUCCESS;
574 RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
575 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
576 void *pvBuf = pTask->DataSeg.pvSeg;
577
578 AssertMsg( pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
579 || (uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) <= pEndpoint->cbFile,
580 ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
581 pTask->Off, pTask->DataSeg.cbSeg, pEndpoint->cbFile));
582
583 pTask->fPrefetch = false;
584 pTask->fBounceBuffer = false;
585
586 /*
587 * Before we start to setup the request we have to check whether there is a task
588 * already active which range intersects with ours. We have to defer execution
589 * of this task in two cases:
590 * - The pending task is a write and the current is either read or write
591 * - The pending task is a read and the current task is a write task.
592 *
593 * To check whether a range is currently "locked" we use the AVL tree where every pending task
594 * is stored by its file offset range. The current task will be added to the active task
595 * and will be executed when the active one completes. (The method below
596 * which checks whether a range is already used will add the task)
597 *
598 * This is neccessary because of the requirement to align all requests to a 512 boundary
599 * which is enforced by the host OS (Linux and Windows atm). It is possible that
600 * we have to process unaligned tasks and need to align them using bounce buffers.
601 * While the data is fetched from the file another request might arrive writing to
602 * the same range. This will result in data corruption if both are executed concurrently.
603 */
604 bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, pTask->Off, pTask->DataSeg.cbSeg, pTask);
605
606 if (!fLocked)
607 {
608 /* Get a request handle. */
609 hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
610 AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));
611
612 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
613 {
614 /* Grow the file if needed. */
615 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
616 {
617 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
618 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
619 }
620
621 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
622 pTask->Off, pTask->DataSeg.pvSeg,
623 pTask->DataSeg.cbSeg, pTask);
624 }
625 else
626 rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
627 pTask->Off, pTask->DataSeg.pvSeg,
628 pTask->DataSeg.cbSeg, pTask);
629 AssertRC(rc);
630
631 rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, pTask->Off,
632 pTask->DataSeg.cbSeg,
633 pTask);
634
635 if (RT_SUCCESS(rc))
636 *phReq = hReq;
637 }
638 else
639 LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));
640
641 return rc;
642}
643
/**
 * Prepares an AIO request for a read/write task on a non-buffered endpoint.
 *
 * Offset, transfer size and buffer address have to be aligned to 512 bytes
 * for the host; unaligned tasks are routed through a page-allocated bounce
 * buffer, with a prefetching read-modify-write cycle for unaligned writes.
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager.
 * @param   pEndpoint  The endpoint the task belongs to.
 * @param   pTask      The read/write task to prepare.
 * @param   phReq      Where to store the prepared request handle on success.
 */
static int pdmacFileAioMgrNormalTaskPrepareNonBuffered(PPDMACEPFILEMGR pAioMgr,
                                                       PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint,
                                                       PPDMACTASKFILE pTask, PRTFILEAIOREQ phReq)
{
    int rc = VINF_SUCCESS;
    RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
    void *pvBuf = pTask->DataSeg.pvSeg;

    /*
     * Check if the alignment requirements are met.
     * Offset, transfer size and buffer address
     * need to be on a 512 boundary.
     */
    RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);   /* Round the offset down to the sector start. */
    size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg + (pTask->Off - offStart), 512);
    PDMACTASKFILETRANSFER enmTransferType = pTask->enmTransferType;

    AssertMsg(   pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE
              || (uint64_t)(offStart + cbToTransfer) <= pEndpoint->cbFile,
              ("Read exceeds file size offStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
               offStart, cbToTransfer, pEndpoint->cbFile));

    pTask->fPrefetch = false;

    /*
     * Before we start to setup the request we have to check whether there is a task
     * already active which range intersects with ours. We have to defer execution
     * of this task in two cases:
     *     - The pending task is a write and the current is either read or write
     *     - The pending task is a read and the current task is a write task.
     *
     * To check whether a range is currently "locked" we use the AVL tree where every pending task
     * is stored by its file offset range. The current task will be added to the active task
     * and will be executed when the active one completes. (The method below
     * which checks whether a range is already used will add the task)
     *
     * This is necessary because of the requirement to align all requests to a 512 boundary
     * which is enforced by the host OS (Linux and Windows atm). It is possible that
     * we have to process unaligned tasks and need to align them using bounce buffers.
     * While the data is fetched from the file another request might arrive writing to
     * the same range. This will result in data corruption if both are executed concurrently.
     */
    bool fLocked = pdmacFileAioMgrNormalIsRangeLocked(pEndpoint, offStart, cbToTransfer, pTask);

    if (!fLocked)
    {
        /* Get a request handle. */
        hReq = pdmacFileAioMgrNormalRequestAlloc(pAioMgr);
        AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));

        /* A bounce buffer is needed when size, offset or the buffer address
         * itself fail the host's alignment restrictions. */
        if (   RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
            || RT_UNLIKELY(offStart != pTask->Off)
            || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
        {
            LogFlow(("Using bounce buffer for task %#p cbToTransfer=%zd cbSeg=%zd offStart=%RTfoff off=%RTfoff\n",
                     pTask, cbToTransfer, pTask->DataSeg.cbSeg, offStart, pTask->Off));

            /* Create bounce buffer. */
            pTask->fBounceBuffer = true;

            AssertMsg(pTask->Off >= offStart, ("Overflow in calculation Off=%llu offStart=%llu\n",
                      pTask->Off, offStart));
            /* Where the caller's data lives inside the aligned bounce buffer. */
            pTask->uBounceBufOffset = pTask->Off - offStart;

            /** @todo: I think we need something like a RTMemAllocAligned method here.
             * Current assumption is that the maximum alignment is 4096byte
             * (GPT disk on Windows)
             * so we can use RTMemPageAlloc here.
             */
            pTask->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
            if (RT_LIKELY(pTask->pvBounceBuffer))
            {
                pvBuf = pTask->pvBounceBuffer;

                if (pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
                {
                    if (   RT_UNLIKELY(cbToTransfer != pTask->DataSeg.cbSeg)
                        || RT_UNLIKELY(offStart != pTask->Off))
                    {
                        /* We have to fill the buffer first before we can update the data. */
                        LogFlow(("Prefetching data for task %#p\n", pTask));
                        pTask->fPrefetch = true;
                        enmTransferType = PDMACTASKFILETRANSFER_READ;
                    }
                    else
                        memcpy(pvBuf, pTask->DataSeg.pvSeg, pTask->DataSeg.cbSeg);
                }
            }
            else
                rc = VERR_NO_MEMORY;
        }
        else
            pTask->fBounceBuffer = false;

        if (RT_SUCCESS(rc))
        {
            AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
                      ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));

            if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
            {
                /* Grow the file if needed. */
                if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                {
                    ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                    RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
                }

                rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                              offStart, pvBuf, cbToTransfer, pTask);
            }
            else
                rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
                                             offStart, pvBuf, cbToTransfer, pTask);
            AssertRC(rc);

            rc = pdmacFileAioMgrNormalRangeLock(pAioMgr, pEndpoint, offStart, cbToTransfer, pTask);

            if (RT_SUCCESS(rc))
                *phReq = hReq;
            else
            {
                /* Cleanup */
                if (pTask->fBounceBuffer)
                    RTMemPageFree(pTask->pvBounceBuffer);
            }
        }
    }
    else
        LogFlow(("Task %#p was deferred because the access range is locked\n", pTask));

    return rc;
}
778
/**
 * Processes a list of tasks for the given endpoint: prepares AIO requests
 * and submits them in batches.
 *
 * Processing stops early when a flush request is encountered, the bandwidth
 * limit kicks in, the manager's active-request budget is exhausted or a
 * submission fails; the remaining tasks are put on the endpoint's pending
 * list.  If the manager ran out of request slots, load balancing is tried.
 *
 * @returns VBox status code.
 * @param   pTaskHead  Head of the task list to process.
 * @param   pAioMgr    The I/O manager.
 * @param   pEndpoint  The (active) endpoint the tasks belong to.
 */
static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
                                                PPDMACEPFILEMGR pAioMgr,
                                                PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    RTFILEAIOREQ apReqs[20];                  /* Submission batch buffer. */
    unsigned cRequests = 0;
    unsigned cMaxRequests = PDMACEPFILEMGR_REQS_MAX - pAioMgr->cRequestsActive;
    int rc = VINF_SUCCESS;

    AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
              ("Trying to process request lists of a non active endpoint!\n"));

    /* Go through the list and queue the requests until we get a flush request */
    while (   pTaskHead
           && !pEndpoint->pFlushReq
           && (cMaxRequests > 0)
           && RT_SUCCESS(rc))
    {
        PPDMACTASKFILE pCurr = pTaskHead;

        /* Respect the endpoint's bandwidth limit; remaining tasks stay queued. */
        if (!pdmacFileBwMgrIsTransferAllowed(pEndpoint->pBwMgr, (uint32_t)pCurr->DataSeg.cbSeg))
        {
            pAioMgr->fBwLimitReached = true;
            break;
        }

        pTaskHead = pTaskHead->pNext;

        pCurr->pNext = NULL;

        AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
                  ("Endpoints do not match\n"));

        switch (pCurr->enmTransferType)
        {
            case PDMACTASKFILETRANSFER_FLUSH:
            {
                /* If there is no data transfer request this flush request finished immediately. */
                if (!pEndpoint->AioMgr.cRequestsActive)
                {
                    pCurr->pfnCompleted(pCurr, pCurr->pvUser, VINF_SUCCESS);
                    pdmacFileTaskFree(pEndpoint, pCurr);
                }
                else
                {
                    /* Park the flush; the loop condition above stops further queuing. */
                    Assert(!pEndpoint->pFlushReq);
                    pEndpoint->pFlushReq = pCurr;
                }
                break;
            }
            case PDMACTASKFILETRANSFER_READ:
            case PDMACTASKFILETRANSFER_WRITE:
            {
                RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;

                if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_BUFFERED)
                    rc = pdmacFileAioMgrNormalTaskPrepareBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
                else if (pEndpoint->enmBackendType == PDMACFILEEPBACKEND_NON_BUFFERED)
                    rc = pdmacFileAioMgrNormalTaskPrepareNonBuffered(pAioMgr, pEndpoint, pCurr, &hReq);
                else
                    AssertMsgFailed(("Invalid backend type %d\n", pEndpoint->enmBackendType));

                AssertRC(rc);

                /* hReq stays NIL when the task was deferred on a locked range. */
                if (hReq != NIL_RTFILEAIOREQ)
                {
                    apReqs[cRequests] = hReq;
                    pEndpoint->AioMgr.cReqsProcessed++;
                    cMaxRequests--;
                    cRequests++;
                    if (cRequests == RT_ELEMENTS(apReqs))
                    {
                        /* Batch full - submit it now. */
                        rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
                        cRequests = 0;
                        AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                                  ("Unexpected return code\n"));
                    }
                }
                break;
            }
            default:
                AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
        }
    }

    /* Submit the partially filled batch, if any. */
    if (cRequests)
    {
        rc = pdmacFileAioMgrNormalReqsEnqueue(pAioMgr, pEndpoint, apReqs, cRequests);
        AssertMsg(RT_SUCCESS(rc) || (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES),
                  ("Unexpected return code rc=%Rrc\n", rc));
    }

    if (pTaskHead)
    {
        /* Add the rest of the tasks to the pending list */
        pdmacFileAioMgrEpAddTaskList(pEndpoint, pTaskHead);

        if (RT_UNLIKELY(   !cMaxRequests
                        && !pEndpoint->pFlushReq
                        && !pAioMgr->fBwLimitReached))
        {
            /*
             * The I/O manager has no room left for more requests
             * but there are still requests to process.
             * Create a new I/O manager and let it handle some endpoints.
             */
            pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
        }
    }

    /* Insufficient resources are not fatal. */
    if (rc == VERR_FILE_AIO_INSUFFICIENT_RESSOURCES)
        rc = VINF_SUCCESS;

    return rc;
}
895
896/**
897 * Adds all pending requests for the given endpoint
898 * until a flush request is encountered or there is no
899 * request anymore.
900 *
901 * @returns VBox status code.
902 * @param pAioMgr The async I/O manager for the endpoint
903 * @param pEndpoint The endpoint to get the requests from.
904 */
905static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
906 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
907{
908 int rc = VINF_SUCCESS;
909 PPDMACTASKFILE pTasksHead = NULL;
910
911 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
912 ("Trying to process request lists of a non active endpoint!\n"));
913
914 Assert(!pEndpoint->pFlushReq);
915
916 /* Check the pending list first */
917 if (pEndpoint->AioMgr.pReqsPendingHead)
918 {
919 LogFlow(("Queuing pending requests first\n"));
920
921 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
922 /*
923 * Clear the list as the processing routine will insert them into the list
924 * again if it gets a flush request.
925 */
926 pEndpoint->AioMgr.pReqsPendingHead = NULL;
927 pEndpoint->AioMgr.pReqsPendingTail = NULL;
928 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
929 AssertRC(rc);
930 }
931
932 if (!pEndpoint->pFlushReq && !pEndpoint->AioMgr.pReqsPendingHead)
933 {
934 /* Now the request queue. */
935 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
936 if (pTasksHead)
937 {
938 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
939 AssertRC(rc);
940 }
941 }
942
943 return rc;
944}
945
/**
 * Processes the blocking event posted to this I/O manager by another thread
 * (endpoint add/remove/close, shutdown, suspend, resume).
 *
 * The poster waits on EventSemBlock; we signal it once the event is fully
 * handled (fNotifyWaiter), which for remove/close may be deferred until all
 * outstanding requests of the endpoint have completed.
 *
 * @returns VBox status code.
 * @param   pAioMgr     The I/O manager with a pending blocking event.
 */
static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    bool fNotifyWaiter = false;

    LogFlowFunc((": Enter\n"));

    Assert(pAioMgr->fBlockingEventPending);

    switch (pAioMgr->enmBlockingEvent)
    {
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
        {
            /* The poster published the endpoint pointer with an atomic write; read it back atomically. */
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without a endpoint to add\n"));

            pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;

            /* Link the endpoint at the head of this manager's doubly linked endpoint list. */
            pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
            pEndpointNew->AioMgr.pEndpointPrev = NULL;
            if (pAioMgr->pEndpointsHead)
                pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
            pAioMgr->pEndpointsHead = pEndpointNew;

            /* Assign the completion point to this file. */
            rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
            fNotifyWaiter = true;
            pAioMgr->cEndpoints++;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

            pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
            /* Only signal the waiter now if no requests are pending; otherwise
             * the waiter is released later once the endpoint drained. */
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without a endpoint to close\n"));

            if (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
            {
                LogFlowFunc((": Closing endpoint %#p{%s}\n", pEndpointClose, pEndpointClose->Core.pszUri));

                /* Make sure all tasks finished. Process the queues a last time first. */
                rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
                AssertRC(rc);

                pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
                fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
            }
            else if (   (pEndpointClose->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING)
                     && (!pEndpointClose->AioMgr.cRequestsActive))
                /* Already closing and fully drained: just release the waiter. */
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
            /* Defer the notification until all active requests completed. */
            if (!pAioMgr->cRequestsActive)
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
        {
            /* NOTE(review): the waiter is intentionally not signalled here;
             * presumably the main loop releases it once suspension completed. */
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
            fNotifyWaiter = true;
            break;
        }
        default:
            AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
    }

    if (fNotifyWaiter)
    {
        /* Clear the pending flag before signalling so the poster sees a consistent state. */
        ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
        pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;

        /* Release the waiting thread. */
        LogFlow(("Signalling waiter\n"));
        rc = RTSemEventSignal(pAioMgr->EventSemBlock);
        AssertRC(rc);
    }

    LogFlowFunc((": Leave\n"));
    return rc;
}
1042
1043/**
1044 * Checks all endpoints for pending events or new requests.
1045 *
1046 * @returns VBox status code.
1047 * @param pAioMgr The I/O manager handle.
1048 */
1049static int pdmacFileAioMgrNormalCheckEndpoints(PPDMACEPFILEMGR pAioMgr)
1050{
1051 /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
1052 int rc = VINF_SUCCESS;
1053 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;
1054
1055 pAioMgr->fBwLimitReached = false;
1056
1057 while (pEndpoint)
1058 {
1059 if (!pEndpoint->pFlushReq
1060 && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
1061 && !pEndpoint->AioMgr.fMoving)
1062 {
1063 rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
1064 if (RT_FAILURE(rc))
1065 return rc;
1066 }
1067 else if (!pEndpoint->AioMgr.cRequestsActive)
1068 {
1069 /* Reopen the file so that the new endpoint can reassociate with the file */
1070 RTFileClose(pEndpoint->File);
1071 rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
1072 AssertRC(rc);
1073
1074 if (pEndpoint->AioMgr.fMoving)
1075 {
1076 pEndpoint->AioMgr.fMoving = false;
1077 pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1078 }
1079 else
1080 {
1081 Assert(pAioMgr->fBlockingEventPending);
1082 ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
1083
1084 /* Release the waiting thread. */
1085 LogFlow(("Signalling waiter\n"));
1086 rc = RTSemEventSignal(pAioMgr->EventSemBlock);
1087 AssertRC(rc);
1088 }
1089 }
1090
1091 pEndpoint = pEndpoint->AioMgr.pEndpointNext;
1092 }
1093
1094 return rc;
1095}
1096
1097static void pdmacFileAioMgrNormalReqComplete(PPDMACEPFILEMGR pAioMgr, RTFILEAIOREQ hReq)
1098{
1099 int rc = VINF_SUCCESS;
1100 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint;
1101 size_t cbTransfered = 0;
1102 int rcReq = RTFileAioReqGetRC(hReq, &cbTransfered);
1103 PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(hReq);
1104
1105 pEndpoint = pTask->pEndpoint;
1106
1107 /*
1108 * It is possible that the request failed on Linux with kernels < 2.6.23
1109 * if the passed buffer was allocated with remap_pfn_range or if the file
1110 * is on an NFS endpoint which does not support async and direct I/O at the same time.
1111 * The endpoint will be migrated to a failsafe manager in case a request fails.
1112 */
1113 if (RT_FAILURE(rcReq))
1114 {
1115 /* Free bounce buffers and the IPRT request. */
1116 pAioMgr->pahReqsFree[pAioMgr->iFreeEntryNext] = hReq;
1117 pAioMgr->iFreeEntryNext = (pAioMgr->iFreeEntryNext + 1) % pAioMgr->cReqEntries;
1118
1119 /* Free the lock and process pending tasks if neccessary */
1120 pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
1121
1122 pAioMgr->cRequestsActive--;
1123 pEndpoint->AioMgr.cRequestsActive--;
1124 pEndpoint->AioMgr.cReqsProcessed++;
1125
1126 if (pTask->fBounceBuffer)
1127 RTMemFree(pTask->pvBounceBuffer);
1128
1129 /* Queue the request on the pending list. */
1130 pTask->pNext = pEndpoint->AioMgr.pReqsPendingHead;
1131 pEndpoint->AioMgr.pReqsPendingHead = pTask;
1132
1133 /* Create a new failsafe manager if neccessary. */
1134 if (!pEndpoint->AioMgr.fMoving)
1135 {
1136 PPDMACEPFILEMGR pAioMgrFailsafe;
1137
1138 LogRel(("%s: Request %#p failed with rc=%Rrc, migrating endpoint %s to failsafe manager.\n",
1139 RTThreadGetName(pAioMgr->Thread), pTask, rcReq, pEndpoint->Core.pszUri));
1140
1141 pEndpoint->AioMgr.fMoving = true;
1142
1143 rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass,
1144 &pAioMgrFailsafe, PDMACEPFILEMGRTYPE_SIMPLE);
1145 AssertRC(rc);
1146
1147 pEndpoint->AioMgr.pAioMgrDst = pAioMgrFailsafe;
1148
1149 /* Update the flags to open the file with. Disable async I/O and enable the host cache. */
1150 pEndpoint->fFlags &= ~(RTFILE_O_ASYNC_IO | RTFILE_O_NO_CACHE);
1151 }
1152
1153 /* If this was the last request for the endpoint migrate it to the new manager. */
1154 if (!pEndpoint->AioMgr.cRequestsActive)
1155 {
1156 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
1157 Assert(!fReqsPending);
1158
1159 rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1160 AssertRC(rc);
1161 }
1162 }
1163 else
1164 {
1165 AssertMsg( RT_FAILURE(rcReq)
1166 || ( (cbTransfered == pTask->DataSeg.cbSeg)
1167 || (pTask->fBounceBuffer && (cbTransfered >= pTask->DataSeg.cbSeg))),
1168 ("Task didn't completed successfully (rc=%Rrc) or was incomplete (cbTransfered=%u)\n", rcReq, cbTransfered));
1169
1170 if (pTask->fPrefetch)
1171 {
1172 Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
1173 Assert(pTask->fBounceBuffer);
1174
1175 memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
1176 pTask->DataSeg.pvSeg,
1177 pTask->DataSeg.cbSeg);
1178
1179 /* Write it now. */
1180 pTask->fPrefetch = false;
1181 size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
1182 RTFOFF offStart = pTask->Off & ~(RTFOFF)(512-1);
1183
1184 /* Grow the file if needed. */
1185 if (RT_UNLIKELY((uint64_t)(pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
1186 {
1187 ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
1188 RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
1189 }
1190
1191 rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
1192 offStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
1193 AssertRC(rc);
1194 rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &hReq, 1);
1195 AssertRC(rc);
1196 }
1197 else
1198 {
1199 if (RT_SUCCESS(rc) && pTask->fBounceBuffer)
1200 {
1201 if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
1202 memcpy(pTask->DataSeg.pvSeg,
1203 ((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
1204 pTask->DataSeg.cbSeg);
1205
1206 RTMemPageFree(pTask->pvBounceBuffer);
1207 }
1208
1209 /* Put the entry on the free array */
1210 pAioMgr->pahReqsFree[pAioMgr->iFreeEntryNext] = hReq;
1211 pAioMgr->iFreeEntryNext = (pAioMgr->iFreeEntryNext + 1) % pAioMgr->cReqEntries;
1212
1213 pAioMgr->cRequestsActive--;
1214 pEndpoint->AioMgr.cRequestsActive--;
1215 pEndpoint->AioMgr.cReqsProcessed++;
1216
1217 /* Free the lock and process pending tasks if neccessary */
1218 pdmacFileAioMgrNormalRangeLockFree(pAioMgr, pEndpoint, pTask->pRangeLock);
1219
1220 /* Call completion callback */
1221 pTask->pfnCompleted(pTask, pTask->pvUser, rcReq);
1222 pdmacFileTaskFree(pEndpoint, pTask);
1223
1224 /*
1225 * If there is no request left on the endpoint but a flush request is set
1226 * it completed now and we notify the owner.
1227 * Furthermore we look for new requests and continue.
1228 */
1229 if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
1230 {
1231 /* Call completion callback */
1232 pTask = pEndpoint->pFlushReq;
1233 pEndpoint->pFlushReq = NULL;
1234
1235 AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));
1236
1237 pTask->pfnCompleted(pTask, pTask->pvUser, VINF_SUCCESS);
1238 pdmacFileTaskFree(pEndpoint, pTask);
1239 }
1240 else if (RT_UNLIKELY(!pEndpoint->AioMgr.cRequestsActive && pEndpoint->AioMgr.fMoving))
1241 {
1242 /* If the endpoint is about to be migrated do it now. */
1243 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pEndpoint);
1244 Assert(!fReqsPending);
1245
1246 rc = pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
1247 AssertRC(rc);
1248 }
1249 }
1250 } /* request completed successfully */
1251}
1252
/** Helper macro for checking for error codes.
 * On failure it runs the manager's error handler and returns its status.
 * Wrapped in do/while(0) so the multi-statement expansion behaves like a
 * single statement (no dangling-else hazard when used unbraced in an if). */
#define CHECK_RC(pAioMgr, rc) \
    do \
    { \
        if (RT_FAILURE(rc)) \
        { \
            int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS); \
            return rc2; \
        } \
    } while (0)
1260
1261/**
1262 * The normal I/O manager using the RTFileAio* API
1263 *
1264 * @returns VBox status code.
1265 * @param ThreadSelf Handle of the thread.
1266 * @param pvUser Opaque user data.
1267 */
1268int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
1269{
1270 int rc = VINF_SUCCESS;
1271 PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
1272 uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1273
1274 while ( (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
1275 || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING))
1276 {
1277 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
1278 if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
1279 rc = RTSemEventWait(pAioMgr->EventSem, RT_INDEFINITE_WAIT);
1280 ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
1281 AssertRC(rc);
1282
1283 LogFlow(("Got woken up\n"));
1284 ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);
1285
1286 /* Check for an external blocking event first. */
1287 if (pAioMgr->fBlockingEventPending)
1288 {
1289 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1290 CHECK_RC(pAioMgr, rc);
1291 }
1292
1293 if (RT_LIKELY(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING))
1294 {
1295 /* We got woken up because an endpoint issued new requests. Queue them. */
1296 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1297 CHECK_RC(pAioMgr, rc);
1298
1299 while ( pAioMgr->cRequestsActive
1300 || pAioMgr->fBwLimitReached)
1301 {
1302 if (pAioMgr->cRequestsActive)
1303 {
1304 RTFILEAIOREQ apReqs[20];
1305 uint32_t cReqsCompleted = 0;
1306 size_t cReqsWait;
1307
1308 if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
1309 cReqsWait = RT_ELEMENTS(apReqs);
1310 else
1311 cReqsWait = pAioMgr->cRequestsActive;
1312
1313 LogFlow(("Waiting for %d of %d tasks to complete\n", pAioMgr->cRequestsActive, cReqsWait));
1314
1315 rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
1316 cReqsWait,
1317 RT_INDEFINITE_WAIT, apReqs,
1318 RT_ELEMENTS(apReqs), &cReqsCompleted);
1319 if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
1320 CHECK_RC(pAioMgr, rc);
1321
1322 LogFlow(("%d tasks completed\n", cReqsCompleted));
1323
1324 for (uint32_t i = 0; i < cReqsCompleted; i++)
1325 pdmacFileAioMgrNormalReqComplete(pAioMgr, apReqs[i]);
1326
1327 /* Check for an external blocking event before we go to sleep again. */
1328 if (pAioMgr->fBlockingEventPending)
1329 {
1330 rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
1331 CHECK_RC(pAioMgr, rc);
1332 }
1333
1334 /* Update load statistics. */
1335 uint64_t uMillisCurr = RTTimeMilliTS();
1336 if (uMillisCurr > uMillisEnd)
1337 {
1338 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;
1339
1340 /* Calculate timespan. */
1341 uMillisCurr -= uMillisEnd;
1342
1343 while (pEndpointCurr)
1344 {
1345 pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
1346 pEndpointCurr->AioMgr.cReqsProcessed = 0;
1347 pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
1348 }
1349
1350 /* Set new update interval */
1351 uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
1352 }
1353 }
1354 else
1355 {
1356 /*
1357 * Bandwidth limit reached for all endpoints.
1358 * Yield and wait until we have enough resources again.
1359 */
1360 RTThreadYield();
1361 }
1362
1363 /* Check endpoints for new requests. */
1364 rc = pdmacFileAioMgrNormalCheckEndpoints(pAioMgr);
1365 CHECK_RC(pAioMgr, rc);
1366 } /* while requests are active. */
1367 } /* if still running */
1368 } /* while running */
1369
1370 return rc;
1371}
1372
1373#undef CHECK_RC
1374
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette