VirtualBox

source: vbox/trunk/src/VBox/VMM/PDMAsyncCompletionFileNormal.cpp@ 22997

Last change on this file since 22997 was 22977, checked in by vboxsync, 15 years ago

PDMASyncCompletionFile: uBitmaskAlignment fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 33.4 KB
Line 
1/* $Id: PDMAsyncCompletionFileNormal.cpp 22977 2009-09-13 13:00:19Z vboxsync $ */
2/** @file
3 * PDM Async I/O - Transport data asynchronous in R3 using EMT.
4 * Async File I/O manager.
5 */
6
7/*
8 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
23#include <iprt/types.h>
24#include <iprt/asm.h>
25#include <iprt/file.h>
26#include <iprt/mem.h>
27#include <iprt/string.h>
28#include <VBox/log.h>
29
30#include "PDMAsyncCompletionFileInternal.h"
31
32/** The update period for the I/O load statistics in ms. */
33#define PDMACEPFILEMGR_LOAD_UPDATE_PERIOD 1000
34/** Maximum number of requests a manager will handle. */
35#define PDMACEPFILEMGR_REQS_MAX 512 /* @todo: Find better solution wrt. the request number*/
36
37int pdmacFileAioMgrNormalInit(PPDMACEPFILEMGR pAioMgr)
38{
39 int rc = VINF_SUCCESS;
40
41 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, RTFILEAIO_UNLIMITED_REQS);
42 if (rc == VERR_OUT_OF_RANGE)
43 rc = RTFileAioCtxCreate(&pAioMgr->hAioCtx, PDMACEPFILEMGR_REQS_MAX);
44
45 if (RT_SUCCESS(rc))
46 {
47 /* Initialize request handle array. */
48 pAioMgr->iFreeEntryNext = 0;
49 pAioMgr->iFreeReqNext = 0;
50 pAioMgr->cReqEntries = PDMACEPFILEMGR_REQS_MAX + 1;
51 pAioMgr->pahReqsFree = (RTFILEAIOREQ *)RTMemAllocZ(pAioMgr->cReqEntries * sizeof(RTFILEAIOREQ));
52
53 if (pAioMgr->pahReqsFree)
54 {
55 return VINF_SUCCESS;
56 }
57 else
58 {
59 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
60 rc = VERR_NO_MEMORY;
61 }
62 }
63
64 return rc;
65}
66
67void pdmacFileAioMgrNormalDestroy(PPDMACEPFILEMGR pAioMgr)
68{
69 RTFileAioCtxDestroy(pAioMgr->hAioCtx);
70
71 while (pAioMgr->iFreeReqNext != pAioMgr->iFreeEntryNext)
72 {
73 RTFileAioReqDestroy(pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext]);
74 pAioMgr->iFreeReqNext = (pAioMgr->iFreeReqNext + 1) % pAioMgr->cReqEntries;
75 }
76
77 RTMemFree(pAioMgr->pahReqsFree);
78}
79
80/**
81 * Sorts the endpoint list with insertion sort.
82 */
83static void pdmacFileAioMgrNormalEndpointsSortByLoad(PPDMACEPFILEMGR pAioMgr)
84{
85 PPDMASYNCCOMPLETIONENDPOINTFILE pEpPrev, pEpCurr, pEpNextToSort;
86
87 pEpPrev = pAioMgr->pEndpointsHead;
88 pEpCurr = pEpPrev->AioMgr.pEndpointNext;
89
90 while (pEpCurr)
91 {
92 /* Remember the next element to sort because the list might change. */
93 pEpNextToSort = pEpCurr->AioMgr.pEndpointNext;
94
95 /* Unlink the current element from the list. */
96 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
97 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
98
99 if (pPrev)
100 pPrev->AioMgr.pEndpointNext = pNext;
101 else
102 pAioMgr->pEndpointsHead = pNext;
103
104 if (pNext)
105 pNext->AioMgr.pEndpointPrev = pPrev;
106
107 /* Go back until we reached the place to insert the current endpoint into. */
108 while (pEpPrev && (pEpPrev->AioMgr.cReqsPerSec < pEpCurr->AioMgr.cReqsPerSec))
109 pEpPrev = pEpPrev->AioMgr.pEndpointPrev;
110
111 /* Link the endpoint into the list. */
112 if (pEpPrev)
113 pNext = pEpPrev->AioMgr.pEndpointNext;
114 else
115 pNext = pAioMgr->pEndpointsHead;
116
117 pEpCurr->AioMgr.pEndpointNext = pNext;
118 pEpCurr->AioMgr.pEndpointPrev = pEpPrev;
119 pNext->AioMgr.pEndpointPrev = pEpCurr;
120 if (pEpPrev)
121 pEpPrev->AioMgr.pEndpointNext = pEpCurr;
122 else
123 pAioMgr->pEndpointsHead = pEpCurr;
124
125 pEpCurr = pEpNextToSort;
126 }
127
128#ifdef DEBUG
129 /* Validate sorting alogrithm */
130 unsigned cEndpoints = 0;
131 pEpCurr = pAioMgr->pEndpointsHead;
132
133 AssertMsg(pEpCurr, ("No endpoint in the list?\n"));
134 AssertMsg(!pEpCurr->AioMgr.pEndpointPrev, ("First element in the list points to previous element\n"));
135
136 while (pEpCurr)
137 {
138 cEndpoints++;
139
140 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEpCurr->AioMgr.pEndpointNext;
141 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEpCurr->AioMgr.pEndpointPrev;
142
143 Assert(!pNext || pNext->AioMgr.cReqsPerSec <= pEpCurr->AioMgr.cReqsPerSec);
144 Assert(!pPrev || pPrev->AioMgr.cReqsPerSec >= pEpCurr->AioMgr.cReqsPerSec);
145
146 pEpCurr = pNext;
147 }
148
149 AssertMsg(cEndpoints == pAioMgr->cEndpoints, ("Endpoints lost during sort!\n"));
150
151#endif
152}
153
154/**
155 * Removes an endpoint from the currently assigned manager.
156 *
157 * @returns TRUE if there are still requests pending on the current manager for this endpoint.
158 * FALSE otherwise.
159 * @param pEndpointRemove The endpoint to remove.
160 */
161static bool pdmacFileAioMgrNormalRemoveEndpoint(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove)
162{
163 PPDMASYNCCOMPLETIONENDPOINTFILE pPrev = pEndpointRemove->AioMgr.pEndpointPrev;
164 PPDMASYNCCOMPLETIONENDPOINTFILE pNext = pEndpointRemove->AioMgr.pEndpointNext;
165 PPDMACEPFILEMGR pAioMgr = pEndpointRemove->pAioMgr;
166
167 pAioMgr->cEndpoints--;
168
169 if (pPrev)
170 pPrev->AioMgr.pEndpointNext = pNext;
171 else
172 pAioMgr->pEndpointsHead = pNext;
173
174 if (pNext)
175 pNext->AioMgr.pEndpointPrev = pPrev;
176
177 /* Make sure that there is no request pending on this manager for the endpoint. */
178 if (!pEndpointRemove->AioMgr.cRequestsActive)
179 {
180 Assert(!pEndpointRemove->pFlushReq);
181
182 /* Reopen the file so that the new endpoint can reassociate with the file */
183 RTFileClose(pEndpointRemove->File);
184 int rc = RTFileOpen(&pEndpointRemove->File, pEndpointRemove->Core.pszUri, pEndpointRemove->fFlags);
185 AssertRC(rc);
186 return false;
187 }
188
189 return true;
190}
191
192/**
193 * Creates a new I/O manager and spreads the I/O load of the endpoints
194 * between the given I/O manager and the new one.
195 *
196 * @returns nothing.
197 * @param pAioMgr The I/O manager with high I/O load.
198 */
199static void pdmacFileAioMgrNormalBalanceLoad(PPDMACEPFILEMGR pAioMgr)
200{
201 PPDMACEPFILEMGR pAioMgrNew = NULL;
202 int rc = VINF_SUCCESS;
203
204 /* Splitting can't be done with only one open endpoint. */
205 if (pAioMgr->cEndpoints > 1)
206 {
207 rc = pdmacFileAioMgrCreate((PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass,
208 &pAioMgrNew);
209 if (RT_SUCCESS(rc))
210 {
211 /* We will sort the list by request count per second. */
212 pdmacFileAioMgrNormalEndpointsSortByLoad(pAioMgr);
213
214 /* Now move some endpoints to the new manager. */
215 unsigned cReqsHere = pAioMgr->pEndpointsHead->AioMgr.cReqsPerSec;
216 unsigned cReqsOther = 0;
217 PPDMASYNCCOMPLETIONENDPOINTFILE pCurr = pAioMgr->pEndpointsHead->AioMgr.pEndpointNext;
218
219 while (pCurr)
220 {
221 if (cReqsHere <= cReqsOther)
222 {
223 /*
224 * The other manager has more requests to handle now.
225 * We will keep the current endpoint.
226 */
227 Log(("Keeping endpoint %#p{%s} with %u reqs/s\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
228 cReqsHere += pCurr->AioMgr.cReqsPerSec;
229 pCurr = pCurr->AioMgr.pEndpointNext;
230 }
231 else
232 {
233 /* Move to other endpoint. */
234 Log(("Moving endpoint %#p{%s} with %u reqs/s to other manager\n", pCurr->Core.pszUri, pCurr->AioMgr.cReqsPerSec));
235 cReqsOther += pCurr->AioMgr.cReqsPerSec;
236
237 PPDMASYNCCOMPLETIONENDPOINTFILE pMove = pCurr;
238
239 pCurr = pCurr->AioMgr.pEndpointNext;
240
241 bool fReqsPending = pdmacFileAioMgrNormalRemoveEndpoint(pMove);
242
243 if (fReqsPending)
244 {
245 pMove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
246 pMove->AioMgr.fMoving = true;
247 pMove->AioMgr.pAioMgrDst = pAioMgrNew;
248 }
249 else
250 {
251 pMove->AioMgr.fMoving = false;
252 pMove->AioMgr.pAioMgrDst = NULL;
253 pdmacFileAioMgrAddEndpoint(pAioMgrNew, pMove);
254 }
255 }
256 }
257 }
258 else
259 {
260 /* Don't process further but leave a log entry about reduced performance. */
261 LogRel(("AIOMgr: Could not create new I/O manager (rc=%Rrc). Expect reduced performance\n", rc));
262 }
263 }
264}
265
266/**
267 * Error handler which will create the failsafe managers and destroy the failed I/O manager.
268 *
269 * @returns VBox status code
270 * @param pAioMgr The I/O manager the error ocurred on.
271 * @param rc The error code.
272 */
273static int pdmacFileAioMgrNormalErrorHandler(PPDMACEPFILEMGR pAioMgr, int rc, RT_SRC_POS_DECL)
274{
275 LogRel(("AIOMgr: I/O manager %#p encountered a critical error (rc=%Rrc) during operation. Falling back to failsafe mode. Expect reduced performance\n",
276 pAioMgr, rc));
277 LogRel(("AIOMgr: Error happened in %s:(%u){%s}\n", RT_SRC_POS_ARGS));
278 LogRel(("AIOMgr: Please contact the product vendor\n"));
279
280 PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pAioMgr->pEndpointsHead->Core.pEpClass;
281
282 pAioMgr->enmState = PDMACEPFILEMGRSTATE_FAULT;
283 ASMAtomicWriteBool(&pEpClassFile->fFailsafe, true);
284
285 AssertMsgFailed(("Implement\n"));
286 return VINF_SUCCESS;
287}
288
/**
 * Turns the tasks of the given list into host async I/O requests and submits
 * them, stopping early when a flush is encountered (with requests in flight)
 * or the manager runs out of request slots.
 *
 * @returns VBox status code.
 * @param   pTaskHead    Head of the singly linked task list to process.
 * @param   pAioMgr      The I/O manager the requests are submitted to.
 * @param   pEndpoint    The endpoint all tasks in the list belong to.
 */
static int pdmacFileAioMgrNormalProcessTaskList(PPDMACTASKFILE pTaskHead,
                                                PPDMACEPFILEMGR pAioMgr,
                                                PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    RTFILEAIOREQ apReqs[20];
    unsigned cRequests = 0;
    /* Remaining capacity before this manager is considered full. */
    unsigned cMaxRequests = PDMACEPFILEMGR_REQS_MAX - pAioMgr->cRequestsActive;
    int rc = VINF_SUCCESS;
    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;

    AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
              ("Trying to process request lists of a non active endpoint!\n"));

    /* Go through the list and queue the requests until we get a flush request */
    while (pTaskHead && !pEndpoint->pFlushReq && (cMaxRequests > 0))
    {
        PPDMACTASKFILE pCurr = pTaskHead;

        pTaskHead = pTaskHead->pNext;

        AssertMsg(VALID_PTR(pCurr->pEndpoint) && (pCurr->pEndpoint == pEndpoint),
                  ("Endpoints do not match\n"));

        switch (pCurr->enmTransferType)
        {
            case PDMACTASKFILETRANSFER_FLUSH:
            {
                /* If there is no data transfer request this flush request finished immediately. */
                if (!pEndpoint->AioMgr.cRequestsActive)
                {
                    pCurr->pfnCompleted(pCurr, pCurr->pvUser);
                    pdmacFileTaskFree(pEndpoint, pCurr);
                }
                else
                {
                    /* Remember the flush; it completes when the last active
                     * request finishes.  Everything behind it is deferred. */
                    pEndpoint->pFlushReq = pCurr;

                    if (pTaskHead)
                    {
                        /* Add the rest of the tasks to the pending list */
                        if (!pEndpoint->AioMgr.pReqsPendingHead)
                        {
                            Assert(!pEndpoint->AioMgr.pReqsPendingTail);
                            pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
                        }
                        else
                        {
                            Assert(pEndpoint->AioMgr.pReqsPendingTail);
                            pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
                        }

                        /* Update the tail. */
                        while (pTaskHead->pNext)
                            pTaskHead = pTaskHead->pNext;

                        pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;
                    }
                }
                break;
            }
            case PDMACTASKFILETRANSFER_READ:
            case PDMACTASKFILETRANSFER_WRITE:
            {
                RTFILEAIOREQ hReq = NIL_RTFILEAIOREQ;
                void *pvBuf = pCurr->DataSeg.pvSeg;

                /* Get a request handle. */
                if (pAioMgr->iFreeReqNext != pAioMgr->iFreeEntryNext)
                {
                    /* Recycle a cached handle from the free ring. */
                    hReq = pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext];
                    pAioMgr->pahReqsFree[pAioMgr->iFreeReqNext] = NIL_RTFILEAIOREQ;
                    pAioMgr->iFreeReqNext = (pAioMgr->iFreeReqNext + 1) % pAioMgr->cReqEntries;
                }
                else
                {
                    /* NOTE(review): if this fails, hReq stays NIL and is still
                     * used below; only the debug assertion catches it. */
                    rc = RTFileAioReqCreate(&hReq);
                    AssertRC(rc);
                }

                AssertMsg(hReq != NIL_RTFILEAIOREQ, ("Out of request handles\n"));

                /* Check if the alignment requirements are met.
                 * Offset, transfer size and buffer address
                 * need to be on a 512 boundary. */
                size_t cbToTransfer = RT_ALIGN_Z(pCurr->DataSeg.cbSeg, 512);
                RTFOFF OffStart = pCurr->Off & ~(RTFOFF)(512-1);
                PDMACTASKFILETRANSFER enmTransferType = pCurr->enmTransferType;

                AssertMsg(( (pCurr->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
                           || (OffStart + cbToTransfer <= pEndpoint->cbFile)),
                          ("Read exceeds file size OffStart=%RTfoff cbToTransfer=%d cbFile=%llu\n",
                          OffStart, cbToTransfer, pEndpoint->cbFile));

                pCurr->fPrefetch = false;

                /* Unaligned offset/size or an insufficiently aligned buffer
                 * forces the transfer through a page-aligned bounce buffer. */
                if (   RT_UNLIKELY(cbToTransfer != pCurr->DataSeg.cbSeg)
                    || RT_UNLIKELY(OffStart != pCurr->Off)
                    || ((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) != (RTR3UINTPTR)pvBuf))
                {
                    /* Create bounce buffer. */
                    pCurr->fBounceBuffer = true;

                    AssertMsg(pCurr->Off >= OffStart, ("Overflow in calculation Off=%llu OffStart=%llu\n",
                              pCurr->Off, OffStart));
                    pCurr->uBounceBufOffset = pCurr->Off - OffStart;

                    /** @todo: I think we need something like a RTMemAllocAligned method here.
                     * Current assumption is that the maximum alignment is 4096byte
                     * (GPT disk on Windows)
                     * so we can use RTMemPageAlloc here.
                     */
                    /* NOTE(review): allocation failure is only asserted, not
                     * handled — a NULL bounce buffer would be submitted. */
                    pCurr->pvBounceBuffer = RTMemPageAlloc(cbToTransfer);
                    AssertPtr(pCurr->pvBounceBuffer);
                    pvBuf = pCurr->pvBounceBuffer;

                    if (pCurr->enmTransferType == PDMACTASKFILETRANSFER_WRITE)
                    {
                        if (   RT_UNLIKELY(cbToTransfer != pCurr->DataSeg.cbSeg)
                            || RT_UNLIKELY(OffStart != pCurr->Off))
                        {
                            /* We have to fill the buffer first before we can update the data. */
                            pCurr->fPrefetch = true;
                            enmTransferType = PDMACTASKFILETRANSFER_READ;
                        }
                        else
                            memcpy(pvBuf, pCurr->DataSeg.pvSeg, pCurr->DataSeg.cbSeg);
                    }
                }
                else
                    pCurr->fBounceBuffer = false;

                AssertMsg((pEpClassFile->uBitmaskAlignment & (RTR3UINTPTR)pvBuf) == (RTR3UINTPTR)pvBuf,
                          ("AIO: Alignment restrictions not met! pvBuf=%p uBitmaskAlignment=%p\n", pvBuf, pEpClassFile->uBitmaskAlignment));

                if (enmTransferType == PDMACTASKFILETRANSFER_WRITE)
                {
                    /* Grow the file if needed. */
                    if (RT_UNLIKELY((pCurr->Off + pCurr->DataSeg.cbSeg) > pEndpoint->cbFile))
                    {
                        ASMAtomicWriteU64(&pEndpoint->cbFile, pCurr->Off + pCurr->DataSeg.cbSeg);
                        RTFileSetSize(pEndpoint->File, pCurr->Off + pCurr->DataSeg.cbSeg);
                    }

                    rc = RTFileAioReqPrepareWrite(hReq, pEndpoint->File,
                                                  OffStart, pvBuf, cbToTransfer, pCurr);
                }
                else
                    rc = RTFileAioReqPrepareRead(hReq, pEndpoint->File,
                                                 OffStart, pvBuf, cbToTransfer, pCurr);
                AssertRC(rc);

                /* Batch the prepared request; submit once the batch is full. */
                apReqs[cRequests] = hReq;
                pEndpoint->AioMgr.cReqsProcessed++;
                cMaxRequests--;
                cRequests++;
                if (cRequests == RT_ELEMENTS(apReqs))
                {
                    pAioMgr->cRequestsActive += cRequests;
                    pEndpoint->AioMgr.cRequestsActive += cRequests;
                    rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, apReqs, cRequests);
                    if (RT_FAILURE(rc))
                    {
                        /* @todo implement */
                        AssertMsgFailed(("Implement\n"));
                    }

                    cRequests = 0;
                }
                break;
            }
            default:
                AssertMsgFailed(("Invalid transfer type %d\n", pCurr->enmTransferType));
        }
    }

    /* Submit whatever remains in the partially filled batch. */
    if (cRequests)
    {
        pAioMgr->cRequestsActive += cRequests;
        pEndpoint->AioMgr.cRequestsActive += cRequests;
        rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, apReqs, cRequests);
        /* NOTE(review): on failure the active counters incremented above are
         * not rolled back before returning — confirm intended. */
        AssertMsgReturn(RT_SUCCESS(rc), ("Could not submit %u requests %Rrc\n", cRequests, rc), rc);
    }

    if (RT_UNLIKELY(!cMaxRequests && pTaskHead && !pEndpoint->pFlushReq))
    {
        /*
         * The I/O manager has no room left for more requests
         * but there are still requests to process.
         * Create a new I/O manager and let it handle some endpoints.
         */

        /* Add the rest of the tasks to the pending list first */
        if (!pEndpoint->AioMgr.pReqsPendingHead)
        {
            Assert(!pEndpoint->AioMgr.pReqsPendingTail);
            pEndpoint->AioMgr.pReqsPendingHead = pTaskHead;
        }
        else
        {
            Assert(pEndpoint->AioMgr.pReqsPendingTail);
            pEndpoint->AioMgr.pReqsPendingTail->pNext = pTaskHead;
        }

        /* Update the tail. */
        while (pTaskHead->pNext)
            pTaskHead = pTaskHead->pNext;

        pEndpoint->AioMgr.pReqsPendingTail = pTaskHead;

        pdmacFileAioMgrNormalBalanceLoad(pAioMgr);
    }

    return rc;
}
503
504/**
505 * Adds all pending requests for the given endpoint
506 * until a flush request is encountered or there is no
507 * request anymore.
508 *
509 * @returns VBox status code.
510 * @param pAioMgr The async I/O manager for the endpoint
511 * @param pEndpoint The endpoint to get the requests from.
512 */
513static int pdmacFileAioMgrNormalQueueReqs(PPDMACEPFILEMGR pAioMgr,
514 PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
515{
516 int rc = VINF_SUCCESS;
517 PPDMACTASKFILE pTasksHead = NULL;
518
519 AssertMsg(pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE,
520 ("Trying to process request lists of a non active endpoint!\n"));
521
522 Assert(!pEndpoint->pFlushReq);
523
524 /* Check the pending list first */
525 if (pEndpoint->AioMgr.pReqsPendingHead)
526 {
527 pTasksHead = pEndpoint->AioMgr.pReqsPendingHead;
528 /*
529 * Clear the list as the processing routine will insert them into the list
530 * again if it gets a flush request.
531 */
532 pEndpoint->AioMgr.pReqsPendingHead = NULL;
533 pEndpoint->AioMgr.pReqsPendingTail = NULL;
534 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
535 AssertRC(rc);
536 }
537
538 if (!pEndpoint->pFlushReq)
539 {
540 /* Now the request queue. */
541 pTasksHead = pdmacFileEpGetNewTasks(pEndpoint);
542 if (pTasksHead)
543 {
544 rc = pdmacFileAioMgrNormalProcessTaskList(pTasksHead, pAioMgr, pEndpoint);
545 AssertRC(rc);
546 }
547 }
548
549 return rc;
550}
551
/**
 * Processes the blocking event posted by another thread (add/remove/close
 * endpoint, shutdown, suspend, resume) and signals the poster when the event
 * has been fully handled.
 *
 * @returns VBox status code.
 * @param   pAioMgr    The I/O manager the event was posted to.
 */
static int pdmacFileAioMgrNormalProcessBlockingEvent(PPDMACEPFILEMGR pAioMgr)
{
    int rc = VINF_SUCCESS;
    /* Set when the posting thread can be released right away. */
    bool fNotifyWaiter = false;

    Assert(pAioMgr->fBlockingEventPending);

    switch (pAioMgr->enmBlockingEvent)
    {
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointNew = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointNew), ("Adding endpoint event without a endpoint to add\n"));

            pEndpointNew->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE;

            /* Link the new endpoint at the head of the manager's list. */
            pEndpointNew->AioMgr.pEndpointNext = pAioMgr->pEndpointsHead;
            pEndpointNew->AioMgr.pEndpointPrev = NULL;
            if (pAioMgr->pEndpointsHead)
                pAioMgr->pEndpointsHead->AioMgr.pEndpointPrev = pEndpointNew;
            pAioMgr->pEndpointsHead = pEndpointNew;

            /* Assign the completion point to this file. */
            rc = RTFileAioCtxAssociateWithFile(pAioMgr->hAioCtx, pEndpointNew->File);
            fNotifyWaiter = true;
            pAioMgr->cEndpoints++;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_REMOVE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointRemove = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointRemove), ("Removing endpoint event without a endpoint to remove\n"));

            pEndpointRemove->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_REMOVING;
            /* If requests are still active the waiter is released later, once
             * the last request for the endpoint completes. */
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointRemove);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT:
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointClose = (PPDMASYNCCOMPLETIONENDPOINTFILE)ASMAtomicReadPtr((void * volatile *)&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);
            AssertMsg(VALID_PTR(pEndpointClose), ("Close endpoint event without a endpoint to close\n"));

            /* Make sure all tasks finished. Process the queues a last time first. */
            rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpointClose);
            AssertRC(rc);

            pEndpointClose->enmState = PDMASYNCCOMPLETIONENDPOINTFILESTATE_CLOSING;
            fNotifyWaiter = !pdmacFileAioMgrNormalRemoveEndpoint(pEndpointClose);
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SHUTDOWN:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SHUTDOWN;
            /* With requests still active, the waiter is released when the
             * last one completes. */
            if (!pAioMgr->cRequestsActive)
                fNotifyWaiter = true;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_SUSPEND:
        {
            /* The main loop notices the state change and stops processing. */
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_SUSPENDING;
            break;
        }
        case PDMACEPFILEAIOMGRBLOCKINGEVENT_RESUME:
        {
            pAioMgr->enmState = PDMACEPFILEMGRSTATE_RUNNING;
            fNotifyWaiter = true;
            break;
        }
        default:
            AssertReleaseMsgFailed(("Invalid event type %d\n", pAioMgr->enmBlockingEvent));
    }

    if (fNotifyWaiter)
    {
        /* Clear the pending flag before waking the poster up. */
        ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);
        pAioMgr->enmBlockingEvent = PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID;

        /* Release the waiting thread. */
        LogFlow(("Signalling waiter\n"));
        rc = RTSemEventSignal(pAioMgr->EventSemBlock);
        AssertRC(rc);
    }

    return rc;
}
637
/** Helper macro for checking for error codes.
 *
 * On failure it runs the normal-manager error handler and returns its status
 * code from the enclosing function.  Wrapped in do/while(0) so the expansion
 * behaves like a single statement in every context (e.g. as the body of an
 * unbraced if/else, where the previous bare-if form risked dangling-else
 * mispairing).
 */
#define CHECK_RC(pAioMgr, rc) \
    do \
    { \
        if (RT_FAILURE(rc)) \
        { \
            int rc2 = pdmacFileAioMgrNormalErrorHandler(pAioMgr, rc, RT_SRC_POS); \
            return rc2; \
        } \
    } while (0)
645
/**
 * The normal I/O manager using the RTFileAio* API
 *
 * Event loop: waits to be woken, handles blocking events, queues new tasks
 * from all assigned endpoints, then waits for and completes outstanding
 * async requests until none remain.
 *
 * @returns VBox status code.
 * @param   ThreadSelf    Handle of the thread.
 * @param   pvUser        Opaque user data.
 */
int pdmacFileAioMgrNormal(RTTHREAD ThreadSelf, void *pvUser)
{
    int rc = VINF_SUCCESS;
    PPDMACEPFILEMGR pAioMgr = (PPDMACEPFILEMGR)pvUser;
    /* End of the current load-statistics interval. */
    uint64_t uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;

    while (   (pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING)
           || (pAioMgr->enmState == PDMACEPFILEMGRSTATE_SUSPENDING))
    {
        /* Sleep until another thread wakes us up, unless a wakeup was
         * already posted while we were busy. */
        ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, true);
        if (!ASMAtomicReadBool(&pAioMgr->fWokenUp))
            rc = RTSemEventWait(pAioMgr->EventSem, RT_INDEFINITE_WAIT);
        ASMAtomicWriteBool(&pAioMgr->fWaitingEventSem, false);
        AssertRC(rc);

        LogFlow(("Got woken up\n"));
        ASMAtomicWriteBool(&pAioMgr->fWokenUp, false);

        /* Check for an external blocking event first. */
        if (pAioMgr->fBlockingEventPending)
        {
            rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
            CHECK_RC(pAioMgr, rc);
        }

        if (RT_LIKELY(pAioMgr->enmState == PDMACEPFILEMGRSTATE_RUNNING))
        {
            /* Check the assigned endpoints for new tasks if there isn't a flush request active at the moment. */
            PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint = pAioMgr->pEndpointsHead;

            while (pEndpoint)
            {
                if (!pEndpoint->pFlushReq && (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE))
                {
                    rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
                    CHECK_RC(pAioMgr, rc);
                }

                pEndpoint = pEndpoint->AioMgr.pEndpointNext;
            }

            /* Completion loop: runs until every submitted request finished. */
            while (pAioMgr->cRequestsActive)
            {
                RTFILEAIOREQ apReqs[20];
                uint32_t cReqsCompleted = 0;
                size_t cReqsWait;

                /* Wait for at most a batch worth of completions at a time. */
                if (pAioMgr->cRequestsActive > RT_ELEMENTS(apReqs))
                    cReqsWait = RT_ELEMENTS(apReqs);
                else
                    cReqsWait = pAioMgr->cRequestsActive;

                rc = RTFileAioCtxWait(pAioMgr->hAioCtx,
                                      cReqsWait,
                                      RT_INDEFINITE_WAIT, apReqs,
                                      RT_ELEMENTS(apReqs), &cReqsCompleted);
                /* VERR_INTERRUPTED means an external wakeup, not an error. */
                if (RT_FAILURE(rc) && (rc != VERR_INTERRUPTED))
                    CHECK_RC(pAioMgr, rc);

                for (uint32_t i = 0; i < cReqsCompleted; i++)
                {
                    size_t cbTransfered = 0;
                    int rcReq = RTFileAioReqGetRC(apReqs[i], &cbTransfered);
                    PPDMACTASKFILE pTask = (PPDMACTASKFILE)RTFileAioReqGetUser(apReqs[i]);

                    pEndpoint = pTask->pEndpoint;

                    AssertMsg(   RT_SUCCESS(rcReq)
                              && (   (cbTransfered == pTask->DataSeg.cbSeg)
                                  || (pTask->fBounceBuffer)),
                              ("Task didn't completed successfully (rc=%Rrc) or was incomplete (cbTransfered=%u)\n", rc, cbTransfered));

                    if (pTask->fPrefetch)
                    {
                        /* The prefetch read of an unaligned write completed:
                         * merge the caller's data into the bounce buffer and
                         * resubmit the same handle as the real write.  Note
                         * that the active counters stay untouched because the
                         * request remains in flight. */
                        Assert(pTask->enmTransferType == PDMACTASKFILETRANSFER_WRITE);
                        Assert(pTask->fBounceBuffer);

                        memcpy(((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
                               pTask->DataSeg.pvSeg,
                               pTask->DataSeg.cbSeg);

                        /* Write it now. */
                        pTask->fPrefetch = false;
                        size_t cbToTransfer = RT_ALIGN_Z(pTask->DataSeg.cbSeg, 512);
                        RTFOFF OffStart = pTask->Off & ~(RTFOFF)(512-1);

                        /* Grow the file if needed. */
                        if (RT_UNLIKELY((pTask->Off + pTask->DataSeg.cbSeg) > pEndpoint->cbFile))
                        {
                            ASMAtomicWriteU64(&pEndpoint->cbFile, pTask->Off + pTask->DataSeg.cbSeg);
                            RTFileSetSize(pEndpoint->File, pTask->Off + pTask->DataSeg.cbSeg);
                        }

                        rc = RTFileAioReqPrepareWrite(apReqs[i], pEndpoint->File,
                                                      OffStart, pTask->pvBounceBuffer, cbToTransfer, pTask);
                        AssertRC(rc);
                        rc = RTFileAioCtxSubmit(pAioMgr->hAioCtx, &apReqs[i], 1);
                        AssertRC(rc);
                    }
                    else
                    {
                        if (pTask->fBounceBuffer)
                        {
                            /* For reads, copy the data out of the aligned
                             * bounce buffer into the caller's segment. */
                            if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                                memcpy(pTask->DataSeg.pvSeg,
                                       ((uint8_t *)pTask->pvBounceBuffer) + pTask->uBounceBufOffset,
                                       pTask->DataSeg.cbSeg);

                            RTMemPageFree(pTask->pvBounceBuffer);
                        }

                        /* Put the entry on the free array */
                        pAioMgr->pahReqsFree[pAioMgr->iFreeEntryNext] = apReqs[i];
                        pAioMgr->iFreeEntryNext = (pAioMgr->iFreeEntryNext + 1) %pAioMgr->cReqEntries;

                        pAioMgr->cRequestsActive--;
                        pEndpoint->AioMgr.cRequestsActive--;
                        pEndpoint->AioMgr.cReqsProcessed++;

                        /* Call completion callback */
                        pTask->pfnCompleted(pTask, pTask->pvUser);
                        pdmacFileTaskFree(pEndpoint, pTask);

                        /*
                         * If there is no request left on the endpoint but a flush request is set
                         * it completed now and we notify the owner.
                         * Furthermore we look for new requests and continue.
                         */
                        if (!pEndpoint->AioMgr.cRequestsActive && pEndpoint->pFlushReq)
                        {
                            /* Call completion callback */
                            pTask = pEndpoint->pFlushReq;
                            pEndpoint->pFlushReq = NULL;

                            AssertMsg(pTask->pEndpoint == pEndpoint, ("Endpoint of the flush request does not match assigned one\n"));

                            pTask->pfnCompleted(pTask, pTask->pvUser);
                            pdmacFileTaskFree(pEndpoint, pTask);
                        }
                    }

                    if (pEndpoint->enmState == PDMASYNCCOMPLETIONENDPOINTFILESTATE_ACTIVE)
                    {
                        if (!pEndpoint->pFlushReq)
                        {
                            /* Check if there are events on the endpoint. */
                            rc = pdmacFileAioMgrNormalQueueReqs(pAioMgr, pEndpoint);
                            CHECK_RC(pAioMgr, rc);
                        }
                    }
                    else if (!pEndpoint->AioMgr.cRequestsActive)
                    {
                        /* The endpoint is being removed/closed and its last
                         * request just finished. */
                        /* Reopen the file so that the new endpoint can reassociate with the file */
                        RTFileClose(pEndpoint->File);
                        rc = RTFileOpen(&pEndpoint->File, pEndpoint->Core.pszUri, pEndpoint->fFlags);
                        AssertRC(rc);

                        if (pEndpoint->AioMgr.fMoving)
                        {
                            /* Hand the endpoint over to its destination manager. */
                            pEndpoint->AioMgr.fMoving = false;
                            pdmacFileAioMgrAddEndpoint(pEndpoint->AioMgr.pAioMgrDst, pEndpoint);
                        }
                        else
                        {
                            Assert(pAioMgr->fBlockingEventPending);
                            ASMAtomicWriteBool(&pAioMgr->fBlockingEventPending, false);

                            /* Release the waiting thread. */
                            LogFlow(("Signalling waiter\n"));
                            rc = RTSemEventSignal(pAioMgr->EventSemBlock);
                            AssertRC(rc);
                        }
                    }
                }

                /* Check for an external blocking event before we go to sleep again. */
                if (pAioMgr->fBlockingEventPending)
                {
                    rc = pdmacFileAioMgrNormalProcessBlockingEvent(pAioMgr);
                    CHECK_RC(pAioMgr, rc);
                }

                /* Update load statistics. */
                uint64_t uMillisCurr = RTTimeMilliTS();
                if (uMillisCurr > uMillisEnd)
                {
                    PPDMASYNCCOMPLETIONENDPOINTFILE pEndpointCurr = pAioMgr->pEndpointsHead;

                    /* Calculate timespan. */
                    uMillisCurr -= uMillisEnd;

                    while (pEndpointCurr)
                    {
                        /* NOTE(review): the divisor is in milliseconds, so
                         * despite the name this is requests per elapsed-ms
                         * interval, not per second — confirm intended. */
                        pEndpointCurr->AioMgr.cReqsPerSec = pEndpointCurr->AioMgr.cReqsProcessed / (uMillisCurr + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD);
                        pEndpointCurr->AioMgr.cReqsProcessed = 0;
                        pEndpointCurr = pEndpointCurr->AioMgr.pEndpointNext;
                    }

                    /* Set new update interval */
                    uMillisEnd = RTTimeMilliTS() + PDMACEPFILEMGR_LOAD_UPDATE_PERIOD;
                }
            }
        }
    }

    return rc;
}
860
861#undef CHECK_RC
862
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette