VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/fileaio-posix.cpp@ 29698

Last change on this file since 29698 was 29477, checked in by vboxsync, 15 years ago

fileaio-posix.cpp: Fix assertion (It is possible to have 0 requests submitted on return if the host limit was reached already due to another process. OS X is an example which has a global limit of 16 concurrent aio requests for all processes. Reached easily when two VMs are running...)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.4 KB
Line 
1/* $Id: fileaio-posix.cpp 29477 2010-05-14 14:59:29Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for POSIX compliant host platforms.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP RTLOGGROUP_DIR
32#include <iprt/asm.h>
33#include <iprt/file.h>
34#include <iprt/mem.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/err.h>
38#include <iprt/log.h>
39#include <iprt/thread.h>
40#include <iprt/semaphore.h>
41#include "internal/fileaio.h"
42
43#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
44# include <sys/types.h>
45# include <sys/sysctl.h> /* for sysctlbyname */
46#endif
47#if defined(RT_OS_FREEBSD)
48# include <fcntl.h> /* O_SYNC */
49#endif
50#include <aio.h>
51#include <errno.h>
52#include <time.h>
53
/*
 * AIO_LISTIO_MAX is the maximum number of control blocks lio_listio() accepts
 * in a single call.  Linux does not define it, so fall back to a very large
 * value; the submission loop below clamps each batch to this constant.
 */
#ifndef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX UINT32_MAX
#endif

#if 0 /* Only used for debugging: force tiny batches to exercise the overflow wait list. */
# undef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX 16
#endif

/** Invalid entry in the waiting array (request is on the overflow list instead). */
#define RTFILEAIOCTX_WAIT_ENTRY_INVALID (~0U)

/** No-op replacement for rtFileAioCtxDump for non debug builds. */
#ifndef LOG_ENABLED
# define rtFileAioCtxDump(pCtxInt) do {} while (0)
#endif
75
76/*******************************************************************************
77* Structures and Typedefs *
78*******************************************************************************/
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block.  FIRST ELEMENT!  Arrays of request pointers are
     * cast directly to aiocb pointer arrays for lio_listio() and aio_suspend(),
     * so this member must stay at offset 0. */
    struct aiocb                 AioCB;
    /** Next element in the chain. */
    struct RTFILEAIOREQINTERNAL *pNext;
    /** Previous element in the chain. */
    struct RTFILEAIOREQINTERNAL *pPrev;
    /** Current state the request is in. */
    RTFILEAIOREQSTATE            enmState;
    /** Flag whether this is a flush request (goes through aio_fsync() instead
     * of lio_listio()). */
    bool                         fFlush;
    /** Flag indicating if the request was canceled. */
    volatile bool                fCanceled;
    /** Opaque user data, returned by RTFileAioReqGetUser(). */
    void                        *pvUser;
    /** Number of bytes actually transferred (valid when completed). */
    size_t                       cbTransfered;
    /** Status code of the completed request. */
    int                          Rc;
    /** Completion context we are assigned to, NULL while not submitted. */
    struct RTFILEAIOCTXINTERNAL *pCtxInt;
    /** Entry in the waiting array the request is in, or
     * RTFILEAIOCTX_WAIT_ENTRY_INVALID when it is queued on the overflow
     * wait list. */
    unsigned                     iWaitingList;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t                     u32Magic;
} RTFILEAIOREQINTERNAL, *PRTFILEAIOREQINTERNAL;
109
/**
 * Async I/O completion context state.
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Current number of requests active on this context. */
    volatile int32_t               cRequests;
    /** Maximum number of requests this context can handle. */
    uint32_t                       cMaxRequests;
    /** The ID of the thread which is currently waiting for requests. */
    volatile RTTHREAD              hThreadWait;
    /** Flag whether the thread was woken up (external RTFileAioCtxWakeup()). */
    volatile bool                  fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool                  fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t                       u32Magic;
    /** Flag whether the thread was woken up due to a internal event
     * (new submissions or a cancellation). */
    volatile bool                  fWokenUpInternal;
    /** List of new requests which needs to be inserted into apReqs by the
     * waiting thread.  Submitters publish request chains into a free slot;
     * the waiter drains all slots in rtFileAioCtxProcessEvents(). */
    volatile PRTFILEAIOREQINTERNAL apReqsNewHead[5];
    /** Special entry for requests which are canceled. Because only one
     * request can be canceled at a time and the thread canceling the request
     * has to wait we need only one entry. */
    volatile PRTFILEAIOREQINTERNAL pReqToCancel;
    /** Event semaphore the canceling thread is waiting for completion of
     * the operation. */
    RTSEMEVENT                     SemEventCancel;
    /** Head of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL          pReqsWaitHead;
    /** Tail of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL          pReqsWaitTail;
    /** Maximum number of elements in the waiting array. */
    unsigned                       cReqsWaitMax;
    /** First free slot in the waiting list. */
    unsigned                       iFirstFree;
    /** List of requests we are currently waiting on.
     * Size depends on cMaxRequests and AIO_LISTIO_MAX.
     * Allocated as trailing storage together with this structure. */
    volatile PRTFILEAIOREQINTERNAL apReqs[1];
} RTFILEAIOCTXINTERNAL, *PRTFILEAIOCTXINTERNAL;
151
152/**
153 * Internal worker for waking up the waiting thread.
154 */
155static void rtFileAioCtxWakeup(PRTFILEAIOCTXINTERNAL pCtxInt)
156{
157 /*
158 * Read the thread handle before the status flag.
159 * If we read the handle after the flag we might
160 * end up with an invalid handle because the thread
161 * waiting in RTFileAioCtxWakeup() might get scheduled
162 * before we read the flag and returns.
163 * We can ensure that the handle is valid if fWaiting is true
164 * when reading the handle before the status flag.
165 */
166 RTTHREAD hThread;
167 ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
168 bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
169 if (fWaiting)
170 {
171 /*
172 * If a thread waits the handle must be valid.
173 * It is possible that the thread returns from
174 * aio_suspend() before the signal is send.
175 * This is no problem because we already set fWokenUp
176 * to true which will let the thread return VERR_INTERRUPTED
177 * and the next call to RTFileAioCtxWait() will not
178 * return VERR_INTERRUPTED because signals are not saved
179 * and will simply vanish if the destination thread can't
180 * receive it.
181 */
182 Assert(hThread != NIL_RTTHREAD);
183 RTThreadPoke(hThread);
184 }
185}
186
/**
 * Internal worker processing events and inserting new requests into the waiting list.
 *
 * Only called by the thread sitting in RTFileAioCtxWait(), so the waiting
 * array and the overflow wait list need no locking; only the handoff slots
 * and flags are accessed atomically.
 *
 * @returns VINF_SUCCESS normally, VERR_INTERRUPTED if an external
 *          RTFileAioCtxWakeup() was requested.
 * @param   pCtxInt     The context to process events for.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            /* Take over the whole chain a submitter published in this slot. */
            PRTFILEAIOREQINTERNAL pReqHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void* volatile*)&pCtxInt->apReqsNewHead[iSlot],
                                                                                     NULL);

            /* Fill free entries of the waiting array first. */
            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list (array is full at this point). */
            if (pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                {
                    RTFIELAIOREQ_ASSERT_STATE(pReqHead->pNext, SUBMITTED);
                    pReqHead = pReqHead->pNext;
                }

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled (posted by RTFileAioReqCancel()). */
        PRTFILEAIOREQINTERNAL pReqToCancel = (PRTFILEAIOREQINTERNAL)ASMAtomicReadPtr((void* volatile*)&pCtxInt->pReqToCancel);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because it is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Put it out of the waiting list by moving the last entry into its slot. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            /* Drop the request count and acknowledge to the canceling thread. */
            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled request not which is not in this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        /* Not an internal event: an external RTFileAioCtxWakeup() interrupts the wait. */
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
297
298RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
299{
300 int rcBSD = 0;
301 AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);
302
303#if defined(RT_OS_DARWIN)
304 int cReqsOutstandingMax = 0;
305 size_t cbParameter = sizeof(int);
306
307 rcBSD = sysctlbyname("kern.aioprocmax", /* name */
308 &cReqsOutstandingMax, /* Where to store the old value. */
309 &cbParameter, /* Size of the memory pointed to. */
310 NULL, /* Where the new value is located. */
311 NULL); /* Where the size of the new value is stored. */
312 if (rcBSD == -1)
313 return RTErrConvertFromErrno(errno);
314
315 pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
316 pAioLimits->cbBufferAlignment = 0;
317#elif defined(RT_OS_FREEBSD)
318 /*
319 * The AIO API is implemented in a kernel module which is not
320 * loaded by default.
321 * If it is loaded there are additional sysctl parameters.
322 */
323 int cReqsOutstandingMax = 0;
324 size_t cbParameter = sizeof(int);
325
326 rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
327 &cReqsOutstandingMax, /* Where to store the old value. */
328 &cbParameter, /* Size of the memory pointed to. */
329 NULL, /* Where the new value is located. */
330 NULL); /* Where the size of the new value is stored. */
331 if (rcBSD == -1)
332 {
333 /* ENOENT means the value is unknown thus the module is not loaded. */
334 if (errno == ENOENT)
335 return VERR_NOT_SUPPORTED;
336 else
337 return RTErrConvertFromErrno(errno);
338 }
339
340 pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
341 pAioLimits->cbBufferAlignment = 0;
342#else
343 pAioLimits->cReqsOutstandingMax = RTFILEAIO_UNLIMITED_REQS;
344 pAioLimits->cbBufferAlignment = 0;
345#endif
346
347 return VINF_SUCCESS;
348}
349
350RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
351{
352 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
353
354 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
355 if (RT_UNLIKELY(!pReqInt))
356 return VERR_NO_MEMORY;
357
358 pReqInt->pCtxInt = NULL;
359 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
360 pReqInt->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
361 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
362
363 *phReq = (RTFILEAIOREQ)pReqInt;
364
365 return VINF_SUCCESS;
366}
367
368
369RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
370{
371 /*
372 * Validate the handle and ignore nil.
373 */
374 if (hReq == NIL_RTFILEAIOREQ)
375 return VINF_SUCCESS;
376 PRTFILEAIOREQINTERNAL pReqInt = hReq;
377 RTFILEAIOREQ_VALID_RETURN(pReqInt);
378 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
379
380 /*
381 * Trash the magic and free it.
382 */
383 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
384 RTMemFree(pReqInt);
385 return VINF_SUCCESS;
386}
387
388/**
389 * Worker setting up the request.
390 */
391DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
392 unsigned uTransferDirection,
393 RTFOFF off, void *pvBuf, size_t cbTransfer,
394 void *pvUser)
395{
396 /*
397 * Validate the input.
398 */
399 PRTFILEAIOREQINTERNAL pReqInt = hReq;
400 RTFILEAIOREQ_VALID_RETURN(pReqInt);
401 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
402 Assert(hFile != NIL_RTFILE);
403 AssertPtr(pvBuf);
404 Assert(off >= 0);
405 Assert(cbTransfer > 0);
406
407 memset(&pReqInt->AioCB, 0, sizeof(struct aiocb));
408 pReqInt->fFlush = false;
409 pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
410 pReqInt->AioCB.aio_fildes = (int)hFile;
411 pReqInt->AioCB.aio_offset = off;
412 pReqInt->AioCB.aio_nbytes = cbTransfer;
413 pReqInt->AioCB.aio_buf = pvBuf;
414 pReqInt->pvUser = pvUser;
415 pReqInt->pCtxInt = NULL;
416 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
417 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
418
419 return VINF_SUCCESS;
420}
421
422
423RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
424 void *pvBuf, size_t cbRead, void *pvUser)
425{
426 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
427 off, pvBuf, cbRead, pvUser);
428}
429
430
431RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
432 void const *pvBuf, size_t cbWrite, void *pvUser)
433{
434 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
435 off, (void *)pvBuf, cbWrite, pvUser);
436}
437
438
439RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
440{
441 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
442
443 RTFILEAIOREQ_VALID_RETURN(pReqInt);
444 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
445 Assert(hFile != NIL_RTFILE);
446
447 pReqInt->fFlush = true;
448 pReqInt->AioCB.aio_fildes = (int)hFile;
449 pReqInt->AioCB.aio_offset = 0;
450 pReqInt->AioCB.aio_nbytes = 0;
451 pReqInt->AioCB.aio_buf = NULL;
452 pReqInt->pvUser = pvUser;
453 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
454 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
455
456 return VINF_SUCCESS;
457}
458
459
460RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
461{
462 PRTFILEAIOREQINTERNAL pReqInt = hReq;
463 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
464
465 return pReqInt->pvUser;
466}
467
468
/**
 * Cancels a submitted request.
 *
 * On successful cancellation the request is handed to the thread waiting in
 * RTFileAioCtxWait() via the context's single pReqToCancel slot, and this
 * thread blocks on SemEventCancel until the waiter has unlinked it.
 *
 * @returns VINF_SUCCESS, VERR_FILE_AIO_COMPLETED, VERR_FILE_AIO_IN_PROGRESS
 *          or an error converted from errno.
 */
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    /* Mark the request canceled before asking the system. */
    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled.
         */
        AssertMsg(VALID_PTR(pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        /* Only one cancellation may be in flight per context (single slot). */
        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, NULL);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}
508
509
510RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
511{
512 PRTFILEAIOREQINTERNAL pReqInt = hReq;
513 RTFILEAIOREQ_VALID_RETURN(pReqInt);
514 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
515 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
516 AssertPtrNull(pcbTransfered);
517
518 if ( (RT_SUCCESS(pReqInt->Rc))
519 && (pcbTransfered))
520 *pcbTransfered = pReqInt->cbTransfered;
521
522 return pReqInt->Rc;
523}
524
525
526RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
527{
528 PRTFILEAIOCTXINTERNAL pCtxInt;
529 unsigned cReqsWaitMax;
530
531 AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);
532
533 if (cAioReqsMax == RTFILEAIO_UNLIMITED_REQS)
534 return VERR_OUT_OF_RANGE;
535
536 cReqsWaitMax = RT_MIN(cAioReqsMax, AIO_LISTIO_MAX);
537
538 pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ( sizeof(RTFILEAIOCTXINTERNAL)
539 + cReqsWaitMax * sizeof(PRTFILEAIOREQINTERNAL));
540 if (RT_UNLIKELY(!pCtxInt))
541 return VERR_NO_MEMORY;
542
543 /* Create event semaphore. */
544 int rc = RTSemEventCreate(&pCtxInt->SemEventCancel);
545 if (RT_FAILURE(rc))
546 {
547 RTMemFree(pCtxInt);
548 return rc;
549 }
550
551 pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
552 pCtxInt->cMaxRequests = cAioReqsMax;
553 pCtxInt->cReqsWaitMax = cReqsWaitMax;
554 *phAioCtx = (RTFILEAIOCTX)pCtxInt;
555
556 return VINF_SUCCESS;
557}
558
559
560RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
561{
562 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
563
564 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
565
566 if (RT_UNLIKELY(pCtxInt->cRequests))
567 return VERR_FILE_AIO_BUSY;
568
569 RTSemEventDestroy(pCtxInt->SemEventCancel);
570 RTMemFree(pCtxInt);
571
572 return VINF_SUCCESS;
573}
574
575
576RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
577{
578 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
579
580 if (hAioCtx == NIL_RTFILEAIOCTX)
581 return RTFILEAIO_UNLIMITED_REQS;
582 else
583 return pCtxInt->cMaxRequests;
584}
585
RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
{
    /* Nothing to do on this backend: each request carries its own file
     * descriptor in the aiocb, so no explicit file/context association is
     * needed. */
    return VINF_SUCCESS;
}
590
#ifdef LOG_ENABLED
/**
 * Dumps the state of an async I/O context to the flow log.
 *
 * Debug helper only; non-logging builds replace calls with a no-op macro.
 *
 * @param   pCtxInt     The context to dump.
 */
static void rtFileAioCtxDump(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    LogFlow(("cRequests=%d\n", pCtxInt->cRequests));
    LogFlow(("cMaxRequests=%u\n", pCtxInt->cMaxRequests));
    LogFlow(("hThreadWait=%#p\n", pCtxInt->hThreadWait));
    LogFlow(("fWokenUp=%RTbool\n", pCtxInt->fWokenUp));
    LogFlow(("fWaiting=%RTbool\n", pCtxInt->fWaiting));
    LogFlow(("fWokenUpInternal=%RTbool\n", pCtxInt->fWokenUpInternal));
    for (unsigned i = 0; i < RT_ELEMENTS(pCtxInt->apReqsNewHead); i++)
        LogFlow(("apReqsNewHead[%u]=%#p\n", i, pCtxInt->apReqsNewHead[i]));
    LogFlow(("pReqToCancel=%#p\n", pCtxInt->pReqToCancel));
    LogFlow(("pReqsWaitHead=%#p\n", pCtxInt->pReqsWaitHead));
    LogFlow(("pReqsWaitTail=%#p\n", pCtxInt->pReqsWaitTail));
    LogFlow(("cReqsWaitMax=%u\n", pCtxInt->cReqsWaitMax));
    LogFlow(("iFirstFree=%u\n", pCtxInt->iFirstFree));
    for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
        LogFlow(("apReqs[%u]=%#p\n", i, pCtxInt->apReqs[i]));
}
#endif
614
/**
 * Submits a set of requests to the context.
 *
 * Read/write requests are batched through lio_listio() (at most
 * AIO_LISTIO_MAX per call); flush requests terminate a batch because they
 * must be issued via aio_fsync() instead.  Successfully submitted requests
 * are chained together and published to the waiting thread through the
 * apReqsNewHead handoff slots.
 *
 * @returns IPRT status code.  On failure some requests may already have been
 *          submitted (the caller can inspect their states).
 * @param   hAioCtx     The context to submit to.
 * @param   pahReqs     Array of prepared requests.
 * @param   cReqs       Number of requests in the array (must be non-zero).
 */
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    /* Chain of everything successfully submitted in this call. */
    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        /* Collect the next batch: stops at cReqs, AIO_LISTIO_MAX or the
         * first flush request (handled separately below). */
        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            /* Flushes cannot go through lio_listio(); end the batch here. */
            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            /* The request array doubles as an aiocb pointer array because the
             * aiocb is the first member of the request structure. */
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                /* Account only for the requests which really went in. */
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs    -= cReqsSubmit;
            pahReqs  += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0],
                                                                                     NULL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
850
851
852RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
853 PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
854{
855 int rc = VINF_SUCCESS;
856 int cRequestsCompleted = 0;
857 PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
858 struct timespec Timeout;
859 struct timespec *pTimeout = NULL;
860 uint64_t StartNanoTS = 0;
861
862 LogFlowFunc(("hAioCtx=%#p cMinReqs=%zu cMillies=%u pahReqs=%#p cReqs=%zu pcbReqs=%#p\n",
863 hAioCtx, cMinReqs, cMillies, pahReqs, cReqs, pcReqs));
864
865 /* Check parameters. */
866 AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
867 AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
868 AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
869 AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
870 AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);
871
872 rtFileAioCtxDump(pCtxInt);
873
874 int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);
875
876 if (RT_UNLIKELY(cRequestsWaiting <= 0))
877 return VERR_FILE_AIO_NO_REQUEST;
878
879 if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
880 return VERR_INVALID_PARAMETER;
881
882 if (cMillies != RT_INDEFINITE_WAIT)
883 {
884 Timeout.tv_sec = cMillies / 1000;
885 Timeout.tv_nsec = (cMillies % 1000) * 1000000;
886 pTimeout = &Timeout;
887 StartNanoTS = RTTimeNanoTS();
888 }
889
890 /* Wait for at least one. */
891 if (!cMinReqs)
892 cMinReqs = 1;
893
894 /* For the wakeup call. */
895 Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
896 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());
897
898 /* Update the waiting list once before we enter the loop. */
899 rc = rtFileAioCtxProcessEvents(pCtxInt);
900
901 while ( cMinReqs
902 && RT_SUCCESS_NP(rc))
903 {
904#ifdef RT_STRICT
905 if (RT_UNLIKELY(!pCtxInt->iFirstFree))
906 {
907 for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
908 RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);
909
910 AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
911 pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
912 }
913#endif
914
915 LogFlow(("Waiting for %d requests to complete\n", pCtxInt->iFirstFree));
916 rtFileAioCtxDump(pCtxInt);
917
918 ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
919 int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
920 pCtxInt->iFirstFree, pTimeout);
921 ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
922 if (rcPosix < 0)
923 {
924 LogFlow(("aio_suspend failed %d nent=%u\n", errno, pCtxInt->iFirstFree));
925 /* Check that this is an external wakeup event. */
926 if (errno == EINTR)
927 rc = rtFileAioCtxProcessEvents(pCtxInt);
928 else
929 rc = RTErrConvertFromErrno(errno);
930 }
931 else
932 {
933 /* Requests finished. */
934 unsigned iReqCurr = 0;
935 unsigned cDone = 0;
936
937 /* Remove completed requests from the waiting list. */
938 while ( (iReqCurr < pCtxInt->iFirstFree)
939 && (cDone < cReqs))
940 {
941 PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
942 int rcReq = aio_error(&pReq->AioCB);
943
944 if (rcReq != EINPROGRESS)
945 {
946 /* Completed store the return code. */
947 if (rcReq == 0)
948 {
949 pReq->Rc = VINF_SUCCESS;
950 /* Call aio_return() to free ressources. */
951 pReq->cbTransfered = aio_return(&pReq->AioCB);
952 }
953 else
954 {
955#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
956 pReq->Rc = RTErrConvertFromErrno(errno);
957#else
958 pReq->Rc = RTErrConvertFromErrno(rcReq);
959#endif
960 }
961
962 /* Mark the request as finished. */
963 RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
964 cDone++;
965
966 /* If there are other entries waiting put the head into the now free entry. */
967 if (pCtxInt->pReqsWaitHead)
968 {
969 PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;
970
971 pCtxInt->pReqsWaitHead = pReqInsert->pNext;
972 if (!pCtxInt->pReqsWaitHead)
973 {
974 /* List is empty now. Clear tail too. */
975 pCtxInt->pReqsWaitTail = NULL;
976 }
977
978 pReqInsert->iWaitingList = pReq->iWaitingList;
979 pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
980 iReqCurr++;
981 }
982 else
983 {
984 /*
985 * Move the last entry into the current position to avoid holes
986 * but only if it is not the last element already.
987 */
988 if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
989 {
990 pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
991 pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
992 }
993 else
994 pCtxInt->iFirstFree--;
995
996 pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
997 }
998
999 /* Put the request into the completed list. */
1000 pahReqs[cRequestsCompleted++] = pReq;
1001 pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
1002 }
1003 else
1004 iReqCurr++;
1005 }
1006
1007 AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
1008 cReqs, cDone));
1009 cReqs -= cDone;
1010 cMinReqs = RT_MAX(cMinReqs, cDone) - cDone;
1011 ASMAtomicSubS32(&pCtxInt->cRequests, cDone);
1012
1013 AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));
1014
1015 if (!cMinReqs)
1016 break;
1017
1018 if (cMillies != RT_INDEFINITE_WAIT)
1019 {
1020 uint64_t TimeDiff;
1021
1022 /* Recalculate the timeout. */
1023 TimeDiff = RTTimeSystemNanoTS() - StartNanoTS;
1024 Timeout.tv_sec = Timeout.tv_sec - (TimeDiff / 1000000);
1025 Timeout.tv_nsec = Timeout.tv_nsec - (TimeDiff % 1000000);
1026 }
1027
1028 /* Check for new elements. */
1029 rc = rtFileAioCtxProcessEvents(pCtxInt);
1030 }
1031 }
1032
1033 *pcReqs = cRequestsCompleted;
1034 Assert(pCtxInt->hThreadWait == RTThreadSelf());
1035 ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);
1036
1037 rtFileAioCtxDump(pCtxInt);
1038
1039 return rc;
1040}
1041
1042
1043RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
1044{
1045 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
1046 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
1047
1048 /** @todo r=bird: Define the protocol for how to resume work after calling
1049 * this function. */
1050
1051 bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);
1052 if (!fWokenUp)
1053 rtFileAioCtxWakeup(pCtxInt);
1054
1055 return VINF_SUCCESS;
1056}
1057
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette