VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/posix/fileaio-posix.cpp@25659

Last change on this file since 25659 was 25645, checked in by vboxsync, 15 years ago

IPRT,DoxyFile.Core: Mopped up the errors in the IPRT doxygen run.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 34.4 KB
/* $Id: fileaio-posix.cpp 25645 2010-01-05 09:29:31Z vboxsync $ */
/** @file
 * IPRT - File async I/O, native implementation for POSIX compliant host platforms.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP RTLOGGROUP_DIR
#include <iprt/asm.h>
#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/thread.h>
#include <iprt/semaphore.h>
#include "internal/fileaio.h"

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
# include <sys/types.h>
# include <sys/sysctl.h> /* for sysctlbyname */
#endif
#if defined(RT_OS_FREEBSD)
# include <fcntl.h> /* O_SYNC */
#endif
#include <aio.h>
#include <errno.h>
#include <time.h>

/*
 * Linux does not define this value,
 * so just define it with a really
 * big value.
 */
#ifndef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX UINT32_MAX
#endif

#if 0 /* Only used for debugging */
# undef AIO_LISTIO_MAX
# define AIO_LISTIO_MAX 16
#endif

/** Invalid entry in the waiting array. */
#define RTFILEAIOCTX_WAIT_ENTRY_INVALID (~0U)

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block. FIRST ELEMENT! */
    struct aiocb                    AioCB;
    /** Next element in the chain. */
    struct RTFILEAIOREQINTERNAL    *pNext;
    /** Previous element in the chain. */
    struct RTFILEAIOREQINTERNAL    *pPrev;
    /** Current state the request is in. */
    RTFILEAIOREQSTATE               enmState;
    /** Flag whether this is a flush request. */
    bool                            fFlush;
    /** Flag indicating if the request was canceled. */
    volatile bool                   fCanceled;
    /** Opaque user data. */
    void                           *pvUser;
    /** Number of bytes actually transferred. */
    size_t                          cbTransfered;
    /** Status code. */
    int                             Rc;
    /** Completion context we are assigned to. */
    struct RTFILEAIOCTXINTERNAL    *pCtxInt;
    /** Entry in the waiting list the request is in. */
    unsigned                        iWaitingList;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t                        u32Magic;
} RTFILEAIOREQINTERNAL, *PRTFILEAIOREQINTERNAL;

/**
 * Async I/O completion context state.
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Current number of requests active on this context. */
    volatile int32_t                cRequests;
    /** Maximum number of requests this context can handle. */
    uint32_t                        cMaxRequests;
    /** The ID of the thread which is currently waiting for requests. */
    volatile RTTHREAD               hThreadWait;
    /** Flag whether the thread was woken up. */
    volatile bool                   fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool                   fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t                        u32Magic;
    /** Flag whether the thread was woken up due to an internal event. */
    volatile bool                   fWokenUpInternal;
    /** List of new requests which need to be inserted into apReqs by the
     *  waiting thread. */
    volatile PRTFILEAIOREQINTERNAL  apReqsNewHead[5];
    /** Special entry for requests which are canceled. Because only one
     *  request can be canceled at a time and the thread canceling the request
     *  has to wait, we need only one entry. */
    volatile PRTFILEAIOREQINTERNAL  pReqToCancel;
    /** Event semaphore the canceling thread waits on for completion of
     *  the operation. */
    RTSEMEVENT                      SemEventCancel;
    /** Head of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL           pReqsWaitHead;
    /** Tail of submitted elements waiting to get into the array. */
    PRTFILEAIOREQINTERNAL           pReqsWaitTail;
    /** Maximum number of elements in the waiting array. */
    unsigned                        cReqsWaitMax;
    /** First free slot in the waiting list. */
    unsigned                        iFirstFree;
    /** List of requests we are currently waiting on.
     * Size depends on cMaxRequests and AIO_LISTIO_MAX. */
    volatile PRTFILEAIOREQINTERNAL  apReqs[1];
} RTFILEAIOCTXINTERNAL, *PRTFILEAIOCTXINTERNAL;
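
/* Note: The context structure is allocated oversized in RTFileAioCtxCreate() so that the
 *       apReqs[1] member at the end actually holds cReqsWaitMax entries; this is the array
 *       that gets handed to aio_suspend() while waiting for completions. */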

/**
 * Internal worker for waking up the waiting thread.
 */
static void rtFileAioCtxWakeup(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    /*
     * Read the thread handle before the status flag.
     * If we read the handle after the flag we might
     * end up with an invalid handle because the thread
     * waiting in RTFileAioCtxWait() might get scheduled
     * before we read the flag and return.
     * We can be sure that the handle is valid if fWaiting is true
     * when we read the handle before the status flag.
     */
    RTTHREAD hThread;
    ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
    bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
    if (fWaiting)
    {
        /*
         * If a thread is waiting, the handle must be valid.
         * It is possible that the thread returns from
         * aio_suspend() before the signal is sent.
         * This is no problem because we already set fWokenUp
         * to true, which will let the thread return VERR_INTERRUPTED,
         * and the next call to RTFileAioCtxWait() will not
         * return VERR_INTERRUPTED because signals are not saved
         * and will simply vanish if the destination thread can't
         * receive them.
         */
        Assert(hThread != NIL_RTTHREAD);
        RTThreadPoke(hThread);
    }
}

/**
 * Internal worker processing events and inserting new requests into the waiting list.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void* volatile*)&pCtxInt->apReqsNewHead[iSlot],
                                                                                     NULL);

            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list. */
            if (pReqHead)
            {
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                    pReqHead = pReqHead->pNext;

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = (PRTFILEAIOREQINTERNAL)ASMAtomicReadPtr((void* volatile*)&pCtxInt->pReqToCancel);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because the array is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Put it out of the waiting array. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled a request which is not in this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}

RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
{
    int rcBSD = 0;
    AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);

#if defined(RT_OS_DARWIN)
    int cReqsOutstandingMax = 0;
    size_t cbParameter = sizeof(int);

    rcBSD = sysctlbyname("kern.aioprocmax",     /* name */
                         &cReqsOutstandingMax,  /* Where to store the old value. */
                         &cbParameter,          /* Size of the memory pointed to. */
                         NULL,                  /* Where the new value is located. */
                         NULL);                 /* Where the size of the new value is stored. */
    if (rcBSD == -1)
        return RTErrConvertFromErrno(errno);

    pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
    pAioLimits->cbBufferAlignment   = 0;
#elif defined(RT_OS_FREEBSD)
    /*
     * The AIO API is implemented in a kernel module which is not
     * loaded by default.
     * If it is loaded there are additional sysctl parameters.
     */
    int cReqsOutstandingMax = 0;
    size_t cbParameter = sizeof(int);

    rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
                         &cReqsOutstandingMax,       /* Where to store the old value. */
                         &cbParameter,               /* Size of the memory pointed to. */
                         NULL,                       /* Where the new value is located. */
                         NULL);                      /* Where the size of the new value is stored. */
    if (rcBSD == -1)
    {
        /* ENOENT means the value is unknown, thus the module is not loaded. */
        if (errno == ENOENT)
            return VERR_NOT_SUPPORTED;
        else
            return RTErrConvertFromErrno(errno);
    }

    pAioLimits->cReqsOutstandingMax = cReqsOutstandingMax;
    pAioLimits->cbBufferAlignment   = 0;
#else
    pAioLimits->cReqsOutstandingMax = RTFILEAIO_UNLIMITED_REQS;
    pAioLimits->cbBufferAlignment   = 0;
#endif

    return VINF_SUCCESS;
}

RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
{
    AssertPtrReturn(phReq, VERR_INVALID_POINTER);

    PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
    if (RT_UNLIKELY(!pReqInt))
        return VERR_NO_MEMORY;

    pReqInt->pCtxInt      = NULL;
    pReqInt->u32Magic     = RTFILEAIOREQ_MAGIC;
    pReqInt->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
    RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

    *phReq = (RTFILEAIOREQ)pReqInt;

    return VINF_SUCCESS;
}


RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
{
    /*
     * Validate the handle and ignore nil.
     */
    if (hReq == NIL_RTFILEAIOREQ)
        return VINF_SUCCESS;
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);

    /*
     * Trash the magic and free it.
     */
    ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
    RTMemFree(pReqInt);
    return VINF_SUCCESS;
}

/**
 * Worker setting up the request.
 */
DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
                                            unsigned uTransferDirection,
                                            RTFOFF off, void *pvBuf, size_t cbTransfer,
                                            void *pvUser)
{
    /*
     * Validate the input.
     */
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    Assert(hFile != NIL_RTFILE);
    AssertPtr(pvBuf);
    Assert(off >= 0);
    Assert(cbTransfer > 0);

    memset(&pReqInt->AioCB, 0, sizeof(struct aiocb));
    pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
    pReqInt->AioCB.aio_fildes     = (int)hFile;
    pReqInt->AioCB.aio_offset     = off;
    pReqInt->AioCB.aio_nbytes     = cbTransfer;
    pReqInt->AioCB.aio_buf        = pvBuf;
    pReqInt->pvUser               = pvUser;
    pReqInt->pCtxInt              = NULL;
    pReqInt->Rc                   = VERR_FILE_AIO_IN_PROGRESS;
    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);

    return VINF_SUCCESS;
}


RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                    void *pvBuf, size_t cbRead, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
                                       off, pvBuf, cbRead, pvUser);
}


RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
                                     void const *pvBuf, size_t cbWrite, void *pvUser)
{
    return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
                                       off, (void *)pvBuf, cbWrite, pvUser);
}


RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
{
    PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;

    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    Assert(hFile != NIL_RTFILE);

    pReqInt->fFlush           = true;
    pReqInt->AioCB.aio_fildes = (int)hFile;
    pReqInt->pvUser           = pvUser;
    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);

    return VINF_SUCCESS;
}


RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);

    return pReqInt->pvUser;
}


RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled.
         */
        AssertMsg(VALID_PTR(pCtxInt),
                  ("Invalid state. Request was canceled but wasn't submitted\n"));

        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        ASMAtomicWritePtr((void* volatile*)&pCtxInt->pReqToCancel, NULL);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}


RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
    RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
    AssertPtrNull(pcbTransfered);

    if (   (RT_SUCCESS(pReqInt->Rc))
        && (pcbTransfered))
        *pcbTransfered = pReqInt->cbTransfered;

    return pReqInt->Rc;
}


RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
{
    PRTFILEAIOCTXINTERNAL pCtxInt;
    unsigned cReqsWaitMax;

    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);

    if (cAioReqsMax == RTFILEAIO_UNLIMITED_REQS)
        return VERR_OUT_OF_RANGE;

    cReqsWaitMax = RT_MIN(cAioReqsMax, AIO_LISTIO_MAX);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(  sizeof(RTFILEAIOCTXINTERNAL)
                                                 + cReqsWaitMax * sizeof(PRTFILEAIOREQINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Create event semaphore. */
    int rc = RTSemEventCreate(&pCtxInt->SemEventCancel);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pCtxInt);
        return rc;
    }

    pCtxInt->u32Magic     = RTFILEAIOCTX_MAGIC;
    pCtxInt->cMaxRequests = cAioReqsMax;
    pCtxInt->cReqsWaitMax = cReqsWaitMax;
    *phAioCtx = (RTFILEAIOCTX)pCtxInt;

    return VINF_SUCCESS;
}


RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);

    if (RT_UNLIKELY(pCtxInt->cRequests))
        return VERR_FILE_AIO_BUSY;

    RTSemEventDestroy(pCtxInt->SemEventCancel);
    RTMemFree(pCtxInt);

    return VINF_SUCCESS;
}


RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    if (hAioCtx == NIL_RTFILEAIOCTX)
        return RTFILEAIO_UNLIMITED_REQS;
    else
        return pCtxInt->cMaxRequests;
}

RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
{
    return VINF_SUCCESS;
}
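
/*
 * Submission overview: read and write requests are handed to lio_listio() in batches of at
 * most AIO_LISTIO_MAX, while flush requests are issued one by one through aio_fsync() since
 * lio_listio() has no flush opcode. Everything accepted by the kernel is linked into a list
 * and published to the waiting thread via apReqsNewHead[].
 */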

RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }
                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    rc = RTErrConvertFromErrno(errno);
                    RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                    pReqInt->Rc = rc;
                    pReqInt->cbTransfered = 0;

                    /* Unlink from the list. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                    break;
                }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests > 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = (PRTFILEAIOREQINTERNAL)ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0],
                                                                                     NULL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicXchgPtr((void * volatile *)&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    return rc;
}
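
/*
 * Waiting overview: aio_suspend() needs a flat array of control blocks, so the waiting thread
 * keeps up to cReqsWaitMax submitted requests in apReqs[] and parks any overflow on the
 * pReqsWaitHead/pReqsWaitTail list until a slot becomes free. Wakeup calls and newly
 * submitted requests interrupt aio_suspend() through RTThreadPoke().
 */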


RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, unsigned cMillisTimeout,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    PRTFILEAIOCTXINTERNAL pCtxInt = (PRTFILEAIOCTXINTERNAL)hAioCtx;
    struct timespec Timeout;
    struct timespec *pTimeout = NULL;
    uint64_t StartNanoTS = 0;

    /* Check parameters. */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    int32_t cRequestsWaiting = ASMAtomicReadS32(&pCtxInt->cRequests);

    if (RT_UNLIKELY(cRequestsWaiting <= 0))
        return VERR_FILE_AIO_NO_REQUEST;

    if (RT_UNLIKELY(cMinReqs > (uint32_t)cRequestsWaiting))
        return VERR_INVALID_PARAMETER;

    if (cMillisTimeout != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillisTimeout / 1000;
        Timeout.tv_nsec = (cMillisTimeout % 1000) * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /* Update the waiting list once before we enter the loop. */
    rc = rtFileAioCtxProcessEvents(pCtxInt);

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
#ifdef RT_STRICT
        if (RT_UNLIKELY(!pCtxInt->iFirstFree))
        {
            for (unsigned i = 0; i < pCtxInt->cReqsWaitMax; i++)
                RTAssertMsg2Weak("wait[%d] = %#p\n", i, pCtxInt->apReqs[i]);

            AssertMsgFailed(("No request to wait for. pReqsWaitHead=%#p pReqsWaitTail=%#p\n",
                             pCtxInt->pReqsWaitHead, pCtxInt->pReqsWaitTail));
        }
#endif

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        int rcPosix = aio_suspend((const struct aiocb * const *)pCtxInt->apReqs,
                                  pCtxInt->iFirstFree, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (rcPosix < 0)
        {
            /* Check that this is an external wakeup event. */
            if (errno == EINTR)
                rc = rtFileAioCtxProcessEvents(pCtxInt);
            else
                rc = RTErrConvertFromErrno(errno);
        }
        else
        {
            /* Requests finished. */
            unsigned iReqCurr = 0;
            unsigned cDone = 0;

            /* Remove completed requests from the waiting list. */
            while (   (iReqCurr < pCtxInt->iFirstFree)
                   && (cDone < cReqs))
            {
                PRTFILEAIOREQINTERNAL pReq = pCtxInt->apReqs[iReqCurr];
                int rcReq = aio_error(&pReq->AioCB);

                if (rcReq != EINPROGRESS)
                {
                    /* Completed; store the return code. */
                    if (rcReq == 0)
                    {
                        pReq->Rc = VINF_SUCCESS;
                        /* Call aio_return() to free resources. */
                        pReq->cbTransfered = aio_return(&pReq->AioCB);
                    }
                    else
                    {
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        pReq->Rc = RTErrConvertFromErrno(errno);
#else
                        pReq->Rc = RTErrConvertFromErrno(rcReq);
#endif
                    }

                    /* Mark the request as finished. */
                    RTFILEAIOREQ_SET_STATE(pReq, COMPLETED);
                    cDone++;

                    /* If there are other entries waiting put the head into the now free entry. */
                    if (pCtxInt->pReqsWaitHead)
                    {
                        PRTFILEAIOREQINTERNAL pReqInsert = pCtxInt->pReqsWaitHead;

                        pCtxInt->pReqsWaitHead = pReqInsert->pNext;
                        if (!pCtxInt->pReqsWaitHead)
                        {
                            /* List is empty now. Clear tail too. */
                            pCtxInt->pReqsWaitTail = NULL;
                        }

                        pReqInsert->iWaitingList = pReq->iWaitingList;
                        pCtxInt->apReqs[pReqInsert->iWaitingList] = pReqInsert;
                        iReqCurr++;
                    }
                    else
                    {
                        /*
                         * Move the last entry into the current position to avoid holes
                         * but only if it is not the last element already.
                         */
                        if (pReq->iWaitingList < pCtxInt->iFirstFree - 1)
                        {
                            pCtxInt->apReqs[pReq->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                            pCtxInt->apReqs[pReq->iWaitingList]->iWaitingList = pReq->iWaitingList;
                        }
                        else
                            pCtxInt->iFirstFree--;

                        pCtxInt->apReqs[pCtxInt->iFirstFree] = NULL;
                    }

                    /* Put the request into the completed list. */
                    pahReqs[cRequestsCompleted++] = pReq;
                    pReq->iWaitingList = RTFILEAIOCTX_WAIT_ENTRY_INVALID;
                }
                else
                    iReqCurr++;
            }

            AssertMsg((cDone <= cReqs), ("Overflow cReqs=%u cMinReqs=%u cDone=%u\n",
                                         cReqs, cMinReqs, cDone));
            cReqs    -= cDone;
            cMinReqs  = RT_MAX(cMinReqs, cDone) - cDone;
            ASMAtomicSubS32(&pCtxInt->cRequests, cDone);

            AssertMsg(pCtxInt->cRequests >= 0, ("Finished more requests than currently active\n"));

            if (!cMinReqs)
                break;

            if (cMillisTimeout != RT_INDEFINITE_WAIT)
            {
                /* Recalculate the timeout, subtracting the time spent waiting so far. */
                uint64_t NanoTS   = RTTimeNanoTS();
                uint64_t TimeDiff = NanoTS - StartNanoTS;
                StartNanoTS = NanoTS;

                Timeout.tv_sec  -= (time_t)(TimeDiff / 1000000000);
                Timeout.tv_nsec -= (long)(TimeDiff % 1000000000);
                if (Timeout.tv_nsec < 0)
                {
                    Timeout.tv_sec--;
                    Timeout.tv_nsec += 1000000000;
                }
            }

            /* Check for new elements. */
            rc = rtFileAioCtxProcessEvents(pCtxInt);
        }
    }

    *pcReqs = cRequestsCompleted;
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    return rc;
}


RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);

    /** @todo r=bird: Define the protocol for how to resume work after calling
     *        this function. */

    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);
    if (!fWokenUp)
        rtFileAioCtxWakeup(pCtxInt);

    return VINF_SUCCESS;
}

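Below is a minimal usage sketch of the RTFileAio API implemented above: create a completion context and a request, prepare and submit a read, wait for completion, query the per-request status, and clean up. It is illustrative only and not taken from this file: error paths are collapsed, and the helper name, the RTFileOpen() flag combination and the 64 KiB transfer size are arbitrary assumptions.

#include <iprt/file.h>
#include <iprt/mem.h>
#include <iprt/err.h>

static int readFirstBlockAsync(const char *pszFilename)
{
    /* Open the file for reading; the flag combination is just an example. */
    RTFILE hFile;
    int rc = RTFileOpen(&hFile, pszFilename,
                        RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_FAILURE(rc))
        return rc;

    /* A completion context limited to one outstanding request. */
    RTFILEAIOCTX hAioCtx;
    rc = RTFileAioCtxCreate(&hAioCtx, 1);
    if (RT_SUCCESS(rc))
    {
        void        *pvBuf = RTMemAlloc(_64K);
        RTFILEAIOREQ hReq  = NIL_RTFILEAIOREQ;
        rc = pvBuf ? RTFileAioReqCreate(&hReq) : VERR_NO_MEMORY;
        if (RT_SUCCESS(rc))
        {
            /* Prepare a read of the first 64K at offset 0 and submit it. */
            rc = RTFileAioReqPrepareRead(hReq, hFile, 0 /*off*/, pvBuf, _64K, NULL /*pvUser*/);
            if (RT_SUCCESS(rc))
                rc = RTFileAioCtxSubmit(hAioCtx, &hReq, 1);

            /* Block until the request completed and fetch its status. */
            if (RT_SUCCESS(rc))
            {
                RTFILEAIOREQ hReqCompleted;
                uint32_t     cCompleted = 0;
                rc = RTFileAioCtxWait(hAioCtx, 1, RT_INDEFINITE_WAIT,
                                      &hReqCompleted, 1, &cCompleted);
                if (RT_SUCCESS(rc))
                {
                    size_t cbTransferred = 0;
                    rc = RTFileAioReqGetRC(hReqCompleted, &cbTransferred);
                    /* On success cbTransferred holds the number of bytes read. */
                }
            }
            RTFileAioReqDestroy(hReq);
        }
        RTMemFree(pvBuf);
        RTFileAioCtxDestroy(hAioCtx);
    }
    RTFileClose(hFile);
    return rc;
}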