VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/freebsd/fileaio-freebsd.cpp@ 38332

Last change on this file since 38332 was 38332, checked in by vboxsync, 13 years ago

Runtime/fileaio-freebsd: Fix error check for aio_error (detected by Bernhard Froehlich)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.7 KB
Line 
1/* $Id: fileaio-freebsd.cpp 38332 2011-08-05 15:32:09Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for the FreeBSD host platform.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP RTLOGGROUP_FILE
32#include <iprt/asm.h>
33#include <iprt/file.h>
34#include <iprt/mem.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/err.h>
38#include <iprt/log.h>
39#include <iprt/thread.h>
40#include "internal/fileaio.h"
41
42#include <sys/types.h>
43#include <sys/event.h>
44#include <sys/time.h>
45#include <sys/sysctl.h>
46#include <aio.h>
47#include <errno.h>
48#include <unistd.h>
49#include <fcntl.h>
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
/**
 * Async I/O completion context state.
 *
 * Completion notification is delivered through a kqueue (iKQueue); the
 * remaining members do request accounting and implement the wakeup
 * handshake between RTFileAioCtxWait() and RTFileAioCtxWakeup().
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Handle to the kernel queue (kqueue file descriptor). */
    int iKQueue;
    /** Current number of requests active on this context. */
    volatile int32_t cRequests;
    /** The ID of the thread which is currently waiting for requests. */
    volatile RTTHREAD hThreadWait;
    /** Flag whether the thread was woken up (set by RTFileAioCtxWakeup). */
    volatile bool fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t u32Magic;
} RTFILEAIOCTXINTERNAL;
/** Pointer to an internal context structure. */
typedef RTFILEAIOCTXINTERNAL *PRTFILEAIOCTXINTERNAL;
74
/**
 * Async I/O request state.
 *
 * The embedded aiocb is handed straight to the kernel AIO calls, which is
 * why it must stay the first member: the code casts between
 * PRTFILEAIOREQINTERNAL and struct aiocb* when calling lio_listio().
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block. Must be the FIRST
     *  element. */
    struct aiocb AioCB;
    /** Current state the request is in. */
    RTFILEAIOREQSTATE enmState;
    /** Flag whether this is a flush request (aio_fsync instead of lio_listio). */
    bool fFlush;
    /** Opaque user data. */
    void *pvUser;
    /** Completion context we are assigned to. */
    PRTFILEAIOCTXINTERNAL pCtxInt;
    /** Number of bytes actually transferred. */
    size_t cbTransfered;
    /** Status code. */
    int Rc;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t u32Magic;
} RTFILEAIOREQINTERNAL;
/** Pointer to an internal request structure. */
typedef RTFILEAIOREQINTERNAL *PRTFILEAIOREQINTERNAL;
100
101
102/*******************************************************************************
103* Defined Constants And Macros *
104*******************************************************************************/
105/** The max number of events to get in one call. */
106#define AIO_MAXIMUM_REQUESTS_PER_CONTEXT 64
107
RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
{
    AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);

    /*
     * The AIO API lives in a kernel module which is not loaded by default.
     * The aio sysctl nodes only exist while it is loaded, so probing one of
     * them doubles as the availability check.
     */
    int    cMaxReqsPerProc = 0;
    size_t cbOldValue      = sizeof(cMaxReqsPerProc);
    int rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
                             &cMaxReqsPerProc,           /* Where to store the old value. */
                             &cbOldValue,                /* Size of the memory pointed to. */
                             NULL,                       /* No new value. */
                             NULL);                      /* No new value size. */
    if (rcBSD == -1)
    {
        /* ENOENT means the sysctl is unknown, i.e. the module is not loaded. */
        return errno == ENOENT
             ? VERR_NOT_SUPPORTED
             : RTErrConvertFromErrno(errno);
    }

    pAioLimits->cReqsOutstandingMax = cMaxReqsPerProc;
    pAioLimits->cbBufferAlignment   = 0; /* No alignment restrictions. */
    return VINF_SUCCESS;
}
140
141RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
142{
143 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
144
145 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
146 if (RT_UNLIKELY(!pReqInt))
147 return VERR_NO_MEMORY;
148
149 /* Ininitialize static parts. */
150 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
151 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
152 pReqInt->pCtxInt = NULL;
153 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
154 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
155
156 *phReq = (RTFILEAIOREQ)pReqInt;
157
158 return VINF_SUCCESS;
159}
160
161RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
162{
163 /*
164 * Validate the handle and ignore nil.
165 */
166 if (hReq == NIL_RTFILEAIOREQ)
167 return VINF_SUCCESS;
168 PRTFILEAIOREQINTERNAL pReqInt = hReq;
169 RTFILEAIOREQ_VALID_RETURN(pReqInt);
170 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
171
172 /*
173 * Trash the magic and free it.
174 */
175 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
176 RTMemFree(pReqInt);
177 return VINF_SUCCESS;
178}
179
180/**
181 * Worker setting up the request.
182 */
183DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
184 unsigned uTransferDirection,
185 RTFOFF off, void *pvBuf, size_t cbTransfer,
186 void *pvUser)
187{
188 /*
189 * Validate the input.
190 */
191 PRTFILEAIOREQINTERNAL pReqInt = hReq;
192 RTFILEAIOREQ_VALID_RETURN(pReqInt);
193 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
194 Assert(hFile != NIL_RTFILE);
195 AssertPtr(pvBuf);
196 Assert(off >= 0);
197 Assert(cbTransfer > 0);
198
199 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
200 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
201 pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
202 pReqInt->AioCB.aio_fildes = RTFileToNative(hFile);
203 pReqInt->AioCB.aio_offset = off;
204 pReqInt->AioCB.aio_nbytes = cbTransfer;
205 pReqInt->AioCB.aio_buf = pvBuf;
206 pReqInt->fFlush = false;
207 pReqInt->pvUser = pvUser;
208 pReqInt->pCtxInt = NULL;
209 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
210 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
211
212 return VINF_SUCCESS;
213}
214
215RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
216 void *pvBuf, size_t cbRead, void *pvUser)
217{
218 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
219 off, pvBuf, cbRead, pvUser);
220}
221
222RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
223 void const *pvBuf, size_t cbWrite, void *pvUser)
224{
225 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
226 off, (void *)pvBuf, cbWrite, pvUser);
227}
228
229RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
230{
231 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
232
233 RTFILEAIOREQ_VALID_RETURN(pReqInt);
234 Assert(hFile != NIL_RTFILE);
235 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
236
237 pReqInt->fFlush = true;
238 pReqInt->AioCB.aio_fildes = RTFileToNative(hFile);
239 pReqInt->AioCB.aio_offset = 0;
240 pReqInt->AioCB.aio_nbytes = 0;
241 pReqInt->AioCB.aio_buf = NULL;
242 pReqInt->pvUser = pvUser;
243 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
244
245 return VINF_SUCCESS;
246}
247
248RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
249{
250 PRTFILEAIOREQINTERNAL pReqInt = hReq;
251 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
252
253 return pReqInt->pvUser;
254}
255
256RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
257{
258 PRTFILEAIOREQINTERNAL pReqInt = hReq;
259 RTFILEAIOREQ_VALID_RETURN(pReqInt);
260 RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);
261
262
263 int rcBSD = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);
264
265 if (rcBSD == AIO_CANCELED)
266 {
267 /*
268 * Decrement request count because the request will never arrive at the
269 * completion port.
270 */
271 AssertMsg(VALID_PTR(pReqInt->pCtxInt),
272 ("Invalid state. Request was canceled but wasn't submitted\n"));
273
274 ASMAtomicDecS32(&pReqInt->pCtxInt->cRequests);
275 pReqInt->Rc = VERR_FILE_AIO_CANCELED;
276 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
277 return VINF_SUCCESS;
278 }
279 else if (rcBSD == AIO_ALLDONE)
280 return VERR_FILE_AIO_COMPLETED;
281 else if (rcBSD == AIO_NOTCANCELED)
282 return VERR_FILE_AIO_IN_PROGRESS;
283 else
284 return RTErrConvertFromErrno(errno);
285}
286
287RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
288{
289 PRTFILEAIOREQINTERNAL pReqInt = hReq;
290 RTFILEAIOREQ_VALID_RETURN(pReqInt);
291 AssertPtrNull(pcbTransfered);
292 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
293 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
294
295 if ( (RT_SUCCESS(pReqInt->Rc))
296 && (pcbTransfered))
297 *pcbTransfered = pReqInt->cbTransfered;
298
299 return pReqInt->Rc;
300}
301
/**
 * Creates an async I/O completion context backed by a kqueue.
 *
 * @note cAioReqsMax is ignored; this backend has no per-context limit
 *       (see RTFileAioCtxGetMaxReqCount).
 */
RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt;
    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOCTXINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Init the event handle.
     * Bugfix: kqueue() may legitimately return 0 (the lowest free
     * descriptor); only -1 indicates failure, so don't test for > 0. */
    pCtxInt->iKQueue = kqueue();
    if (RT_LIKELY(pCtxInt->iKQueue != -1))
    {
        pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
        *phAioCtx = (RTFILEAIOCTX)pCtxInt;
    }
    else
    {
        RTMemFree(pCtxInt);
        rc = RTErrConvertFromErrno(errno);
    }

    return rc;
}
327
328RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
329{
330 /* Validate the handle and ignore nil. */
331 if (hAioCtx == NIL_RTFILEAIOCTX)
332 return VINF_SUCCESS;
333 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
334 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
335
336 /* Cannot destroy a busy context. */
337 if (RT_UNLIKELY(pCtxInt->cRequests))
338 return VERR_FILE_AIO_BUSY;
339
340 close(pCtxInt->iKQueue);
341 ASMAtomicUoWriteU32(&pCtxInt->u32Magic, RTFILEAIOCTX_MAGIC_DEAD);
342 RTMemFree(pCtxInt);
343
344 return VINF_SUCCESS;
345}
346
RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
{
    /* The kqueue backend imposes no per-context request limit, so the
     * handle (even a nil one) is deliberately not inspected. */
    return RTFILEAIO_UNLIMITED_REQS;
}
351
RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
{
    /* Nothing to do: the kqueue is attached per request at submission time
     * (sigev_notify_kqueue), not per file, so this is a no-op here. */
    return VINF_SUCCESS;
}
356
357RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
358{
359 /*
360 * Parameter validation.
361 */
362 int rc = VINF_SUCCESS;
363 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
364 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
365 AssertReturn(cReqs > 0, VERR_INVALID_PARAMETER);
366 AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
367
368 do
369 {
370 int rcBSD = 0;
371 size_t cReqsSubmit = 0;
372 size_t i = 0;
373 PRTFILEAIOREQINTERNAL pReqInt;
374
375 while ( (i < cReqs)
376 && (i < AIO_LISTIO_MAX))
377 {
378 pReqInt = pahReqs[i];
379 if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
380 {
381 /* Undo everything and stop submitting. */
382 for (size_t iUndo = 0; iUndo < i; iUndo++)
383 {
384 pReqInt = pahReqs[iUndo];
385 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
386 pReqInt->pCtxInt = NULL;
387 pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
388 }
389 rc = VERR_INVALID_HANDLE;
390 break;
391 }
392
393 pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
394 pReqInt->pCtxInt = pCtxInt;
395 RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);
396
397 if (pReqInt->fFlush)
398 break;
399
400 cReqsSubmit++;
401 i++;
402 }
403
404 if (cReqsSubmit)
405 {
406 rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
407 if (RT_UNLIKELY(rcBSD < 0))
408 {
409 if (errno == EAGAIN)
410 rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
411 else
412 rc = RTErrConvertFromErrno(errno);
413
414 /* Check which requests got actually submitted and which not. */
415 for (i = 0; i < cReqs; i++)
416 {
417 pReqInt = pahReqs[i];
418 rcBSD = aio_error(&pReqInt->AioCB);
419 if ( rcBSD == -1
420 && errno == EINVAL)
421 {
422 /* Was not submitted. */
423 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
424 pReqInt->pCtxInt = NULL;
425 }
426 else if (rcBSD != EINPROGRESS)
427 {
428 /* The request encountered an error. */
429 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
430 pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
431 pReqInt->pCtxInt = NULL;
432 pReqInt->cbTransfered = 0;
433 }
434 }
435 break;
436 }
437
438 ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
439 cReqs -= cReqsSubmit;
440 pahReqs += cReqsSubmit;
441 }
442
443 /* Check if we have a flush request now. */
444 if (cReqs && RT_SUCCESS_NP(rc))
445 {
446 pReqInt = pahReqs[0];
447 RTFILEAIOREQ_VALID_RETURN(pReqInt);
448
449 if (pReqInt->fFlush)
450 {
451 /*
452 * lio_listio does not work with flush requests so
453 * we have to use aio_fsync directly.
454 */
455 rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
456 if (RT_UNLIKELY(rcBSD < 0))
457 {
458 if (rcBSD == EAGAIN)
459 {
460 /* Was not submitted. */
461 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
462 pReqInt->pCtxInt = NULL;
463 return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
464 }
465 else
466 {
467 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
468 pReqInt->Rc = RTErrConvertFromErrno(errno);
469 pReqInt->cbTransfered = 0;
470 return pReqInt->Rc;
471 }
472 }
473
474 ASMAtomicIncS32(&pCtxInt->cRequests);
475 cReqs--;
476 pahReqs++;
477 }
478 }
479 } while (cReqs);
480
481 return rc;
482}
483
/**
 * Waits on the context's kqueue for request completions.
 *
 * Harvests up to cReqs completed requests into pahReqs, blocking until at
 * least cMinReqs have completed, the timeout expires, or
 * RTFileAioCtxWakeup() interrupts the wait.
 */
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec *pTimeout = NULL;
    struct timespec  Timeout = {0,0};
    uint64_t         StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int rcBSD;

        /* Bugfix: clamp to the size of aKEvents. The old computation
         * ('cMinReqs < MAX ? cReqs : MAX') could pass an unclamped cReqs to
         * kevent() and overflow the stack array when cReqs > 64. */
        int cRequestsToWait = (int)RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we will leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);
            if (cbTransfered < 0)
            {
                /* Bugfix: aio_return() returns -1 and sets errno on failure;
                 * convert errno instead of the -1 return value. */
                pReqInt->Rc = RTErrConvertFromErrno(errno);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (   pCtxInt->fWokenUp
        && RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
622
/**
 * Interrupts a thread blocked in RTFileAioCtxWait() on this context.
 *
 * Lock-free handshake: the fWokenUp flag is set first, then the waiter's
 * thread handle and fWaiting flag are inspected to decide whether a poke
 * is actually needed.
 */
RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);

    /** @todo r=bird: Define the protocol for how to resume work after calling
     *        this function. */

    /* Set the wakeup flag first; the previous value tells us whether another
     * wakeup is already pending (in which case no poke is needed). */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);

    /*
     * Read the thread handle before the status flag.
     * If we read the handle after the flag we might
     * end up with an invalid handle because the thread
     * waiting in RTFileAioCtxWait() might get scheduled
     * before we read the flag and returns.
     * We can ensure that the handle is valid if fWaiting is true
     * when reading the handle before the status flag.
     */
    RTTHREAD hThread;
    ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
    bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
    if (   !fWokenUp
        && fWaiting)
    {
        /*
         * If a thread waits the handle must be valid.
         * It is possible that the thread returns from
         * kevent() before the signal is sent.
         * This is no problem because we already set fWokenUp
         * to true which will let the thread return VERR_INTERRUPTED
         * and the next call to RTFileAioCtxWait() will not
         * return VERR_INTERRUPTED because signals are not saved
         * and will simply vanish if the destination thread can't
         * receive it.
         */
        Assert(hThread != NIL_RTTHREAD);
        RTThreadPoke(hThread);
    }

    return VINF_SUCCESS;
}
665
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette