VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/freebsd/fileaio-freebsd.cpp@ 28800

Last change on this file since 28800 was 28800, checked in by vboxsync, 15 years ago

Automated rebranding to Oracle copyright/license strings via filemuncher

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.1 KB
Line 
1/* $Id: fileaio-freebsd.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for the FreeBSD host platform.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP RTLOGGROUP_FILE
32#include <iprt/asm.h>
33#include <iprt/file.h>
34#include <iprt/mem.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/err.h>
38#include <iprt/log.h>
39#include <iprt/thread.h>
40#include "internal/fileaio.h"
41
42#include <sys/types.h>
43#include <sys/event.h>
44#include <sys/time.h>
45#include <sys/sysctl.h>
46#include <aio.h>
47#include <errno.h>
48#include <unistd.h>
49#include <fcntl.h>
50
51/*******************************************************************************
52* Structures and Typedefs *
53*******************************************************************************/
/**
 * Async I/O completion context state.
 *
 * Completion events are delivered through a kqueue; the waiter thread
 * bookkeeping (hThreadWait/fWokenUp/fWaiting) implements the lock-free
 * wakeup protocol used by RTFileAioCtxWait/RTFileAioCtxWakeup.
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Handle (file descriptor) to the kernel queue. */
    int               iKQueue;
    /** Current number of requests active on this context. */
    volatile int32_t  cRequests;
    /** The ID of the thread which is currently waiting for requests,
     *  NIL_RTTHREAD when nobody is waiting. */
    volatile RTTHREAD hThreadWait;
    /** Flag whether the thread was woken up (sticky until the waiter
     *  consumes it and returns VERR_INTERRUPTED). */
    volatile bool     fWokenUp;
    /** Flag whether the thread is currently waiting in the syscall. */
    volatile bool     fWaiting;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t          u32Magic;
} RTFILEAIOCTXINTERNAL;
/** Pointer to an internal context structure. */
typedef RTFILEAIOCTXINTERNAL *PRTFILEAIOCTXINTERNAL;
74
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block. Must be the FIRST element, because the
     *  structure is cast to a struct aiocb * when handed to lio_listio(). */
    struct aiocb          AioCB;
    /** Current state the request is in. */
    RTFILEAIOREQSTATE     enmState;
    /** Flag whether this is a flush request (submitted via aio_fsync()
     *  instead of lio_listio()). */
    bool                  fFlush;
    /** Opaque user data. */
    void                 *pvUser;
    /** Completion context we are assigned to, NULL while not submitted. */
    PRTFILEAIOCTXINTERNAL pCtxInt;
    /** Number of bytes actually transferred. */
    size_t                cbTransfered;
    /** Status code. */
    int                   Rc;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t              u32Magic;
} RTFILEAIOREQINTERNAL;
/** Pointer to an internal request structure. */
typedef RTFILEAIOREQINTERNAL *PRTFILEAIOREQINTERNAL;
100
101
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** The max number of events to get in one call. Also sizes the on-stack
 *  kevent array in RTFileAioCtxWait(). */
#define AIO_MAXIMUM_REQUESTS_PER_CONTEXT 64
107
/**
 * Queries the host's async I/O limits.
 *
 * The AIO API lives in a kernel module which is not loaded by default;
 * when it is loaded the vfs.aio sysctl tree becomes available, so probing
 * one of its nodes doubles as the availability check.
 *
 * @returns IPRT status code.
 * @retval  VERR_NOT_SUPPORTED if the aio kernel module is not loaded.
 * @param   pAioLimits    Where to store the limits on success.
 */
RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
{
    AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);

    int    cMaxReqsPerProc = 0;
    size_t cbOld           = sizeof(cMaxReqsPerProc);
    if (sysctlbyname("vfs.aio.max_aio_per_proc",
                     &cMaxReqsPerProc /* old value */,
                     &cbOld           /* size of old value */,
                     NULL             /* no new value */,
                     0                /* no new value size */) == -1)
    {
        /* ENOENT means the node is unknown, i.e. the module is not loaded. */
        return errno == ENOENT ? VERR_NOT_SUPPORTED : RTErrConvertFromErrno(errno);
    }

    pAioLimits->cReqsOutstandingMax = cMaxReqsPerProc;
    pAioLimits->cbBufferAlignment   = 0; /* No alignment restrictions. */
    return VINF_SUCCESS;
}
140
141RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
142{
143 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
144
145 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
146 if (RT_UNLIKELY(!pReqInt))
147 return VERR_NO_MEMORY;
148
149 /* Ininitialize static parts. */
150 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
151 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
152 pReqInt->pCtxInt = NULL;
153 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
154 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
155
156 *phReq = (RTFILEAIOREQ)pReqInt;
157
158 return VINF_SUCCESS;
159}
160
161RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
162{
163 /*
164 * Validate the handle and ignore nil.
165 */
166 if (hReq == NIL_RTFILEAIOREQ)
167 return VINF_SUCCESS;
168 PRTFILEAIOREQINTERNAL pReqInt = hReq;
169 RTFILEAIOREQ_VALID_RETURN(pReqInt);
170 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
171
172 /*
173 * Trash the magic and free it.
174 */
175 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
176 RTMemFree(pReqInt);
177 return VINF_SUCCESS;
178}
179
180/**
181 * Worker setting up the request.
182 */
183DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
184 unsigned uTransferDirection,
185 RTFOFF off, void *pvBuf, size_t cbTransfer,
186 void *pvUser)
187{
188 /*
189 * Validate the input.
190 */
191 PRTFILEAIOREQINTERNAL pReqInt = hReq;
192 RTFILEAIOREQ_VALID_RETURN(pReqInt);
193 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
194 Assert(hFile != NIL_RTFILE);
195 AssertPtr(pvBuf);
196 Assert(off >= 0);
197 Assert(cbTransfer > 0);
198
199 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
200 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
201 pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
202 pReqInt->AioCB.aio_fildes = (int)hFile;
203 pReqInt->AioCB.aio_offset = off;
204 pReqInt->AioCB.aio_nbytes = cbTransfer;
205 pReqInt->AioCB.aio_buf = pvBuf;
206 pReqInt->pvUser = pvUser;
207 pReqInt->pCtxInt = NULL;
208 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
209 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
210
211 return VINF_SUCCESS;
212}
213
214RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
215 void *pvBuf, size_t cbRead, void *pvUser)
216{
217 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
218 off, pvBuf, cbRead, pvUser);
219}
220
221RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
222 void const *pvBuf, size_t cbWrite, void *pvUser)
223{
224 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
225 off, (void *)pvBuf, cbWrite, pvUser);
226}
227
228RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
229{
230 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
231
232 RTFILEAIOREQ_VALID_RETURN(pReqInt);
233 Assert(hFile != NIL_RTFILE);
234 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
235
236 pReqInt->fFlush = true;
237 pReqInt->AioCB.aio_fildes = (int)hFile;
238 pReqInt->pvUser = pvUser;
239 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
240
241 return VINF_SUCCESS;
242}
243
244RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
245{
246 PRTFILEAIOREQINTERNAL pReqInt = hReq;
247 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
248
249 return pReqInt->pvUser;
250}
251
252RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
253{
254 PRTFILEAIOREQINTERNAL pReqInt = hReq;
255 RTFILEAIOREQ_VALID_RETURN(pReqInt);
256 RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);
257
258
259 int rcBSD = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);
260
261 if (rcBSD == AIO_CANCELED)
262 {
263 /*
264 * Decrement request count because the request will never arrive at the
265 * completion port.
266 */
267 AssertMsg(VALID_PTR(pReqInt->pCtxInt),
268 ("Invalid state. Request was canceled but wasn't submitted\n"));
269
270 ASMAtomicDecS32(&pReqInt->pCtxInt->cRequests);
271 pReqInt->Rc = VERR_FILE_AIO_CANCELED;
272 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
273 return VINF_SUCCESS;
274 }
275 else if (rcBSD == AIO_ALLDONE)
276 return VERR_FILE_AIO_COMPLETED;
277 else if (rcBSD == AIO_NOTCANCELED)
278 return VERR_FILE_AIO_IN_PROGRESS;
279 else
280 return RTErrConvertFromErrno(errno);
281}
282
283RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
284{
285 PRTFILEAIOREQINTERNAL pReqInt = hReq;
286 RTFILEAIOREQ_VALID_RETURN(pReqInt);
287 AssertPtrNull(pcbTransfered);
288 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
289 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
290
291 if ( (RT_SUCCESS(pReqInt->Rc))
292 && (pcbTransfered))
293 *pcbTransfered = pReqInt->cbTransfered;
294
295 return pReqInt->Rc;
296}
297
/**
 * Creates an async I/O completion context.
 *
 * @returns IPRT status code.
 * @param   phAioCtx     Where to store the context handle.
 * @param   cAioReqsMax  Hint for the maximum outstanding requests; ignored
 *                       on FreeBSD (no per-context limit).
 */
RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt;
    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOCTXINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Init the event handle. kqueue() returns -1 on failure; 0 is a valid
     * descriptor, so the old '> 0' check wrongly rejected it. */
    pCtxInt->iKQueue = kqueue();
    if (RT_LIKELY(pCtxInt->iKQueue >= 0))
    {
        pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
        *phAioCtx = (RTFILEAIOCTX)pCtxInt;
    }
    else
    {
        RTMemFree(pCtxInt);
        rc = RTErrConvertFromErrno(errno);
    }

    return rc;
}
323
324RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
325{
326 /* Validate the handle and ignore nil. */
327 if (hAioCtx == NIL_RTFILEAIOCTX)
328 return VINF_SUCCESS;
329 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
330 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
331
332 /* Cannot destroy a busy context. */
333 if (RT_UNLIKELY(pCtxInt->cRequests))
334 return VERR_FILE_AIO_BUSY;
335
336 close(pCtxInt->iKQueue);
337 ASMAtomicUoWriteU32(&pCtxInt->u32Magic, RTFILEAIOCTX_MAGIC_DEAD);
338 RTMemFree(pCtxInt);
339
340 return VINF_SUCCESS;
341}
342
343RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
344{
345 return RTFILEAIO_UNLIMITED_REQS;
346}
347
348RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
349{
350 return VINF_SUCCESS;
351}
352
353RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
354{
355 /*
356 * Parameter validation.
357 */
358 int rc = VINF_SUCCESS;
359 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
360 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
361 AssertReturn(cReqs > 0, VERR_INVALID_PARAMETER);
362 AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
363
364 do
365 {
366 int rcBSD = 0;
367 size_t cReqsSubmit = 0;
368 size_t i = 0;
369 PRTFILEAIOREQINTERNAL pReqInt;
370
371 while ( (i < cReqs)
372 && (i < AIO_LISTIO_MAX))
373 {
374 pReqInt = pahReqs[i];
375 if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
376 {
377 /* Undo everything and stop submitting. */
378 for (size_t iUndo = 0; iUndo < i; iUndo++)
379 {
380 pReqInt = pahReqs[iUndo];
381 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
382 pReqInt->pCtxInt = NULL;
383 pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
384 }
385 rc = VERR_INVALID_HANDLE;
386 break;
387 }
388
389 pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
390 pReqInt->pCtxInt = pCtxInt;
391 RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);
392
393 if (pReqInt->fFlush)
394 break;
395
396 cReqsSubmit++;
397 i++;
398 }
399
400 if (cReqsSubmit)
401 {
402 rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
403 if (RT_UNLIKELY(rcBSD < 0))
404 {
405 if (rcBSD == EAGAIN)
406 rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
407 else
408 rc = RTErrConvertFromErrno(errno);
409
410 /* Check which requests got actually submitted and which not. */
411 for (i = 0; i < cReqs; i++)
412 {
413 pReqInt = pahReqs[i];
414 rcBSD = aio_error(&pReqInt->AioCB);
415 if (rcBSD == EINVAL)
416 {
417 /* Was not submitted. */
418 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
419 pReqInt->pCtxInt = NULL;
420 }
421 else if (rcBSD != EINPROGRESS)
422 {
423 /* The request encountered an error. */
424 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
425 pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
426 pReqInt->pCtxInt = NULL;
427 pReqInt->cbTransfered = 0;
428 }
429 }
430 break;
431 }
432
433 ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
434 cReqs -= cReqsSubmit;
435 pahReqs += cReqsSubmit;
436 }
437
438 /* Check if we have a flush request now. */
439 if (cReqs)
440 {
441 pReqInt = pahReqs[0];
442 RTFILEAIOREQ_VALID_RETURN(pReqInt);
443
444 if (pReqInt->fFlush)
445 {
446 /*
447 * lio_listio does not work with flush requests so
448 * we have to use aio_fsync directly.
449 */
450 rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
451 if (RT_UNLIKELY(rcBSD < 0))
452 {
453 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
454 pReqInt->Rc = RTErrConvertFromErrno(errno);
455 pReqInt->cbTransfered = 0;
456 return pReqInt->Rc;
457 }
458
459 ASMAtomicIncS32(&pCtxInt->cRequests);
460 cReqs--;
461 pahReqs++;
462 }
463 }
464 } while (cReqs);
465
466 return rc;
467}
468
/**
 * Waits for completed requests on a context.
 *
 * @returns IPRT status code.
 * @retval  VERR_TIMEOUT if cMillies elapsed before cMinReqs completed.
 * @retval  VERR_INTERRUPTED if RTFileAioCtxWakeup() was called.
 * @param   hAioCtx   The completion context.
 * @param   cMinReqs  Minimum number of completions to wait for (0 == 1).
 * @param   cMillies  Timeout in milliseconds or RT_INDEFINITE_WAIT.
 * @param   pahReqs   Where to store the completed request handles.
 * @param   cReqs     Size of pahReqs; must be >= cMinReqs.
 * @param   pcReqs    Where to store the number of returned requests (always set).
 */
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec *pTimeout = NULL;
    struct timespec  Timeout = {0,0};
    uint64_t         StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        /* Never ask the kernel for more events than aKEvents can hold.
         * The old code passed cReqs straight through when cMinReqs was
         * below the limit, letting kevent() overflow the stack array for
         * cReqs > AIO_MAXIMUM_REQUESTS_PER_CONTEXT. */
        int cRequestsToWait = (int)RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);
        int rcBSD;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we will leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            ssize_t cbTransfered = aio_return(&pReqInt->AioCB);
            if (cbTransfered < 0)
            {
                /* aio_return() returns -1 and sets errno on failure; the
                 * old code converted the -1 return value instead of errno. */
                pReqInt->Rc = RTErrConvertFromErrno(errno);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (   pCtxInt->fWokenUp
        && RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
607
/**
 * Wakes up a thread blocked in RTFileAioCtxWait() on this context, making
 * it return VERR_INTERRUPTED. Lock-free: the read order of the thread
 * handle vs. the fWaiting flag below is significant.
 *
 * @returns VINF_SUCCESS.
 * @param   hAioCtx   The completion context whose waiter should be poked.
 */
RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);

    /** @todo r=bird: Define the protocol for how to resume work after calling
     *        this function. */

    /* Set the sticky wakeup flag first; fWokenUp != false means somebody
     * else already poked the waiter, so we must not poke again. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);

    /*
     * Read the thread handle before the status flag.
     * If we read the handle after the flag we might
     * end up with an invalid handle because the thread
     * waiting in RTFileAioCtxWait() might get scheduled
     * before we read the flag and returns.
     * We can ensure that the handle is valid if fWaiting is true
     * when reading the handle before the status flag.
     */
    RTTHREAD hThread;
    ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
    bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
    if (   !fWokenUp
        && fWaiting)
    {
        /*
         * If a thread waits the handle must be valid.
         * It is possible that the thread returns from
         * kevent() before the signal is sent.
         * This is no problem because we already set fWokenUp
         * to true which will let the thread return VERR_INTERRUPTED
         * and the next call to RTFileAioCtxWait() will not
         * return VERR_INTERRUPTED because signals are not saved
         * and will simply vanish if the destination thread can't
         * receive it.
         */
        Assert(hThread != NIL_RTTHREAD);
        RTThreadPoke(hThread);
    }

    return VINF_SUCCESS;
}
650
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette