VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/misc/once.cpp@ 78052

Last change on this file since 78052 was 76553, checked in by vboxsync, 6 years ago

scm --update-copyright-year

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.9 KB
/* $Id: once.cpp 76553 2019-01-01 01:45:53Z vboxsync $ */
/** @file
 * IPRT - Execute Once.
 */

/*
 * Copyright (C) 2007-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include <iprt/once.h>
#include "internal/iprt.h"

#include <iprt/asm.h>
#include <iprt/assert.h>
#ifdef IN_RING3
# include <iprt/critsect.h>
# define RTONCE_USE_CRITSECT_FOR_TERM
#elif defined(IN_RING0)
# include <iprt/spinlock.h>
# define RTONCE_USE_SPINLOCK_FOR_TERM
#else
# define RTONCE_NO_TERM
#endif
#include <iprt/err.h>
#include <iprt/initterm.h>
#include <iprt/semaphore.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#ifndef RTONCE_NO_TERM
/** For initializing the clean-up list code. */
static RTONCE           g_OnceCleanUp = RTONCE_INITIALIZER;
/** Lock protecting the clean-up list. */
#ifdef RTONCE_USE_CRITSECT_FOR_TERM
static RTCRITSECT       g_CleanUpCritSect;
#else
static RTSEMFASTMUTEX   g_hCleanUpLock;
#endif
/** The clean-up list. */
static RTLISTANCHOR     g_CleanUpList;

/** Locks the clean-up list. */
#ifdef RTONCE_USE_CRITSECT_FOR_TERM
# define RTONCE_CLEANUP_LOCK()      RTCritSectEnter(&g_CleanUpCritSect)
#else
# define RTONCE_CLEANUP_LOCK()      RTSemFastMutexRequest(g_hCleanUpLock);
#endif

/** Unlocks the clean-up list. */
#ifdef RTONCE_USE_CRITSECT_FOR_TERM
# define RTONCE_CLEANUP_UNLOCK()    RTCritSectLeave(&g_CleanUpCritSect);
#else
# define RTONCE_CLEANUP_UNLOCK()    RTSemFastMutexRelease(g_hCleanUpLock);
#endif


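/*
 * Termination callback: registered via RTTermRegisterCallback() from
 * rtOnceInitCleanUp() below.  At IPRT termination it walks g_CleanUpList in
 * reverse order and invokes every clean-up handler that was registered
 * through RTOnceSlow().
 */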
/** @callback_method_impl{FNRTTERMCALLBACK} */
static DECLCALLBACK(void) rtOnceTermCallback(RTTERMREASON enmReason, int32_t iStatus, void *pvUser)
{
    bool const fLazyCleanUpOk = RTTERMREASON_IS_LAZY_CLEANUP_OK(enmReason);
    RTONCE_CLEANUP_LOCK();      /* Potentially dangerous. */

    PRTONCE pCur, pPrev;
    RTListForEachReverseSafe(&g_CleanUpList, pCur, pPrev, RTONCE, CleanUpNode)
    {
        /*
         * Mostly reset it before doing the callback.
         *
         * Should probably introduce some new states here, but I'm not sure
         * it's really worth it at this point.
         */
        PFNRTONCECLEANUP pfnCleanUp    = pCur->pfnCleanUp;
        void            *pvUserCleanUp = pCur->pvUser;
        pCur->pvUser     = NULL;
        pCur->pfnCleanUp = NULL;
        ASMAtomicWriteS32(&pCur->rc, VERR_WRONG_ORDER);

        pfnCleanUp(pvUserCleanUp, fLazyCleanUpOk);

        /*
         * Reset the rest of the state if we're being unloaded or the like.
         */
        if (!fLazyCleanUpOk)
        {
            ASMAtomicWriteS32(&pCur->rc,     VERR_INTERNAL_ERROR);
            ASMAtomicWriteS32(&pCur->iState, RTONCESTATE_UNINITIALIZED);
        }
    }

    RTONCE_CLEANUP_UNLOCK();

    /*
     * Reset our own structure and the critsect / mutex.
     */
    if (!fLazyCleanUpOk)
    {
# ifdef RTONCE_USE_CRITSECT_FOR_TERM
        RTCritSectDelete(&g_CleanUpCritSect);
# else
        RTSemFastMutexDestroy(g_hCleanUpLock);
        g_hCleanUpLock = NIL_RTSEMFASTMUTEX;
# endif

        ASMAtomicWriteS32(&g_OnceCleanUp.rc,     VERR_INTERNAL_ERROR);
        ASMAtomicWriteS32(&g_OnceCleanUp.iState, RTONCESTATE_UNINITIALIZED);
    }

    NOREF(pvUser); NOREF(iStatus);
}



/**
 * Initializes the globals (using RTOnce).
 *
 * @returns IPRT status code
 * @param   pvUser          Unused.
 */
static DECLCALLBACK(int32_t) rtOnceInitCleanUp(void *pvUser)
{
    NOREF(pvUser);
    RTListInit(&g_CleanUpList);
# ifdef RTONCE_USE_CRITSECT_FOR_TERM
    int rc = RTCritSectInit(&g_CleanUpCritSect);
# else
    int rc = RTSemFastMutexCreate(&g_hCleanUpLock);
# endif
    if (RT_SUCCESS(rc))
    {
        rc = RTTermRegisterCallback(rtOnceTermCallback, NULL);
        if (RT_SUCCESS(rc))
            return rc;

# ifdef RTONCE_USE_CRITSECT_FOR_TERM
        RTCritSectDelete(&g_CleanUpCritSect);
# else
        RTSemFastMutexDestroy(g_hCleanUpLock);
        g_hCleanUpLock = NIL_RTSEMFASTMUTEX;
# endif
    }
    return rc;
}

#endif /* !RTONCE_NO_TERM */

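/*
 * Quick overview of the RTONCESTATE_* transitions as implemented below (the
 * state constants themselves are defined in iprt/once.h):
 *
 *  UNINITIALIZED     -> BUSY_NO_SEM            first caller claims the init job (RTOnceSlow)
 *  BUSY_NO_SEM       -> BUSY_CREATING_SEM      a waiter volunteers to create the event semaphore
 *  BUSY_CREATING_SEM -> BUSY_HAVE_SEM          semaphore in place, other waiters block on it
 *  BUSY_CREATING_SEM -> BUSY_SPIN              semaphore creation failed, waiters spin/yield
 *  BUSY_*            -> DONE / DONE_CREATING_SEM / DONE_HAVE_SEM
 *                                              the first caller finished; the DONE_* variants tell
 *                                              the waiters which semaphore clean-up duties remain
 *  DONE_CREATING_SEM -> DONE                   the waiter that was creating the semaphore tears it down
 *  DONE_HAVE_SEM     -> DONE                   the last waiter to let go destroys the semaphore
 */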
/**
 * The state loop of the other threads.
 *
 * @returns VINF_SUCCESS when everything went smoothly. IPRT status code if we
 *          encountered trouble.
 * @param   pOnce           The execute once structure.
 * @param   phEvtM          Where to store the semaphore handle so the caller
 *                          can do the cleaning up for us.
 */
static int rtOnceOtherThread(PRTONCE pOnce, PRTSEMEVENTMULTI phEvtM)
{
    uint32_t cYields = 0;
    for (;;)
    {
        int32_t iState = ASMAtomicReadS32(&pOnce->iState);
        switch (iState)
        {
            /*
             * No semaphore, try create one.
             */
            case RTONCESTATE_BUSY_NO_SEM:
                if (ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_BUSY_CREATING_SEM, RTONCESTATE_BUSY_NO_SEM))
                {
                    int rc = RTSemEventMultiCreate(phEvtM);
                    if (RT_SUCCESS(rc))
                    {
                        ASMAtomicWriteHandle(&pOnce->hEventMulti, *phEvtM);
                        int32_t cRefs = ASMAtomicIncS32(&pOnce->cEventRefs); Assert(cRefs == 1); NOREF(cRefs);

                        if (!ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_BUSY_HAVE_SEM, RTONCESTATE_BUSY_CREATING_SEM))
                        {
                            /* Too slow. */
                            AssertReturn(ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE, RTONCESTATE_DONE_CREATING_SEM)
                                         , VERR_INTERNAL_ERROR_5);

                            ASMAtomicWriteHandle(&pOnce->hEventMulti, NIL_RTSEMEVENTMULTI);
                            cRefs = ASMAtomicDecS32(&pOnce->cEventRefs); Assert(cRefs == 0);

                            RTSemEventMultiDestroy(*phEvtM);
                            *phEvtM = NIL_RTSEMEVENTMULTI;
                        }
                    }
                    else
                    {
                        AssertReturn(   ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_BUSY_SPIN, RTONCESTATE_BUSY_CREATING_SEM)
                                     || ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE, RTONCESTATE_DONE_CREATING_SEM)
                                     , VERR_INTERNAL_ERROR_4);
                        *phEvtM = NIL_RTSEMEVENTMULTI;
                    }
                }
                break;

            /*
             * This isn't nice, but it's the easy way out.
             */
            case RTONCESTATE_BUSY_CREATING_SEM:
            case RTONCESTATE_BUSY_SPIN:
                cYields++;
                if (!(++cYields % 8))
                    RTThreadSleep(1);
                else
                    RTThreadYield();
                break;

            /*
             * There is a semaphore, try wait on it.
             *
             * We continue waiting after reaching DONE_HAVE_SEM if we
             * already got the semaphore to avoid racing the first thread.
             */
            case RTONCESTATE_DONE_HAVE_SEM:
                if (*phEvtM == NIL_RTSEMEVENTMULTI)
                    return VINF_SUCCESS;
                RT_FALL_THRU();
            case RTONCESTATE_BUSY_HAVE_SEM:
            {
                /*
                 * Grab the semaphore if we haven't got it yet.
                 * We must take care not to increment the counter if it
                 * is 0. This may happen if we're racing a state change.
                 */
                if (*phEvtM == NIL_RTSEMEVENTMULTI)
                {
                    int32_t cEventRefs = ASMAtomicUoReadS32(&pOnce->cEventRefs);
                    while (   cEventRefs > 0
                           && ASMAtomicUoReadS32(&pOnce->iState) == RTONCESTATE_BUSY_HAVE_SEM)
                    {
                        if (ASMAtomicCmpXchgExS32(&pOnce->cEventRefs, cEventRefs + 1, cEventRefs, &cEventRefs))
                            break;
                        ASMNopPause();
                    }
                    if (cEventRefs <= 0)
                        break;

                    ASMAtomicReadHandle(&pOnce->hEventMulti, phEvtM);
                    AssertReturn(*phEvtM != NIL_RTSEMEVENTMULTI, VERR_INTERNAL_ERROR_2);
                }

                /*
                 * We've got a semaphore, do the actual waiting.
                 */
                do
                    RTSemEventMultiWaitNoResume(*phEvtM, RT_INDEFINITE_WAIT);
                while (ASMAtomicReadS32(&pOnce->iState) == RTONCESTATE_BUSY_HAVE_SEM);
                break;
            }

            case RTONCESTATE_DONE_CREATING_SEM:
            case RTONCESTATE_DONE:
                return VINF_SUCCESS;

            default:
                AssertMsgFailedReturn(("%d\n", iState), VERR_INTERNAL_ERROR_3);
        }
    }
}


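/*
 * RTOnceSlow() is the out-of-line worker for the RTOnce/RTOnceEx inline
 * wrappers declared in iprt/once.h; they only call down here when pOnce is
 * not already in one of the DONE states (the same fast-path check is
 * repeated at the top of the function below).
 */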
RTDECL(int) RTOnceSlow(PRTONCE pOnce, PFNRTONCE pfnOnce, PFNRTONCECLEANUP pfnCleanUp, void *pvUser)
{
    /*
     * Validate input (strict builds only).
     */
    AssertPtr(pOnce);
    AssertPtr(pfnOnce);

    /*
     * Deal with the 'initialized' case first
     */
    int32_t iState = ASMAtomicUoReadS32(&pOnce->iState);
    if (RT_LIKELY(   iState == RTONCESTATE_DONE
                  || iState == RTONCESTATE_DONE_CREATING_SEM
                  || iState == RTONCESTATE_DONE_HAVE_SEM
                  ))
        return ASMAtomicUoReadS32(&pOnce->rc);

    AssertReturn(   iState == RTONCESTATE_UNINITIALIZED
                 || iState == RTONCESTATE_BUSY_NO_SEM
                 || iState == RTONCESTATE_BUSY_SPIN
                 || iState == RTONCESTATE_BUSY_CREATING_SEM
                 || iState == RTONCESTATE_BUSY_HAVE_SEM
                 , VERR_INTERNAL_ERROR);

#ifdef RTONCE_NO_TERM
    AssertReturn(!pfnCleanUp, VERR_NOT_SUPPORTED);
#else /* !RTONCE_NO_TERM */

    /*
     * Make sure our clean-up bits are working if needed later.
     */
    if (pfnCleanUp)
    {
        int rc = RTOnce(&g_OnceCleanUp, rtOnceInitCleanUp, NULL);
        if (RT_FAILURE(rc))
            return rc;
    }
#endif /* !RTONCE_NO_TERM */

    /*
     * Do we initialize it?
     */
    int32_t rcOnce;
    if (   iState == RTONCESTATE_UNINITIALIZED
        && ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_BUSY_NO_SEM, RTONCESTATE_UNINITIALIZED))
    {
        /*
         * Yes, so do the execute once stuff.
         */
        rcOnce = pfnOnce(pvUser);
        ASMAtomicWriteS32(&pOnce->rc, rcOnce);

#ifndef RTONCE_NO_TERM
        /*
         * Register clean-up if requested and we were successful.
         */
        if (pfnCleanUp && RT_SUCCESS(rcOnce))
        {
            RTONCE_CLEANUP_LOCK();

            pOnce->pfnCleanUp = pfnCleanUp;
            pOnce->pvUser     = pvUser;
            RTListAppend(&g_CleanUpList, &pOnce->CleanUpNode);

            RTONCE_CLEANUP_UNLOCK();
        }
#endif /* !RTONCE_NO_TERM */

        /*
         * If there is a semaphore to signal, we're in for some extra work here.
         */
        if (   !ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE,              RTONCESTATE_BUSY_NO_SEM)
            && !ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE,              RTONCESTATE_BUSY_SPIN)
            && !ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE_CREATING_SEM, RTONCESTATE_BUSY_CREATING_SEM)
           )
        {
            /* Grab the semaphore by switching to 'DONE_HAVE_SEM' before reaching 'DONE'. */
            AssertReturn(ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE_HAVE_SEM, RTONCESTATE_BUSY_HAVE_SEM),
                         VERR_INTERNAL_ERROR_2);

            int32_t cRefs = ASMAtomicIncS32(&pOnce->cEventRefs);
            Assert(cRefs > 1); NOREF(cRefs);

            RTSEMEVENTMULTI hEvtM;
            ASMAtomicReadHandle(&pOnce->hEventMulti, &hEvtM);
            Assert(hEvtM != NIL_RTSEMEVENTMULTI);

            ASMAtomicWriteS32(&pOnce->iState, RTONCESTATE_DONE);

            /* Signal it and return. */
            RTSemEventMultiSignal(hEvtM);
        }
    }
    else
    {
        /*
         * Wait for the first thread to complete. Delegate this to a helper
         * function to simplify cleanup and keep things a bit shorter.
         */
        RTSEMEVENTMULTI hEvtM = NIL_RTSEMEVENTMULTI;
        rcOnce = rtOnceOtherThread(pOnce, &hEvtM);
        if (hEvtM != NIL_RTSEMEVENTMULTI)
        {
            if (ASMAtomicDecS32(&pOnce->cEventRefs) == 0)
            {
                bool fRc;
                ASMAtomicCmpXchgHandle(&pOnce->hEventMulti, NIL_RTSEMEVENTMULTI, hEvtM, fRc); Assert(fRc);
                fRc = ASMAtomicCmpXchgS32(&pOnce->iState, RTONCESTATE_DONE, RTONCESTATE_DONE_HAVE_SEM); Assert(fRc);
                RTSemEventMultiDestroy(hEvtM);
            }
        }
        if (RT_SUCCESS(rcOnce))
            rcOnce = ASMAtomicUoReadS32(&pOnce->rc);
    }

    return rcOnce;
}
RT_EXPORT_SYMBOL(RTOnceSlow);


RTDECL(void) RTOnceReset(PRTONCE pOnce)
{
    /* Cannot be done while busy! */
    AssertPtr(pOnce);
    Assert(pOnce->hEventMulti == NIL_RTSEMEVENTMULTI);
    int32_t iState = ASMAtomicUoReadS32(&pOnce->iState);
    AssertMsg(   iState == RTONCESTATE_DONE
              || iState == RTONCESTATE_UNINITIALIZED,
              ("%d\n", iState));
    NOREF(iState);

#ifndef RTONCE_NO_TERM
    /* Unregister clean-up. */
    if (pOnce->pfnCleanUp)
    {
        RTONCE_CLEANUP_LOCK();

        RTListNodeRemove(&pOnce->CleanUpNode);
        pOnce->pfnCleanUp = NULL;
        pOnce->pvUser     = NULL;

        RTONCE_CLEANUP_UNLOCK();
    }
#endif /* !RTONCE_NO_TERM */

    /* Do the same as RTONCE_INITIALIZER does. */
    ASMAtomicWriteS32(&pOnce->rc,     VERR_INTERNAL_ERROR);
    ASMAtomicWriteS32(&pOnce->iState, RTONCESTATE_UNINITIALIZED);
}
RT_EXPORT_SYMBOL(RTOnceReset);

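For context, here is a minimal usage sketch of the execute-once API implemented above. It is not part of once.cpp: it assumes the RTOnce() inline wrapper and the FNRTONCE callback type declared in iprt/once.h, and the names g_MyOnce, myInitOnce and myGetResource are purely illustrative.

#include <iprt/once.h>
#include <iprt/err.h>

/* Execute-once control block; RTONCE_INITIALIZER puts it in the UNINITIALIZED state. */
static RTONCE g_MyOnce = RTONCE_INITIALIZER;

/* Runs exactly once, no matter how many threads race into myGetResource(). */
static DECLCALLBACK(int32_t) myInitOnce(void *pvUser)
{
    NOREF(pvUser);
    /* ... perform the lazy one-time initialization here ... */
    return VINF_SUCCESS;
}

int myGetResource(void)
{
    /* Every caller gets the status code returned by myInitOnce(); callers that
       lose the race wait (on the event semaphore or by yielding) until it is done. */
    int rc = RTOnce(&g_MyOnce, myInitOnce, NULL /*pvUser*/);
    if (RT_FAILURE(rc))
        return rc;
    /* ... safe to use the initialized resource from here on ... */
    return VINF_SUCCESS;
}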