VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/generic/semspinmutex-r0drv-generic.c@ 98103

Last change on this file was 98103, checked in by vboxsync, 2 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 16.4 KB
/* $Id: semspinmutex-r0drv-generic.c 98103 2023-01-17 14:15:46Z vboxsync $ */
/** @file
 * IPRT - Spinning Mutex Semaphores, Ring-0 Driver, Generic.
 */

/*
 * Copyright (C) 2009-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
 * in the VirtualBox distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifdef RT_OS_WINDOWS
# include "../nt/the-nt-kernel.h"
#endif
#include "internal/iprt.h"

#include <iprt/semaphore.h>
#include <iprt/asm.h>
#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/thread.h>
#include "internal/magics.h"

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * Saved state information.
 */
typedef struct RTSEMSPINMUTEXSTATE
{
    /** Saved flags register. */
    RTCCUINTREG             fSavedFlags;
    /** Preemption state. */
    RTTHREADPREEMPTSTATE    PreemptState;
    /** Whether to spin or sleep. */
    bool                    fSpin;
    /** Whether the flags have been saved. */
    bool                    fValidFlags;
} RTSEMSPINMUTEXSTATE;

/**
 * Spinning mutex semaphore.
 */
typedef struct RTSEMSPINMUTEXINTERNAL
{
    /** Magic value (RTSEMSPINMUTEX_MAGIC).
     * RTSEMSPINMUTEX_MAGIC is the value of an initialized & operational semaphore. */
    uint32_t volatile       u32Magic;
    /** Flags. This is a combination of RTSEMSPINMUTEX_FLAGS_XXX and
     * RTSEMSPINMUTEX_INT_FLAGS_XXX. */
    uint32_t volatile       fFlags;
    /** The owner thread.
     * This is NIL if the semaphore is not owned by anyone. */
    RTNATIVETHREAD volatile hOwner;
    /** Number of threads that are fighting for the lock. */
    int32_t volatile        cLockers;
    /** The semaphore to block on. */
    RTSEMEVENT              hEventSem;
    /** Saved state information of the owner.
     * This will be restored by RTSemSpinRelease. */
    RTSEMSPINMUTEXSTATE     SavedState;
} RTSEMSPINMUTEXINTERNAL;


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/*#define RTSEMSPINMUTEX_INT_FLAGS_MUST*/

/** Validates the handle, returning if invalid. */
#define RTSEMSPINMUTEX_VALIDATE_RETURN(pThis) \
    do \
    { \
        uint32_t u32Magic; \
        AssertPtr(pThis); \
        u32Magic = (pThis)->u32Magic; \
        if (u32Magic != RTSEMSPINMUTEX_MAGIC) \
        { \
            AssertMsgFailed(("u32Magic=%#x pThis=%p\n", u32Magic, pThis)); \
            return u32Magic == RTSEMSPINMUTEX_MAGIC_DEAD ? VERR_SEM_DESTROYED : VERR_INVALID_HANDLE; \
        } \
    } while (0)


RTDECL(int) RTSemSpinMutexCreate(PRTSEMSPINMUTEX phSpinMtx, uint32_t fFlags)
{
    RTSEMSPINMUTEXINTERNAL *pThis;
    int                     rc;

    AssertReturn(!(fFlags & ~RTSEMSPINMUTEX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);
    AssertPtr(phSpinMtx);

    /*
     * Allocate and initialize the structure.
     */
    pThis = (RTSEMSPINMUTEXINTERNAL *)RTMemAllocZ(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    pThis->u32Magic = RTSEMSPINMUTEX_MAGIC;
    pThis->fFlags   = fFlags;
    pThis->hOwner   = NIL_RTNATIVETHREAD;
    pThis->cLockers = 0;
    rc = RTSemEventCreateEx(&pThis->hEventSem, RTSEMEVENT_FLAGS_NO_LOCK_VAL, NIL_RTLOCKVALCLASS, NULL);
    if (RT_SUCCESS(rc))
    {
        *phSpinMtx = pThis;
        return VINF_SUCCESS;
    }

    RTMemFree(pThis);
    return rc;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexCreate);

/**
 * Helper for RTSemSpinMutexTryRequest and RTSemSpinMutexRequest.
 *
 * This will check whether the current context is usable for taking the lock,
 * then disable preemption and, if the semaphore is interrupt safe, interrupts.
 *
 * @returns VINF_SUCCESS, VINF_SEM_BAD_CONTEXT or VERR_SEM_BAD_CONTEXT.
 * @param   pState      Output structure.
 * @param   pThis       The spinning mutex semaphore.
 */
static int rtSemSpinMutexEnter(RTSEMSPINMUTEXSTATE *pState, RTSEMSPINMUTEXINTERNAL *pThis)
{
#ifndef RT_OS_WINDOWS
    RTTHREADPREEMPTSTATE const StateInit = RTTHREADPREEMPTSTATE_INITIALIZER;
#endif
    int rc = VINF_SUCCESS;

    /** @todo Later #1: When entering in interrupt context and we're not able to
     *        wake up threads from it, we could try switching the lock into pure
     *        spinlock mode. This would require that there are no other threads
     *        currently waiting on it and that the RTSEMSPINMUTEX_FLAGS_IRQ_SAFE
     *        flag is set.
     *
     *        Later #2: Similarly, it is possible to turn on
     *        RTSEMSPINMUTEX_FLAGS_IRQ_SAFE at run time if we manage to grab the
     *        semaphore ownership at interrupt time. We might even want to try to
     *        delay setting RTSEMSPINMUTEX_FLAGS_IRQ_SAFE, since we're fine if we
     *        get it...
     */

#ifdef RT_OS_WINDOWS
    /*
     * NT: IRQL <= DISPATCH_LEVEL for waking up threads; IRQL < DISPATCH_LEVEL for sleeping.
     */
    pState->PreemptState.uchOldIrql = KeGetCurrentIrql();
    if (pState->PreemptState.uchOldIrql > DISPATCH_LEVEL)
        return VERR_SEM_BAD_CONTEXT;

    if (pState->PreemptState.uchOldIrql >= DISPATCH_LEVEL)
        pState->fSpin = true;
    else
    {
        pState->fSpin = false;
        KeRaiseIrql(DISPATCH_LEVEL, &pState->PreemptState.uchOldIrql);
        Assert(pState->PreemptState.uchOldIrql < DISPATCH_LEVEL);
    }

#elif defined(RT_OS_SOLARIS)
    /*
     * Solaris: RTSemEventSignal will do bad stuff on S10 if interrupts are disabled.
     */
    if (!ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#elif defined(RT_OS_LINUX) || defined(RT_OS_OS2)
    /*
     * OSes on which RTSemEventSignal can be called from any context.
     */
    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    if (RTThreadIsInInterrupt(NIL_RTTHREAD))
    {
        if (!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE))
            rc = VINF_SEM_BAD_CONTEXT; /* Try, but owner might be interrupted. */
        pState->fSpin = true;
    }
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);

#else /* PORTME: Check for context where we cannot wake up threads. */
    /*
     * Default: ASSUME thread can be woken up if interrupts are enabled and
     * we're not in an interrupt context.
     * ASSUME that we can go to sleep if preemption is enabled.
     */
    if (   RTThreadIsInInterrupt(NIL_RTTHREAD)
        || !ASMIntAreEnabled())
        return VERR_SEM_BAD_CONTEXT;

    pState->fSpin = !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    pState->PreemptState = StateInit;
    RTThreadPreemptDisable(&pState->PreemptState);
#endif

    /*
     * Disable interrupts if necessary.
     */
    pState->fValidFlags = !!(pThis->fFlags & RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
    if (pState->fValidFlags)
        pState->fSavedFlags = ASMIntDisableFlags();
    else
        pState->fSavedFlags = 0;

    return rc;
}

/**
 * Helper for RTSemSpinMutexTryRequest, RTSemSpinMutexRequest and
 * RTSemSpinMutexRelease.
 *
 * @param   pState      The state saved by rtSemSpinMutexEnter.
 */
DECL_FORCE_INLINE(void) rtSemSpinMutexLeave(RTSEMSPINMUTEXSTATE *pState)
{
    /*
     * Restore the interrupt flag.
     */
    if (pState->fValidFlags)
        ASMSetFlags(pState->fSavedFlags);

#ifdef RT_OS_WINDOWS
    /*
     * NT: Lower the IRQL if we raised it.
     */
    if (pState->PreemptState.uchOldIrql < DISPATCH_LEVEL)
        KeLowerIrql(pState->PreemptState.uchOldIrql);
#else
    /*
     * Default: Restore preemption.
     */
    RTThreadPreemptRestore(&pState->PreemptState);
#endif
}


RTDECL(int) RTSemSpinMutexTryRequest(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;
    int                     rc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Check context, disable preemption and save flags if necessary.
     */
    rc = rtSemSpinMutexEnter(&State, pThis);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Try to take ownership.
     */
    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
    if (!fRc)
    {
        /* Busy, too bad. Check for attempts at nested access. */
        rc = VERR_SEM_BUSY;
        if (RT_UNLIKELY(pThis->hOwner == hSelf))
        {
            AssertMsgFailed(("%p attempt at nested access\n", pThis));
            rc = VERR_SEM_NESTED;
        }

        rtSemSpinMutexLeave(&State);
        return rc;
    }

    /*
     * We're the semaphore owner.
     */
    ASMAtomicIncS32(&pThis->cLockers);
    pThis->SavedState = State;
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexTryRequest);


RTDECL(int) RTSemSpinMutexRequest(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;
    int                     rc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Check context, disable preemption and save flags if necessary.
     */
    rc = rtSemSpinMutexEnter(&State, pThis);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Try to take ownership.
     */
    ASMAtomicIncS32(&pThis->cLockers);
    ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
    if (!fRc)
    {
        uint32_t cSpins;

        /*
         * It's busy. Check if it's an attempt at nested access.
         */
        if (RT_UNLIKELY(pThis->hOwner == hSelf))
        {
            AssertMsgFailed(("%p attempt at nested access\n", pThis));
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_NESTED;
        }

        /*
         * Return if we're in interrupt context and the semaphore isn't
         * configured to be interrupt safe.
         */
        if (rc == VINF_SEM_BAD_CONTEXT)
        {
            rtSemSpinMutexLeave(&State);
            return VERR_SEM_BAD_CONTEXT;
        }

        /*
         * Ok, we have to wait.
         */
        if (State.fSpin)
        {
            for (cSpins = 0; ; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                /*
                 * "Yield" once in a while. This may lower our IRQL/PIL, which
                 * may get us preempted, and it will certainly stop the
                 * hammering of hOwner for a little while.
                 */
                if ((cSpins & 0x7f) == 0x1f)
                {
                    rtSemSpinMutexLeave(&State);
                    rtSemSpinMutexEnter(&State, pThis);
                    Assert(State.fSpin);
                }
            }
        }
        else
        {
            for (cSpins = 0;; cSpins++)
            {
                ASMAtomicCmpXchgHandle(&pThis->hOwner, hSelf, NIL_RTNATIVETHREAD, fRc);
                if (fRc)
                    break;
                ASMNopPause();
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMSPINMUTEX_MAGIC))
                {
                    rtSemSpinMutexLeave(&State);
                    return VERR_SEM_DESTROYED;
                }

                if ((cSpins & 15) == 15) /* spin a bit before going to sleep (again). */
                {
                    rtSemSpinMutexLeave(&State);

                    rc = RTSemEventWait(pThis->hEventSem, RT_INDEFINITE_WAIT);
                    ASMCompilerBarrier();
                    if (RT_SUCCESS(rc))
                        AssertReturn(pThis->u32Magic == RTSEMSPINMUTEX_MAGIC, VERR_SEM_DESTROYED);
                    else if (rc == VERR_INTERRUPTED)
                        AssertRC(rc); /* shouldn't happen */
                    else
                    {
                        AssertRC(rc);
                        return rc;
                    }

                    rc = rtSemSpinMutexEnter(&State, pThis);
                    AssertRCReturn(rc, rc);
                    Assert(!State.fSpin);
                }
            }
        }
    }

    /*
     * We're the semaphore owner.
     */
    pThis->SavedState = State;
    Assert(pThis->hOwner == hSelf);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexRequest);


RTDECL(int) RTSemSpinMutexRelease(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis = hSpinMtx;
    RTNATIVETHREAD          hSelf = RTThreadNativeSelf();
    uint32_t                cLockers;
    RTSEMSPINMUTEXSTATE     State;
    bool                    fRc;

    Assert(hSelf != NIL_RTNATIVETHREAD);
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /*
     * Get the saved state and try to release the semaphore.
     */
    State = pThis->SavedState;
    ASMCompilerBarrier();
    ASMAtomicCmpXchgHandle(&pThis->hOwner, NIL_RTNATIVETHREAD, hSelf, fRc);
    AssertMsgReturn(fRc,
                    ("hOwner=%p hSelf=%p cLockers=%d\n", pThis->hOwner, hSelf, pThis->cLockers),
                    VERR_NOT_OWNER);

    cLockers = ASMAtomicDecS32(&pThis->cLockers);
    rtSemSpinMutexLeave(&State);
    if (cLockers > 0)
    {
        int rc = RTSemEventSignal(pThis->hEventSem);
        AssertReleaseMsg(RT_SUCCESS(rc), ("RTSemEventSignal -> %Rrc\n", rc));
    }
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexRelease);


RTDECL(int) RTSemSpinMutexDestroy(RTSEMSPINMUTEX hSpinMtx)
{
    RTSEMSPINMUTEXINTERNAL *pThis;
    RTSEMEVENT              hEventSem;
    int                     rc;

    if (hSpinMtx == NIL_RTSEMSPINMUTEX)
        return VINF_SUCCESS;
    pThis = hSpinMtx;
    RTSEMSPINMUTEX_VALIDATE_RETURN(pThis);

    /* No destruction races allowed! */
    AssertMsg(   pThis->cLockers == 0
              && pThis->hOwner   == NIL_RTNATIVETHREAD,
              ("pThis=%p cLockers=%d hOwner=%p\n", pThis, pThis->cLockers, pThis->hOwner));

    /*
     * Invalidate the structure, destroy the event semaphore and free the structure.
     */
    ASMAtomicWriteU32(&pThis->u32Magic, RTSEMSPINMUTEX_MAGIC_DEAD);
    hEventSem = pThis->hEventSem;
    pThis->hEventSem = NIL_RTSEMEVENT;
    rc = RTSemEventDestroy(hEventSem); AssertRC(rc);

    RTMemFree(pThis);
    return rc;
}
RT_EXPORT_SYMBOL(RTSemSpinMutexDestroy);

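For orientation, here is a minimal usage sketch of the API implemented above. It is an editor's illustration under stated assumptions, not part of the original file: only the RTSemSpinMutex* functions, RTSEMSPINMUTEX_FLAGS_IRQ_SAFE and NIL_RTSEMSPINMUTEX come from IPRT as shown in the source; the MYDEVSTATE structure and the myDev* functions are hypothetical ring-0 driver code.

#include <iprt/semaphore.h>
#include <iprt/err.h>
#include <iprt/types.h>

/* Hypothetical device state protected by a spinning mutex. */
typedef struct MYDEVSTATE
{
    RTSEMSPINMUTEX  hSpinMtx;
    uint32_t        cInterrupts;
} MYDEVSTATE;

/* Create the lock. RTSEMSPINMUTEX_FLAGS_IRQ_SAFE makes request/release disable
   interrupts while the lock is held, so the lock may also be attempted from
   interrupt context (see rtSemSpinMutexEnter above). */
static int myDevInit(MYDEVSTATE *pDev)
{
    pDev->hSpinMtx = NIL_RTSEMSPINMUTEX;
    return RTSemSpinMutexCreate(&pDev->hSpinMtx, RTSEMSPINMUTEX_FLAGS_IRQ_SAFE);
}

/* Typical critical section: request, touch the shared state, release. */
static int myDevRecordInterrupt(MYDEVSTATE *pDev)
{
    int rc = RTSemSpinMutexRequest(pDev->hSpinMtx);
    if (RT_SUCCESS(rc))
    {
        pDev->cInterrupts++;
        RTSemSpinMutexRelease(pDev->hSpinMtx);
    }
    return rc; /* e.g. VERR_SEM_BAD_CONTEXT when called at too high an IRQL. */
}

/* Tear down; RTSemSpinMutexDestroy tolerates NIL_RTSEMSPINMUTEX. */
static void myDevTerm(MYDEVSTATE *pDev)
{
    RTSemSpinMutexDestroy(pDev->hSpinMtx);
    pDev->hSpinMtx = NIL_RTSEMSPINMUTEX;
}

Where blocking or spinning on the owner is not acceptable, RTSemSpinMutexTryRequest can be used instead of RTSemSpinMutexRequest; it returns VERR_SEM_BUSY when the lock is already taken.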