VirtualBox

source: vbox/trunk/src/VBox/Additions/haiku/SharedFolders/lock.h@60906

Last change on this file since 60906 was 48940, checked in by vboxsync, 11 years ago:

Additions/haiku: Whitespace and svn:keyword cleanups by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision

File size: 9.2 KB
/* $Id: lock.h 48940 2013-10-07 21:26:19Z vboxsync $ */
/** @file
 * Lock.h - Haiku, private locking internals.
 */

/*
 * Copyright (C) 2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * This code is based on:
 *
 * VirtualBox Guest Additions for Haiku.
 *
 * Copyright 2008-2010, Ingo Weinhold, [email protected].
 * Copyright 2002-2009, Axel Dörfler, [email protected].
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the MIT License.
 */

#ifndef _KERNEL_LOCK_H
#define _KERNEL_LOCK_H

#include <OS.h>


struct mutex_waiter;

typedef struct mutex {
    const char*             name;
    struct mutex_waiter*    waiters;
#if KDEBUG
    thread_id               holder;
#else
    int32                   count;
    uint16                  ignore_unlock_count;
#endif
    uint8                   flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME   0x1


typedef struct recursive_lock {
    mutex       lock;
#if !KDEBUG
    thread_id   holder;
#endif
    int         recursion;
} recursive_lock;


struct rw_lock_waiter;

typedef struct rw_lock {
    const char*             name;
    struct rw_lock_waiter*  waiters;
    thread_id               holder;
    vint32                  count;
    int32                   owner_count;
    int16                   active_readers;
        // Only > 0 while a writer is waiting: number
        // of active readers when the first waiting
        // writer started waiting.
    int16                   pending_readers;
        // Number of readers that have already
        // incremented "count", but have not yet started
        // to wait at the time the last writer unlocked.
    uint32                  flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE   0x10000

#define RW_LOCK_FLAG_CLONE_NAME     0x1

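/*
 * Illustrative note on the "count" encoding, as read off the inline
 * fast paths further below (the writer side lives in the non-inline
 * _rw_lock_*() functions and is an assumption here): each reader adds 1,
 * each writer is expected to add RW_LOCK_WRITER_COUNT_BASE (0x10000), so
 *
 *   count == 0              the lock is free
 *   0 < count < 0x10000     readers only: the inline fast path suffices
 *   count >= 0x10000        a writer is involved: callers fall through
 *                           to the _rw_lock_*() slow path
 */
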

#if KDEBUG
#   define KDEBUG_RW_LOCK_DEBUG 0
        // Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
        // The rw_lock will just behave like a recursive locker then.
#   define ASSERT_LOCKED_RECURSIVE(r) \
        { ASSERT(find_thread(NULL) == (r)->lock.holder); }
#   define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#   define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
        { ASSERT(find_thread(NULL) == (l)->holder); }
#   if KDEBUG_RW_LOCK_DEBUG
#       define ASSERT_READ_LOCKED_RW_LOCK(l) \
            { ASSERT(find_thread(NULL) == (l)->holder); }
#   else
#       define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#   endif
#else
#   define ASSERT_LOCKED_RECURSIVE(r)       do {} while (false)
#   define ASSERT_LOCKED_MUTEX(m)           do {} while (false)
#   define ASSERT_WRITE_LOCKED_RW_LOCK(m)   do {} while (false)
#   define ASSERT_READ_LOCKED_RW_LOCK(l)    do {} while (false)
#endif

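/*
 * Example (illustrative; remove_node() and sListLock are hypothetical):
 * the assertions document a function's locking preconditions and compile
 * to no-ops unless KDEBUG is set.
 *
 *   static void
 *   remove_node(struct node* node)
 *   {
 *       ASSERT_LOCKED_MUTEX(&sListLock);
 *       // ... caller must hold sListLock while we unlink the node ...
 *   }
 */
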

// static initializers
#if KDEBUG
#   define MUTEX_INITIALIZER(name)          { name, NULL, -1, 0 }
#   define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), 0 }
#else
#   define MUTEX_INITIALIZER(name)          { name, NULL, 0, 0, 0 }
#   define RECURSIVE_LOCK_INITIALIZER(name) { MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name)           { name, NULL, -1, 0, 0, 0 }

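/*
 * Example (illustrative; the lock variables are hypothetical). The
 * initializers store the name pointer as-is, so the string must stay
 * valid for the lifetime of the lock:
 *
 *   static mutex sCacheLock = MUTEX_INITIALIZER("cache lock");
 *   static rw_lock sTableLock = RW_LOCK_INITIALIZER("table lock");
 *   static recursive_lock sVolumeLock =
 *       RECURSIVE_LOCK_INITIALIZER("volume lock");
 */
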
#if KDEBUG
#   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder)
#else
#   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->holder)
#endif


#ifdef __cplusplus
extern "C" {
#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
    // name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
    uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);

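/*
 * Example (illustrative; sVolumeLock is hypothetical). A recursive_lock
 * may be re-acquired by the thread that already holds it; every lock call
 * must be balanced by an unlock:
 *
 *   recursive_lock sVolumeLock;
 *   recursive_lock_init(&sVolumeLock, "volume lock");
 *   recursive_lock_lock(&sVolumeLock);
 *   recursive_lock_lock(&sVolumeLock);      // same thread: recursion == 2
 *   recursive_lock_unlock(&sVolumeLock);
 *   recursive_lock_unlock(&sVolumeLock);    // now actually released
 *   recursive_lock_destroy(&sVolumeLock);
 */
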
extern void rw_lock_init(rw_lock* lock, const char* name);
    // name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);

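/*
 * Example (illustrative; sTableLock and the data accesses are
 * hypothetical). Multiple readers may hold the lock concurrently, a
 * writer holds it exclusively; the read side uses the inline fast paths
 * defined further below:
 *
 *   rw_lock_read_lock(&sTableLock);
 *   // ... read the shared table ...
 *   rw_lock_read_unlock(&sTableLock);
 *
 *   rw_lock_write_lock(&sTableLock);
 *   // ... modify the shared table ...
 *   rw_lock_write_unlock(&sTableLock);
 */
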
extern void mutex_init(mutex* lock, const char* name);
    // name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
    // Unlocks "from" and locks "to" such that unlocking and starting to
    // wait for the lock happen atomically. I.e. if "from" guards the
    // object "to" belongs to, the operation is safe as long as "from" is
    // held while destroying "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
    // Like mutex_switch_lock(), just for switching from a read-locked
    // rw_lock.

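/*
 * Example (illustrative; sListLock, lookup_object() and the object type
 * are hypothetical). The atomic switch closes the window in which the
 * object could be destroyed between dropping the list lock and acquiring
 * the object lock:
 *
 *   mutex_lock(&sListLock);
 *   struct my_object* object = lookup_object(id);
 *   mutex_switch_lock(&sListLock, &object->lock);
 *   // sListLock is released and object->lock acquired with no point at
 *   // which neither lock protects the object.
 */
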

// implementation private:

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
    uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);

extern status_t _mutex_lock(mutex* lock, bool threadsLocked);
extern void _mutex_unlock(mutex* lock, bool threadsLocked);
extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
    bigtime_t timeout);


static inline status_t
rw_lock_read_lock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
    return rw_lock_write_lock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock(lock);
    return B_OK;
#endif
}


static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
    bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
    // In this debug mode read locks degrade to write locks; the timeout
    // is not honored on this path.
    return rw_lock_write_lock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}


static inline void
rw_lock_read_unlock(rw_lock* lock)
{
#if KDEBUG_RW_LOCK_DEBUG
    // Releases the write lock taken by rw_lock_read_lock() in this debug
    // mode; calls the underscore variant directly, since the
    // rw_lock_write_unlock() wrapper is only defined further below.
    _rw_lock_write_unlock(lock, false);
#else
    int32 oldCount = atomic_add(&lock->count, -1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        _rw_lock_read_unlock(lock, false);
#endif
}


static inline void
rw_lock_write_unlock(rw_lock* lock)
{
    _rw_lock_write_unlock(lock, false);
}


static inline status_t
mutex_lock(mutex* lock)
{
#if KDEBUG
    return _mutex_lock(lock, false);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, false);
    return B_OK;
#endif
}


static inline status_t
mutex_lock_threads_locked(mutex* lock)
{
#if KDEBUG
    return _mutex_lock(lock, true);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, true);
    return B_OK;
#endif
}


static inline status_t
mutex_trylock(mutex* lock)
{
#if KDEBUG
    return _mutex_trylock(lock);
#else
    if (atomic_test_and_set(&lock->count, -1, 0) != 0)
        return B_WOULD_BLOCK;
    return B_OK;
#endif
}


static inline status_t
mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags, bigtime_t timeout)
{
#if KDEBUG
    return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}


static inline void
mutex_unlock(mutex* lock)
{
#if !KDEBUG
    if (atomic_add(&lock->count, 1) < -1)
#endif
        _mutex_unlock(lock, false);
}

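/*
 * Summary of the non-KDEBUG mutex fast path, as implemented by the
 * inlines above: "count" is 0 when the mutex is free. mutex_lock()
 * atomically decrements it; an old value of 0 means the lock was free
 * (no slow-path call), anything negative means contention and
 * _mutex_lock() blocks. mutex_unlock() increments; an old value below -1
 * indicates waiting threads, so _mutex_unlock() must wake one of them.
 */
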

static inline void
mutex_transfer_lock(mutex* lock, thread_id thread)
{
#if KDEBUG
    lock->holder = thread;
#endif
}


extern void lock_debug_init();

#ifdef __cplusplus
}
#endif

#endif /* _KERNEL_LOCK_H */