Timestamp:
    Oct 7, 2013 9:26:19 PM (11 years ago)
Location:
    trunk/src/VBox/Additions/haiku
Files:
    4 edited
trunk/src/VBox/Additions/haiku/SharedFolders/OpenHashTable.h (r43364 → r48940)

    if (returnElements)
    {
        ValueType** nextPointer = &result;

…

    class Iterator
    {
    public:
        Iterator(const HashTable* table)

trunk/src/VBox/Additions/haiku/SharedFolders/kernel_cpp.h (r43363 → r48940)

operator new(size_t size) throw (std::bad_alloc)
{
    // we don't actually throw any exceptions, but we have to
    // keep the prototype as specified in <new>, or else GCC 3
    // won't like us
    return malloc(size);
}

…

operator new[](size_t size) throw (std::bad_alloc)
{
    return malloc(size);
}

…

operator new(size_t size, const std::nothrow_t &) throw ()
{
    return malloc(size);
}

…

operator new[](size_t size, const std::nothrow_t &) throw ()
{
    return malloc(size);
}

…

operator new(size_t size, const mynothrow_t &) throw ()
{
    return malloc(size);
}

…

operator new[](size_t size, const mynothrow_t &) throw ()
{
    return malloc(size);
}

…

operator delete(void *ptr) throw ()
{
    free(ptr);
}

…

operator delete[](void *ptr) throw ()
{
    free(ptr);
}

#endif  // #if _KERNEL_MODE

#endif  // __cplusplus

#endif  /* KERNEL_CPP_H */

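In kernel mode these overloads simply route C++ allocation to the kernel heap. A minimal usage sketch, assuming kernel_cpp.h is included from a _KERNEL_MODE build; the SharedFolderMapping type and the helper names are illustrative assumptions, not part of this changeset:

#include "kernel_cpp.h"
    // maps global new/delete onto malloc()/free() as shown above

struct SharedFolderMapping {
    int handle;
};

static SharedFolderMapping*
create_mapping()
{
    // Picks the nothrow operator new above, i.e. effectively
    // malloc(sizeof(SharedFolderMapping)); failure yields NULL
    // rather than an exception.
    SharedFolderMapping* mapping = new(std::nothrow) SharedFolderMapping;
    if (mapping == NULL)
        return NULL;
    mapping->handle = -1;
    return mapping;
}

static void
delete_mapping(SharedFolderMapping* mapping)
{
    delete mapping;     // ends up in operator delete above, i.e. free()
}
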
trunk/src/VBox/Additions/haiku/SharedFolders/lock.h (r43363 → r48940)

typedef struct mutex {
    const char*             name;
    struct mutex_waiter*    waiters;
#if KDEBUG
    thread_id               holder;
#else
    int32                   count;
    uint16                  ignore_unlock_count;
#endif
    uint8                   flags;
} mutex;

#define MUTEX_FLAG_CLONE_NAME   0x1


typedef struct recursive_lock {
    mutex       lock;
#if !KDEBUG
    thread_id   holder;
#endif
    int         recursion;
} recursive_lock;

…

typedef struct rw_lock {
    const char*             name;
    struct rw_lock_waiter*  waiters;
    thread_id               holder;
    vint32                  count;
    int32                   owner_count;
    int16                   active_readers;
        // Only > 0 while a writer is waiting: number
        // of active readers when the first waiting
        // writer started waiting.
    int16                   pending_readers;
        // Number of readers that have already
        // incremented "count", but have not yet started
        // to wait at the time the last writer unlocked.
    uint32                  flags;
} rw_lock;

#define RW_LOCK_WRITER_COUNT_BASE   0x10000

#define RW_LOCK_FLAG_CLONE_NAME     0x1


#if KDEBUG
#   define KDEBUG_RW_LOCK_DEBUG 0
        // Define to 1 if you want to use ASSERT_READ_LOCKED_RW_LOCK().
        // The rw_lock will just behave like a recursive locker then.
#   define ASSERT_LOCKED_RECURSIVE(r) \
        { ASSERT(find_thread(NULL) == (r)->lock.holder); }
#   define ASSERT_LOCKED_MUTEX(m) { ASSERT(find_thread(NULL) == (m)->holder); }
#   define ASSERT_WRITE_LOCKED_RW_LOCK(l) \
        { ASSERT(find_thread(NULL) == (l)->holder); }
#   if KDEBUG_RW_LOCK_DEBUG
#       define ASSERT_READ_LOCKED_RW_LOCK(l) \
            { ASSERT(find_thread(NULL) == (l)->holder); }
#   else
#       define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
#   endif
#else
#   define ASSERT_LOCKED_RECURSIVE(r)           do {} while (false)
#   define ASSERT_LOCKED_MUTEX(m)               do {} while (false)
#   define ASSERT_WRITE_LOCKED_RW_LOCK(m)       do {} while (false)
#   define ASSERT_READ_LOCKED_RW_LOCK(l)        do {} while (false)
#endif

…

// static initializers
#if KDEBUG
#   define MUTEX_INITIALIZER(name)              { name, NULL, -1, 0 }
#   define RECURSIVE_LOCK_INITIALIZER(name)     { MUTEX_INITIALIZER(name), 0 }
#else
#   define MUTEX_INITIALIZER(name)              { name, NULL, 0, 0, 0 }
#   define RECURSIVE_LOCK_INITIALIZER(name)     { MUTEX_INITIALIZER(name), -1, 0 }
#endif

#define RW_LOCK_INITIALIZER(name)               { name, NULL, -1, 0, 0, 0 }


#if KDEBUG
#   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->lock.holder)
#else
#   define RECURSIVE_LOCK_HOLDER(recursiveLock) ((recursiveLock)->holder)
#endif

…

#endif

extern void recursive_lock_init(recursive_lock *lock, const char *name);
    // name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
    uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);

…

extern void rw_lock_init(rw_lock* lock, const char* name);
    // name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);

…

extern void mutex_init(mutex* lock, const char* name);
    // name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
extern status_t mutex_switch_lock(mutex* from, mutex* to);
    // Unlocks "from" and locks "to" such that unlocking and starting to wait
    // for the lock is atomically. I.e. if "from" guards the object "to" belongs
    // to, the operation is safe as long as "from" is held while destroying
    // "to".
extern status_t mutex_switch_from_read_lock(rw_lock* from, mutex* to);
    // Like mutex_switch_lock(), just for a switching from a read-locked
    // rw_lock.

…

extern status_t _rw_lock_read_lock(rw_lock* lock);
extern status_t _rw_lock_read_lock_with_timeout(rw_lock* lock,
    uint32 timeoutFlags, bigtime_t timeout);
extern void _rw_lock_read_unlock(rw_lock* lock, bool threadsLocked);
extern void _rw_lock_write_unlock(rw_lock* lock, bool threadsLocked);

…

extern status_t _mutex_trylock(mutex* lock);
extern status_t _mutex_lock_with_timeout(mutex* lock, uint32 timeoutFlags,
    bigtime_t timeout);

…

{
#if KDEBUG_RW_LOCK_DEBUG
    return rw_lock_write_lock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock(lock);
    return B_OK;
#endif
}

…

static inline status_t
rw_lock_read_lock_with_timeout(rw_lock* lock, uint32 timeoutFlags,
    bigtime_t timeout)
{
#if KDEBUG_RW_LOCK_DEBUG
    return mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
    int32 oldCount = atomic_add(&lock->count, 1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        return _rw_lock_read_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}

…

{
#if KDEBUG_RW_LOCK_DEBUG
    rw_lock_write_unlock(lock);
#else
    int32 oldCount = atomic_add(&lock->count, -1);
    if (oldCount >= RW_LOCK_WRITER_COUNT_BASE)
        _rw_lock_read_unlock(lock, false);
#endif
}

…

rw_lock_write_unlock(rw_lock* lock)
{
    _rw_lock_write_unlock(lock, false);
}

…

{
#if KDEBUG
    return _mutex_lock(lock, false);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, false);
    return B_OK;
#endif
}

…

{
#if KDEBUG
    return _mutex_lock(lock, true);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock(lock, true);
    return B_OK;
#endif
}

…

{
#if KDEBUG
    return _mutex_trylock(lock);
#else
    if (atomic_test_and_set(&lock->count, -1, 0) != 0)
        return B_WOULD_BLOCK;
    return B_OK;
#endif
}

…

{
#if KDEBUG
    return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
#else
    if (atomic_add(&lock->count, -1) < 0)
        return _mutex_lock_with_timeout(lock, timeoutFlags, timeout);
    return B_OK;
#endif
}

…

{
#if !KDEBUG
    if (atomic_add(&lock->count, 1) < -1)
#endif
        _mutex_unlock(lock, false);
}

…

{
#if KDEBUG
    lock->holder = thread;
#endif
}

…

#endif

#endif  /* _KERNEL_LOCK_H */

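The header above only declares the locking primitives, so here is a brief sketch of how the mutex side is typically consumed. This is an assumption-laden illustration (the lock name, the counter and the helper function are invented for the example), not code from the changeset:

#include "lock.h"

static mutex sMappingLock = MUTEX_INITIALIZER("shared folder mappings");
static int sMappingCount = 0;

static status_t
add_mapping(void)
{
    // In non-KDEBUG builds the uncontended case is a single
    // atomic_add() on lock->count; only contended calls fall
    // through to _mutex_lock().
    status_t status = mutex_lock(&sMappingLock);
    if (status != B_OK)
        return status;

    sMappingCount++;
    mutex_unlock(&sMappingLock);
    return B_OK;
}

For locks set up at runtime, mutex_init()/mutex_destroy() play the same role; as the comments above note, the name string is not cloned, so it has to outlive the lock unless MUTEX_FLAG_CLONE_NAME is passed via mutex_init_etc().
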
trunk/src/VBox/Additions/haiku/include/lock.h (r43363 → r48940)

/** @todo r=ramshankar: Eventually this file should be shipped by Haiku and
 *        should be removed from the VBox tree. */

…

The remaining hunks in this file receive the same whitespace and alignment
cleanup as SharedFolders/lock.h above: the mutex, recursive_lock and rw_lock
structures, the ASSERT_* and initializer macros, the extern lock function
declarations and the inline fast-path implementations are reformatted
identically, and the closing #endif gains the /* _KERNEL_LOCK_H */ comment.

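For completeness, a similar sketch of the rw_lock side that both copies of this header declare; the lock name and the folder-list functions are again illustrative assumptions only:

#include "lock.h"

static rw_lock sFolderListLock = RW_LOCK_INITIALIZER("shared folder list");

static void
dump_folders(void)
{
    // Readers normally just atomic_add() lock->count; they fall back to
    // _rw_lock_read_lock() once a writer has pushed count past
    // RW_LOCK_WRITER_COUNT_BASE.
    if (rw_lock_read_lock(&sFolderListLock) != B_OK)
        return;

    // ... walk the shared folder list ...

    rw_lock_read_unlock(&sFolderListLock);
}

static status_t
add_folder(void)
{
    // Writers go through the out-of-line rw_lock_write_lock(), which
    // waits for active readers to drain before proceeding.
    status_t status = rw_lock_write_lock(&sFolderListLock);
    if (status != B_OK)
        return status;

    // ... modify the shared folder list ...

    rw_lock_write_unlock(&sFolderListLock);
    return B_OK;
}
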