VirtualBox

source: vbox/trunk/src/recompiler/qemu-lock.h@36170

Last change on this file since 36170 was 36170, checked in by vboxsync, 14 years ago

rem: synced up to svn://svn.savannah.nongnu.org/qemu/trunk@6686 (repo UUID c046a42c-6fe2-441c-8c8c-71466251a162).

  • Property svn:eol-style set to native
File size: 6.6 KB
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

/* Locking primitives. Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In either case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon. In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it. */
#if defined(USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

#ifdef VBOX
DECLINLINE(int) testandset (int *p)
{
    return ASMAtomicCmpXchgU32((volatile uint32_t *)p, 1, 0) ? 0 : 1;
}
#elif defined(_ARCH_PPC)
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "      lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne $+12\n"
        "      stwcx. %2,0,%1\n"
        "      bne- $-16\n"
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration. So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}

static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push       \n"
        "   .set noat       \n"
        "   .set mips2      \n"
        "1: li   $1, 1      \n"
        "   ll   %0, %1     \n"
        "   sc   $1, %1     \n"
        "   beqz $1, 1b     \n"
        "   .set pop        "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

#endif
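
For context, a minimal, hypothetical sketch of how a caller might use the primitives defined above. It assumes an x86/x86-64 host built without USE_NPTL and with CONFIG_USER_ONLY defined, so the real busy-wait bodies of spin_lock/spin_unlock are selected; the lock and counter names are invented for illustration and do not come from the VirtualBox sources.

/* Hypothetical usage sketch, not part of qemu-lock.h. */
#include <stdio.h>

#define CONFIG_USER_ONLY 1        /* select the busy-wait spin_lock/spin_unlock bodies */
#include "qemu-lock.h"

static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
static int shared_counter;

static void bump_counter(void)
{
    spin_lock(&demo_lock);        /* spins on testandset() until the lock word is claimed */
    shared_counter++;             /* critical section guarded by the lock */
    spin_unlock(&demo_lock);      /* resetlock() marks the word unlocked again */
}

int main(void)
{
    if (spin_trylock(&demo_lock)) {   /* single non-blocking acquisition attempt */
        spin_unlock(&demo_lock);
    }
    bump_counter();
    printf("counter = %d\n", shared_counter);
    return 0;
}

Under USE_NPTL the same calls simply expand to pthread_mutex_lock/pthread_mutex_unlock on a pthread_mutex_t, which is the behaviour the header's opening comment recommends for threaded userspace.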
