/* arch/s390/include/asm/rwsem.h */
1 #ifndef _S390_RWSEM_H
2 #define _S390_RWSEM_H
3
4 /*
5  *  include/asm-s390/rwsem.h
6  *
7  *  S390 version
8  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
9  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
10  *
11  *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
12  */
13
14 /*
15  *
16  * The MSW of the count is the negated number of active writers and waiting
17  * lockers, and the LSW is the total number of active locks
18  *
19  * The lock count is initialized to 0 (no active and no waiting lockers).
20  *
21  * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
22  * uncontended lock. This can be determined because XADD returns the old value.
23  * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe readers) waiting (in which case it goes to
25  * sleep).
26  *
27  * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
28  * be extended to 65534 by manually checking the whole MSW rather than relying
29  * on the S flag.
30  *
31  * The value of ACTIVE_BIAS supports up to 65535 active processes.
32  *
33  * This should be totally fair - if anything is waiting, a process that wants a
34  * lock will go to the back of the queue. When the currently active lock is
35  * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
37  * front, then they'll all be woken up, but no other readers will be.
38  */
39
40 #ifndef _LINUX_RWSEM_H
41 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
42 #endif
43
44 #ifndef CONFIG_64BIT
45 #define RWSEM_UNLOCKED_VALUE    0x00000000
46 #define RWSEM_ACTIVE_BIAS       0x00000001
47 #define RWSEM_ACTIVE_MASK       0x0000ffff
48 #define RWSEM_WAITING_BIAS      (-0x00010000)
49 #else /* CONFIG_64BIT */
50 #define RWSEM_UNLOCKED_VALUE    0x0000000000000000L
51 #define RWSEM_ACTIVE_BIAS       0x0000000000000001L
52 #define RWSEM_ACTIVE_MASK       0x00000000ffffffffL
53 #define RWSEM_WAITING_BIAS      (-0x0000000100000000L)
54 #endif /* CONFIG_64BIT */
55 #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
56 #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
57
58 /*
59  * lock for reading
60  */
61 static inline void __down_read(struct rw_semaphore *sem)
62 {
63         signed long old, new;
64
65         asm volatile(
66 #ifndef CONFIG_64BIT
67                 "       l       %0,%2\n"
68                 "0:     lr      %1,%0\n"
69                 "       ahi     %1,%4\n"
70                 "       cs      %0,%1,%2\n"
71                 "       jl      0b"
72 #else /* CONFIG_64BIT */
73                 "       lg      %0,%2\n"
74                 "0:     lgr     %1,%0\n"
75                 "       aghi    %1,%4\n"
76                 "       csg     %0,%1,%2\n"
77                 "       jl      0b"
78 #endif /* CONFIG_64BIT */
79                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
80                 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
81                 : "cc", "memory");
82         if (old < 0)
83                 rwsem_down_read_failed(sem);
84 }
85
86 /*
87  * trylock for reading -- returns 1 if successful, 0 if contention
88  */
89 static inline int __down_read_trylock(struct rw_semaphore *sem)
90 {
91         signed long old, new;
92
93         asm volatile(
94 #ifndef CONFIG_64BIT
95                 "       l       %0,%2\n"
96                 "0:     ltr     %1,%0\n"
97                 "       jm      1f\n"
98                 "       ahi     %1,%4\n"
99                 "       cs      %0,%1,%2\n"
100                 "       jl      0b\n"
101                 "1:"
102 #else /* CONFIG_64BIT */
103                 "       lg      %0,%2\n"
104                 "0:     ltgr    %1,%0\n"
105                 "       jm      1f\n"
106                 "       aghi    %1,%4\n"
107                 "       csg     %0,%1,%2\n"
108                 "       jl      0b\n"
109                 "1:"
110 #endif /* CONFIG_64BIT */
111                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
112                 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
113                 : "cc", "memory");
114         return old >= 0 ? 1 : 0;
115 }
116
117 /*
118  * lock for writing
119  */
120 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
121 {
122         signed long old, new, tmp;
123
124         tmp = RWSEM_ACTIVE_WRITE_BIAS;
125         asm volatile(
126 #ifndef CONFIG_64BIT
127                 "       l       %0,%2\n"
128                 "0:     lr      %1,%0\n"
129                 "       a       %1,%4\n"
130                 "       cs      %0,%1,%2\n"
131                 "       jl      0b"
132 #else /* CONFIG_64BIT */
133                 "       lg      %0,%2\n"
134                 "0:     lgr     %1,%0\n"
135                 "       ag      %1,%4\n"
136                 "       csg     %0,%1,%2\n"
137                 "       jl      0b"
138 #endif /* CONFIG_64BIT */
139                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
140                 : "Q" (sem->count), "m" (tmp)
141                 : "cc", "memory");
142         if (old != 0)
143                 rwsem_down_write_failed(sem);
144 }
145
146 static inline void __down_write(struct rw_semaphore *sem)
147 {
148         __down_write_nested(sem, 0);
149 }
150
151 /*
152  * trylock for writing -- returns 1 if successful, 0 if contention
153  */
154 static inline int __down_write_trylock(struct rw_semaphore *sem)
155 {
156         signed long old;
157
158         asm volatile(
159 #ifndef CONFIG_64BIT
160                 "       l       %0,%1\n"
161                 "0:     ltr     %0,%0\n"
162                 "       jnz     1f\n"
163                 "       cs      %0,%3,%1\n"
164                 "       jl      0b\n"
165 #else /* CONFIG_64BIT */
166                 "       lg      %0,%1\n"
167                 "0:     ltgr    %0,%0\n"
168                 "       jnz     1f\n"
169                 "       csg     %0,%3,%1\n"
170                 "       jl      0b\n"
171 #endif /* CONFIG_64BIT */
172                 "1:"
173                 : "=&d" (old), "=Q" (sem->count)
174                 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
175                 : "cc", "memory");
176         return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
177 }
178
179 /*
180  * unlock after reading
181  */
182 static inline void __up_read(struct rw_semaphore *sem)
183 {
184         signed long old, new;
185
186         asm volatile(
187 #ifndef CONFIG_64BIT
188                 "       l       %0,%2\n"
189                 "0:     lr      %1,%0\n"
190                 "       ahi     %1,%4\n"
191                 "       cs      %0,%1,%2\n"
192                 "       jl      0b"
193 #else /* CONFIG_64BIT */
194                 "       lg      %0,%2\n"
195                 "0:     lgr     %1,%0\n"
196                 "       aghi    %1,%4\n"
197                 "       csg     %0,%1,%2\n"
198                 "       jl      0b"
199 #endif /* CONFIG_64BIT */
200                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
201                 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
202                 : "cc", "memory");
203         if (new < 0)
204                 if ((new & RWSEM_ACTIVE_MASK) == 0)
205                         rwsem_wake(sem);
206 }
207
208 /*
209  * unlock after writing
210  */
211 static inline void __up_write(struct rw_semaphore *sem)
212 {
213         signed long old, new, tmp;
214
215         tmp = -RWSEM_ACTIVE_WRITE_BIAS;
216         asm volatile(
217 #ifndef CONFIG_64BIT
218                 "       l       %0,%2\n"
219                 "0:     lr      %1,%0\n"
220                 "       a       %1,%4\n"
221                 "       cs      %0,%1,%2\n"
222                 "       jl      0b"
223 #else /* CONFIG_64BIT */
224                 "       lg      %0,%2\n"
225                 "0:     lgr     %1,%0\n"
226                 "       ag      %1,%4\n"
227                 "       csg     %0,%1,%2\n"
228                 "       jl      0b"
229 #endif /* CONFIG_64BIT */
230                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
231                 : "Q" (sem->count), "m" (tmp)
232                 : "cc", "memory");
233         if (new < 0)
234                 if ((new & RWSEM_ACTIVE_MASK) == 0)
235                         rwsem_wake(sem);
236 }
237
238 /*
239  * downgrade write lock to read lock
240  */
241 static inline void __downgrade_write(struct rw_semaphore *sem)
242 {
243         signed long old, new, tmp;
244
245         tmp = -RWSEM_WAITING_BIAS;
246         asm volatile(
247 #ifndef CONFIG_64BIT
248                 "       l       %0,%2\n"
249                 "0:     lr      %1,%0\n"
250                 "       a       %1,%4\n"
251                 "       cs      %0,%1,%2\n"
252                 "       jl      0b"
253 #else /* CONFIG_64BIT */
254                 "       lg      %0,%2\n"
255                 "0:     lgr     %1,%0\n"
256                 "       ag      %1,%4\n"
257                 "       csg     %0,%1,%2\n"
258                 "       jl      0b"
259 #endif /* CONFIG_64BIT */
260                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
261                 : "Q" (sem->count), "m" (tmp)
262                 : "cc", "memory");
263         if (new > 1)
264                 rwsem_downgrade_wake(sem);
265 }
266
267 /*
268  * implement atomic add functionality
269  */
270 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
271 {
272         signed long old, new;
273
274         asm volatile(
275 #ifndef CONFIG_64BIT
276                 "       l       %0,%2\n"
277                 "0:     lr      %1,%0\n"
278                 "       ar      %1,%4\n"
279                 "       cs      %0,%1,%2\n"
280                 "       jl      0b"
281 #else /* CONFIG_64BIT */
282                 "       lg      %0,%2\n"
283                 "0:     lgr     %1,%0\n"
284                 "       agr     %1,%4\n"
285                 "       csg     %0,%1,%2\n"
286                 "       jl      0b"
287 #endif /* CONFIG_64BIT */
288                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
289                 : "Q" (sem->count), "d" (delta)
290                 : "cc", "memory");
291 }
292
293 /*
294  * implement exchange and add functionality
295  */
296 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
297 {
298         signed long old, new;
299
300         asm volatile(
301 #ifndef CONFIG_64BIT
302                 "       l       %0,%2\n"
303                 "0:     lr      %1,%0\n"
304                 "       ar      %1,%4\n"
305                 "       cs      %0,%1,%2\n"
306                 "       jl      0b"
307 #else /* CONFIG_64BIT */
308                 "       lg      %0,%2\n"
309                 "0:     lgr     %1,%0\n"
310                 "       agr     %1,%4\n"
311                 "       csg     %0,%1,%2\n"
312                 "       jl      0b"
313 #endif /* CONFIG_64BIT */
314                 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
315                 : "Q" (sem->count), "d" (delta)
316                 : "cc", "memory");
317         return new;
318 }
319
320 #endif /* _S390_RWSEM_H */