/* arch/powerpc/include/asm/spinlock.h (Linux 3.14) */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * An int is used for the lock word, since a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#define smp_mb__after_unlock_lock()     smp_mb()  /* Full ordering for lock. */

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN      1
#endif
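
/*
 * Illustration (editor's sketch, not part of this header): the lock word
 * is 0 when free and 0x80000000 | cpu when held, so a spinner can see
 * *which* CPU owns the lock.  A rough userspace C11 analogue -- all
 * toy_* names and my_cpu_id() are invented for the example:
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t toy_lock;		/* 0 == unlocked */

static uint32_t my_cpu_id(void)			/* stub for smp_processor_id() */
{
	return 0;				/* single-threaded demo value */
}

static int toy_trylock(void)
{
	uint32_t expected = 0;
	uint32_t token = 0x80000000u | my_cpu_id();	/* "0x800000yy" */

	/* succeed only if the word was 0, installing the owner token */
	return atomic_compare_exchange_strong_explicit(&toy_lock,
			&expected, token,
			memory_order_acquire, memory_order_relaxed);
}
#endif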

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC   (get_paca()->io_sync = 0)
#define SYNC_IO         do {                                            \
                                if (unlikely(get_paca()->io_sync)) {    \
                                        mb();                           \
                                        get_paca()->io_sync = 0;        \
                                }                                       \
                        } while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
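
/*
 * Illustration (editor's sketch): CLEAR_IO_SYNC/SYNC_IO defer the heavy
 * barrier that MMIO accesses need until an unlock that actually follows
 * I/O.  A hedged userspace analogue of the pattern -- the names here are
 * invented, and a seq_cst fence stands in for mb():
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Thread_local bool pending_io_sync;	/* analogue of paca->io_sync */

static void toy_mmio_write(volatile uint32_t *addr, uint32_t val)
{
	*addr = val;
	pending_io_sync = true;		/* remember that a barrier is owed */
}

static void toy_unlock_prologue(void)	/* analogue of SYNC_IO */
{
	if (pending_io_sync) {
		atomic_thread_fence(memory_order_seq_cst);
		pending_io_sync = false;
	}
}
#endif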

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}
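
/*
 * Illustration (editor's sketch): the lwarx/stwcx. loop above behaves
 * like a compare-and-swap that returns the previous lock word, with
 * acquire ordering on success.  A rough C11 rendering (toy_* is made up):
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>
#include <stdint.h>

/* returns the old value: 0 means we took the lock, like the real thing */
static uint32_t toy_spin_trylock(_Atomic uint32_t *slock, uint32_t token)
{
	uint32_t old = 0;

	if (atomic_compare_exchange_strong_explicit(slock, &old, token,
			memory_order_acquire, memory_order_relaxed))
		return 0;		/* was unlocked, now ours */
	return old;			/* non-zero: the holder's token */
}
#endif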

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x)   barrier()
#define SHARED_PROCESSOR        0
#endif
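
/*
 * Illustration (editor's sketch): the lock loops below spin with low SMT
 * priority and, on shared-processor LPARs, confer the timeslice to the
 * holder named by the lock token (__spin_yield lives in
 * arch/powerpc/lib/locks.c).  A loose userspace analogue, with
 * sched_yield() standing in for the H_CONFER hypercall:
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

static void toy_spin_lock(_Atomic uint32_t *slock, uint32_t token)
{
	uint32_t old;

	for (;;) {
		old = 0;
		if (atomic_compare_exchange_weak_explicit(slock, &old, token,
				memory_order_acquire, memory_order_relaxed))
			return;		/* got it */
		/* 'old' names the holder; give our timeslice away rather
		 * than burn it spinning against a preempted vCPU */
		while (atomic_load_explicit(slock, memory_order_relaxed) != 0)
			sched_yield();
	}
}
#endif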

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        CLEAR_IO_SYNC;
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                /* re-enable interrupts (per the caller's flags) while we spin */
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                /* disable them again before retrying the trylock */
                local_irq_restore(flags_dis);
        }
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        SYNC_IO;
        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
}
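
/*
 * Illustration (editor's sketch): unlock needs no atomic read-modify-write,
 * since only the holder stores here; a release barrier plus a plain store
 * of zero is enough.  In C11 the same thing is a single release store:
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>
#include <stdint.h>

static void toy_spin_unlock(_Atomic uint32_t *slock)
{
	/* release: everything in the critical section stays before this */
	atomic_store_explicit(slock, 0, memory_order_release);
}
#endif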

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)          ((rw)->lock >= 0)
#define arch_write_can_lock(rw) (!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif
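
/*
 * Illustration (editor's sketch): taken together, the rwlock word encodes
 * its state in the sign -- 0 is free, a positive value counts readers,
 * and a negative value (the 0x800000yy token on 64-bit, -1 on 32-bit)
 * marks a writer:
 */
#if 0	/* illustrative only, never compiled */
/*   lock == 0   unlocked
 *   lock  > 0   held by that many readers
 *   lock  < 0   held by one writer (WRLOCK_TOKEN)
 */
static inline int toy_readers(int lock)    { return lock > 0 ? lock : 0; }
static inline int toy_write_held(int lock) { return lock < 0; }
#endif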

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}
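
/*
 * Illustration (editor's sketch): both trylocks above are one attempted
 * update of the signed lock word -- readers increment it if it is not
 * negative, a writer swaps 0 for its negative token.  A hedged C11
 * rendering (unlike the lwarx/stwcx. loops, a lost race here simply
 * reports failure instead of retrying the reservation):
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>

/* > 0 on success, <= 0 on failure, like __arch_read_trylock */
static long toy_read_trylock(_Atomic int *lock)
{
	int old = atomic_load_explicit(lock, memory_order_relaxed);

	if (old < 0)
		return old + 1;		/* write-locked: <= 0 */
	if (atomic_compare_exchange_strong_explicit(lock, &old, old + 1,
			memory_order_acquire, memory_order_relaxed))
		return old + 1;		/* new reader count: > 0 */
	return 0;			/* lost a race; caller retries */
}

/* 0 on success, like __arch_write_trylock */
static long toy_write_trylock(_Atomic int *lock, int wrtoken)
{
	int old = 0;

	if (atomic_compare_exchange_strong_explicit(lock, &old, wrtoken,
			memory_order_acquire, memory_order_relaxed))
		return 0;
	return old;			/* somebody holds it */
}
#endif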

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}
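
/*
 * Illustration (editor's sketch): dropping a read lock is an atomic
 * decrement behind a release barrier -- lwarx/stwcx. above, a single
 * fetch-sub in C11:
 */
#if 0	/* illustrative only, never compiled */
#include <stdatomic.h>

static void toy_read_unlock(_Atomic int *lock)
{
	/* release: the critical section stays before the decrement */
	atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}
#endif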

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   __spin_yield(lock)
#define arch_read_relax(lock)   __rw_yield(lock)
#define arch_write_relax(lock)  __rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */