/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>

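/*
 * Atomically exchange the value at *ptr with x and return the previous
 * value.  Each size variant spins on a load-exclusive/store-release-exclusive
 * (LDXR/STLXR) pair until the exclusive store succeeds; the release
 * semantics of the store plus the smp_mb() after the loop provide the full
 * ordering expected of xchg().
 */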
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile("//	__xchg1\n"
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 2:
		asm volatile("//	__xchg2\n"
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 4:
		asm volatile("//	__xchg4\n"
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 8:
		asm volatile("//	__xchg8\n"
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	default:
		BUILD_BUG();
	}

	smp_mb();
	return ret;
}

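/*
 * xchg() wraps __xchg() so callers get back a value of the same type as
 * *ptr.  The operation is fully ordered and works on 1, 2, 4 and 8 byte
 * quantities; any other size triggers BUILD_BUG().
 */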
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

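/*
 * Compare *ptr with old and, if they match, store new; return the value
 * that was read.  The exclusive store is only attempted when the comparison
 * succeeds, and the loop retries only when that store loses the exclusive
 * monitor (res != 0).  No barriers are implied here; callers that need
 * ordering go through __cmpxchg_mb() below.
 */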
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}

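/*
 * Fully ordered variant: the smp_mb() calls before and after the unordered
 * __cmpxchg() provide the full barrier semantics expected of cmpxchg().
 */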
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

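/*
 * cmpxchg(): fully ordered compare-and-exchange.  Returns the value that
 * was previously in *ptr; the exchange took place iff that value equals o.
 *
 * Illustrative use only (the names below are hypothetical, not from this
 * file): atomically increment a counter.
 *
 *	do {
 *		old = v->counter;
 *	} while (cmpxchg(&v->counter, old, old + 1) != old);
 */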
#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

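/*
 * cmpxchg_local(): same operation as cmpxchg() but without any barriers.
 * Only atomicity is guaranteed, which is sufficient for data that is not
 * shared with other CPUs (e.g. per-CPU state).
 */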
#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

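/*
 * unsigned long is 64 bits wide on arm64, so the helpers above already
 * handle 64-bit quantities; the cmpxchg64*() variants simply map onto them.
 * The _local and _relaxed forms take the unordered __cmpxchg() path.
 */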
#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */