ARM: 7852/1: cmpxchg: implement barrier-less cmpxchg64_local
author     Will Deacon <will.deacon@arm.com>
           Wed, 9 Oct 2013 16:01:21 +0000 (17:01 +0100)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Tue, 29 Oct 2013 11:06:06 +0000 (11:06 +0000)
Our cmpxchg64 macros are wrappers around atomic64_cmpxchg. Whilst this is
great for code re-use, there is a case for barrier-less cmpxchg where it
is known to be safe (for example cmpxchg64_local and cmpxchg-based
lockrefs).

This patch introduces a 64-bit cmpxchg implementation specifically
for the cmpxchg64_* macros, so that it can later be used by the lockref
code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
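
As a usage sketch (editorial, not part of the patch): after this change,
cmpxchg64() expands to __cmpxchg64_mb() and so keeps full-barrier semantics,
while cmpxchg64_local() expands to the barrier-less __cmpxchg64() and is only
appropriate for data that no other CPU modifies concurrently, e.g. per-CPU
statistics. The helper stat_add64() below is a hypothetical name, not a
kernel function:

	/* Hypothetical caller of the new barrier-less primitive. */
	static inline u64 stat_add64(u64 *stat, u64 delta)
	{
		u64 old, new;

		/*
		 * Standard compare-and-swap retry loop: cmpxchg64_local()
		 * returns the value it observed, so retry until that matches
		 * our snapshot (meaning the store actually happened). No
		 * barriers are implied, which is fine for CPU-local data.
		 */
		do {
			old = *stat;
			new = old + delta;
		} while (cmpxchg64_local(stat, old, new) != old);

		return new;
	}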
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540dff2a2e7efd08b0671c2369547b90..fbd978fc248fb43e2677fc4280de7b9360f35abf 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+                                            unsigned long long old,
+                                            unsigned long long new)
+{
+       unsigned long long oldval;
+       unsigned long res;
+
+       __asm__ __volatile__(
+"1:    ldrexd          %1, %H1, [%3]\n"
+"      teq             %1, %4\n"
+"      teqeq           %H1, %H4\n"
+"      bne             2f\n"
+"      strexd          %0, %5, %H5, [%3]\n"
+"      teq             %0, #0\n"
+"      bne             1b\n"
+"2:"
+       : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+       : "r" (ptr), "r" (old), "r" (new)
+       : "cc");
+
+       return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+                                               unsigned long long old,
+                                               unsigned long long new)
+{
+       unsigned long long ret;
+
+       smp_mb();
+       ret = __cmpxchg64(ptr, old, new);
+       smp_mb();
+
+       return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)                                         \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),                     \
                                       (unsigned long)(o),              \
@@ -230,18 +266,14 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                       sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)                                           \
-       ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),       \
-                                               atomic64_t,             \
-                                               counter),               \
-                                             (unsigned long long)(o),  \
-                                             (unsigned long long)(n)))
+       ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),                      \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
 
 #define cmpxchg64_local(ptr, o, n)                                     \
-       ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),        \
-                                               local64_t,              \
-                                               a),                     \
-                                            (unsigned long long)(o),   \
-                                            (unsigned long long)(n)))
+       ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
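
For readers unfamiliar with ARM exclusive accesses, the loop in __cmpxchg64()
is a load-linked/store-conditional sequence: ldrexd loads the 64-bit value and
arms the exclusive monitor, and strexd writes 0 to its status register (res)
only if the monitor is still held, so "teq %0, #0; bne 1b" retries whenever
another agent touched the location in between. The sketch below shows the
control flow only; load_exclusive_64() and store_exclusive_64() are
hypothetical stand-ins for ldrexd/strexd, since the exclusive monitor cannot
be expressed in portable C:

	/* Illustrative control flow only -- not compilable kernel code. */
	unsigned long long cmpxchg64_sketch(unsigned long long *ptr,
					    unsigned long long old,
					    unsigned long long new)
	{
		unsigned long long oldval;

		do {
			oldval = load_exclusive_64(ptr);  /* ldrexd (hypothetical) */
			if (oldval != old)
				return oldval;            /* mismatch: branch to 2f, no store */
		} while (store_exclusive_64(ptr, new));   /* strexd: non-zero = lost exclusivity, retry */

		return oldval;
	}

Note the barrier placement in __cmpxchg64_mb(): one smp_mb() on each side of
the barrier-less core gives cmpxchg64() the full-barrier semantics the generic
cmpxchg contract requires, which is exactly what cmpxchg64_local() deliberately
omits.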