arm64: lse: fix LSE atomics with LLVM's integrated assembler
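Unlike the GNU assembler driven by gcc, LLVM's integrated assembler treats each inline assembly block independently, so a directive that enables the LSE instruction set in one block does not carry over to the next. Prepending __LSE_PREAMBLE to every LSE asm block keeps each block self-contained. The macro itself is defined outside this file (presumably in asm/lse.h); as a rough sketch of the assumption this diff relies on, it expands to an .arch-style directive along these lines:

	/* assumed definition, not part of this diff; the exact directive may differ
	 * (e.g. ".arch armv8-a+lse") */
	#define __LSE_PREAMBLE	".arch_extension lse\n"

With that preamble emitted at the top of each asm block, the integrated assembler accepts LSE mnemonics such as stadd, ldadd, cas and casp even when the compiler's base target is plain armv8-a.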
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 574808b..da3280f 100644
@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v)                       \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %w[i], %[v]\n"                                  \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)    \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %w[i], %w[i], %[v]"                             \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v)   \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %w[i], %w[i]\n"
        "       stclr   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %w[i], %w[i]\n"                                 \
        "       ldclr" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %w[i], %w[i]\n"
        "       stadd   %w[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \
        u32 tmp;                                                        \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[tmp], %[v]\n"                 \
        "       add     %w[i], %w[i], %w[tmp]"                          \
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)     \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %w[i], %w[i]\n"                                 \
        "       ldadd" #mb "    %w[i], %w[i], %[v]"                     \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)           \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op "     %[i], %[v]\n"                                   \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v));                                                     \
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
 "      " #asm_op #mb " %[i], %[i], %[v]"                               \
        : [i] "+r" (i), [v] "+Q" (v->counter)                           \
        : "r" (v)                                                       \
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
        : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)        \
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       mvn     %[i], %[i]\n"
        "       stclr   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mvn     %[i], %[i]\n"                                   \
        "       ldclr" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
        asm volatile(
+       __LSE_PREAMBLE
        "       neg     %[i], %[i]\n"
        "       stadd   %[i], %[v]"
        : [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %x[tmp], %[v]\n"                  \
        "       add     %[i], %[i], %x[tmp]"                            \
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)        \
 {                                                                      \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       neg     %[i], %[i]\n"                                   \
        "       ldadd" #mb "    %[i], %[i], %[v]"                       \
        : [i] "+&r" (i), [v] "+Q" (v->counter)                          \
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
        unsigned long tmp;
 
        asm volatile(
+       __LSE_PREAMBLE
        "1:     ldr     %x[tmp], %[v]\n"
        "       subs    %[ret], %x[tmp], #1\n"
        "       b.lt    2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,                  \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       mov     %" #w "[tmp], %" #w "[old]\n"                   \
        "       cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"    \
        "       mov     %" #w "[ret], %" #w "[tmp]"                     \
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        register unsigned long x4 asm ("x4") = (unsigned long)ptr;      \
                                                                        \
        asm volatile(                                                   \
+       __LSE_PREAMBLE                                                  \
        "       casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
        "       eor     %[old1], %[old1], %[oldval1]\n"                 \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \