Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
author Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Kbuild
MAINTAINERS
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cmpxchg.h

diff --cc Kbuild
Simple merge
diff --cc MAINTAINERS
Simple merge
diff --cc arch/arm64/include/asm/atomic_ll_sc.h
@@@ -246,24 -246,15 +246,24 @@@ __LL_SC_PREFIX(arch_atomic64_dec_if_pos
  
        return result;
  }
- __LL_SC_EXPORT(atomic64_dec_if_positive);
+ __LL_SC_EXPORT(arch_atomic64_dec_if_positive);
  
 -#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                 \
 -__LL_SC_INLINE unsigned long                                          \
 -__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,              \
 -                                   unsigned long old,                 \
 -                                   unsigned long new))                \
 +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)            \
 +__LL_SC_INLINE u##sz                                                  \
 +__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr,          \
 +                                       unsigned long old,             \
 +                                       u##sz new))                    \
  {                                                                     \
 -      unsigned long tmp, oldval;                                      \
 +      unsigned long tmp;                                              \
 +      u##sz oldval;                                                   \
 +                                                                      \
 +      /*                                                              \
 +       * Sub-word sizes require explicit casting so that the compare  \
 +       * part of the cmpxchg doesn't end up interpreting non-zero     \
 +       * upper bits of the register containing "old".                 \
 +       */                                                             \
 +      if (sz < 32)                                                    \
 +              old = (u##sz)old;                                       \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
diff --cc arch/arm64/include/asm/atomic_lse.h
Simple merge
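
For context on the comment added in the __CMPXCHG_CASE hunk of atomic_ll_sc.h above: the LL/SC load of a sub-word quantity is zero-extended to register width, so stale upper bits in the caller's "old" value would make the full-register compare fail spuriously, which is what the new "if (sz < 32) old = (u##sz)old;" guards against. A minimal user-space sketch of that effect (illustrative only, not kernel code; assumes a 64-bit unsigned long as on arm64):

/*
 * Illustrative sketch only: the ldxrb/ldxrh load in the LL/SC cmpxchg
 * zero-extends the memory value, so the compare is against the whole
 * register.  If "old" carries non-zero upper bits, an otherwise matching
 * byte compares as different unless it is first truncated.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mem = 0x7f;                       /* byte in memory */
	unsigned long loaded = mem;               /* zero-extended load: 0x7f */
	unsigned long old = 0xffffffffffffff7fUL; /* caller's "old" with junk upper bits */

	printf("without cast: match=%d\n", loaded == old);          /* 0: spurious mismatch */
	printf("with cast:    match=%d\n", loaded == (uint8_t)old); /* 1: matches as intended */
	return 0;
}
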
diff --cc arch/arm64/include/asm/cmpxchg.h
@@@ -177,29 -177,29 +177,29 @@@ __CMPXCHG_GEN(_mb
        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);      \
  })
  
- #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-                                    (unsigned long)(n1), (unsigned long)(n2), \
-                                    ptr1); \
-       __ret; \
+ #define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                               \
+ ({                                                                            \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),  \
+                                    (unsigned long)(n1), (unsigned long)(n2),  \
+                                    ptr1);                                     \
+       __ret;                                                                  \
  })
  
- #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
- ({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-                                 (unsigned long)(n1), (unsigned long)(n2), \
-                                 ptr1); \
-       __ret; \
+ #define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)                 \
+ ({                                                                            \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),     \
+                                 (unsigned long)(n1), (unsigned long)(n2),     \
+                                 ptr1);                                        \
+       __ret;                                                                  \
  })
  
 -#define __CMPWAIT_CASE(w, sz, name)                                   \
 -static inline void __cmpwait_case_##name(volatile void *ptr,          \
 -                                       unsigned long val)             \
 +#define __CMPWAIT_CASE(w, sfx, sz)                                    \
 +static inline void __cmpwait_case_##sz(volatile void *ptr,            \
 +                                     unsigned long val)               \
  {                                                                     \
        unsigned long tmp;                                              \
                                                                        \
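
The rename of cmpxchg_double()/cmpxchg_double_local() to their arch_-prefixed forms follows the same pattern as arch_atomic64_dec_if_positive above: the architecture keeps the raw primitive under an arch_ name, leaving the unprefixed name free for a generic wrapper layer (for example, an instrumented one). A rough user-space sketch of that layering, with hypothetical names and C11 compare-exchange semantics rather than the kernel's return-old-value convention:

/*
 * Rough sketch (hypothetical hook, user-space C11 semantics): the arch_
 * prefix leaves room for a generic wrapper that adds checks before
 * delegating to the architecture primitive.
 */
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the architecture-provided primitive. */
#define arch_cmpxchg(ptr, oldp, new) \
	atomic_compare_exchange_strong((ptr), (oldp), (new))

/* Hypothetical instrumentation hook. */
static void instrument_atomic_rmw(void *ptr, size_t size)
{
	printf("instrumented RMW of %zu bytes at %p\n", size, ptr);
}

/* Generic wrapper that would own the unprefixed name. */
#define cmpxchg(ptr, oldp, new) \
	(instrument_atomic_rmw((ptr), sizeof(*(ptr))), arch_cmpxchg((ptr), (oldp), (new)))

int main(void)
{
	atomic_int v = 1;
	int expected = 1;

	if (cmpxchg(&v, &expected, 2))
		printf("v is now %d\n", atomic_load(&v));
	return 0;
}
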