Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
author Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
committer Ingo Molnar <mingo@kernel.org>
Mon, 11 Feb 2019 13:27:05 +0000 (14:27 +0100)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
36 files changed:
Kbuild
MAINTAINERS
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/sync_bitops.h
include/asm-generic/atomic-instrumented.h
include/asm-generic/atomic-long.h
include/linux/atomic-fallback.h [new file with mode: 0644]
include/linux/atomic.h
scripts/atomic/atomic-tbl.sh [new file with mode: 0755]
scripts/atomic/atomics.tbl [new file with mode: 0755]
scripts/atomic/check-atomics.sh [new file with mode: 0755]
scripts/atomic/fallbacks/acquire [new file with mode: 0755]
scripts/atomic/fallbacks/add_negative [new file with mode: 0755]
scripts/atomic/fallbacks/add_unless [new file with mode: 0755]
scripts/atomic/fallbacks/andnot [new file with mode: 0755]
scripts/atomic/fallbacks/dec [new file with mode: 0755]
scripts/atomic/fallbacks/dec_and_test [new file with mode: 0755]
scripts/atomic/fallbacks/dec_if_positive [new file with mode: 0755]
scripts/atomic/fallbacks/dec_unless_positive [new file with mode: 0755]
scripts/atomic/fallbacks/fence [new file with mode: 0755]
scripts/atomic/fallbacks/fetch_add_unless [new file with mode: 0755]
scripts/atomic/fallbacks/inc [new file with mode: 0755]
scripts/atomic/fallbacks/inc_and_test [new file with mode: 0755]
scripts/atomic/fallbacks/inc_not_zero [new file with mode: 0755]
scripts/atomic/fallbacks/inc_unless_negative [new file with mode: 0755]
scripts/atomic/fallbacks/read_acquire [new file with mode: 0755]
scripts/atomic/fallbacks/release [new file with mode: 0755]
scripts/atomic/fallbacks/set_release [new file with mode: 0755]
scripts/atomic/fallbacks/sub_and_test [new file with mode: 0755]
scripts/atomic/fallbacks/try_cmpxchg [new file with mode: 0755]
scripts/atomic/gen-atomic-fallback.sh [new file with mode: 0755]
scripts/atomic/gen-atomic-instrumented.sh [new file with mode: 0755]
scripts/atomic/gen-atomic-long.sh [new file with mode: 0755]

diff --git a/Kbuild b/Kbuild
index 65db5be..4a4c47c 100644
--- a/Kbuild
+++ b/Kbuild
@@ -6,7 +6,8 @@
 # 2) Generate timeconst.h
 # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
 # 4) Check for missing system calls
-# 5) Generate constants.py (may need bounds.h)
+# 5) Check atomic headers are up-to-date
+# 6) Generate constants.py (may need bounds.h)
 
 #####
 # 1) Generate bounds.h
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
        $(call cmd,syscalls)
 
 #####
-# 5) Generate constants for Python GDB integration
+# 5) Check atomic headers are up-to-date
+#
+
+always += old-atomics
+targets += old-atomics
+
+quiet_cmd_atomics = CALL    $<
+      cmd_atomics = $(CONFIG_SHELL) $<
+
+old-atomics: scripts/atomic/check-atomics.sh FORCE
+       $(call cmd,atomics)
+
+#####
+# 6) Generate constants for Python GDB integration
 #
 
 extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c68de3..8b95aa5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2609,6 +2609,7 @@ L:        linux-kernel@vger.kernel.org
 S:     Maintained
 F:     arch/*/include/asm/atomic*.h
 F:     include/*/atomic*.h
+F:     scripts/atomic/
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:     Bradley Grove <linuxdrivers@attotech.com>
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9bca54d..1f4e9ee 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
 
 #define ATOMIC_INIT(i) { (i) }
 
-#define atomic_read(v)                 READ_ONCE((v)->counter)
-#define atomic_set(v, i)               WRITE_ONCE(((v)->counter), (i))
-
-#define atomic_add_return_relaxed      atomic_add_return_relaxed
-#define atomic_add_return_acquire      atomic_add_return_acquire
-#define atomic_add_return_release      atomic_add_return_release
-#define atomic_add_return              atomic_add_return
-
-#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
-#define atomic_sub_return_acquire      atomic_sub_return_acquire
-#define atomic_sub_return_release      atomic_sub_return_release
-#define atomic_sub_return              atomic_sub_return
-
-#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire       atomic_fetch_add_acquire
-#define atomic_fetch_add_release       atomic_fetch_add_release
-#define atomic_fetch_add               atomic_fetch_add
-
-#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire       atomic_fetch_sub_acquire
-#define atomic_fetch_sub_release       atomic_fetch_sub_release
-#define atomic_fetch_sub               atomic_fetch_sub
-
-#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire       atomic_fetch_and_acquire
-#define atomic_fetch_and_release       atomic_fetch_and_release
-#define atomic_fetch_and               atomic_fetch_and
-
-#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_acquire    atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_release    atomic_fetch_andnot_release
-#define atomic_fetch_andnot            atomic_fetch_andnot
-
-#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire                atomic_fetch_or_acquire
-#define atomic_fetch_or_release                atomic_fetch_or_release
-#define atomic_fetch_or                        atomic_fetch_or
-
-#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire       atomic_fetch_xor_acquire
-#define atomic_fetch_xor_release       atomic_fetch_xor_release
-#define atomic_fetch_xor               atomic_fetch_xor
-
-#define atomic_xchg_relaxed(v, new)    xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg_acquire(v, new)    xchg_acquire(&((v)->counter), (new))
-#define atomic_xchg_release(v, new)    xchg_release(&((v)->counter), (new))
-#define atomic_xchg(v, new)            xchg(&((v)->counter), (new))
-
-#define atomic_cmpxchg_relaxed(v, old, new)                            \
-       cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_acquire(v, old, new)                            \
-       cmpxchg_acquire(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_release(v, old, new)                            \
-       cmpxchg_release(&((v)->counter), (old), (new))
-#define atomic_cmpxchg(v, old, new)    cmpxchg(&((v)->counter), (old), (new))
-
-#define atomic_andnot                  atomic_andnot
+#define arch_atomic_read(v)                    READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)                  WRITE_ONCE(((v)->counter), (i))
+
+#define arch_atomic_add_return_relaxed         arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire         arch_atomic_add_return_acquire
+#define arch_atomic_add_return_release         arch_atomic_add_return_release
+#define arch_atomic_add_return                 arch_atomic_add_return
+
+#define arch_atomic_sub_return_relaxed         arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire         arch_atomic_sub_return_acquire
+#define arch_atomic_sub_return_release         arch_atomic_sub_return_release
+#define arch_atomic_sub_return                 arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed          arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire          arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release          arch_atomic_fetch_add_release
+#define arch_atomic_fetch_add                  arch_atomic_fetch_add
+
+#define arch_atomic_fetch_sub_relaxed          arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire          arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release          arch_atomic_fetch_sub_release
+#define arch_atomic_fetch_sub                  arch_atomic_fetch_sub
+
+#define arch_atomic_fetch_and_relaxed          arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire          arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release          arch_atomic_fetch_and_release
+#define arch_atomic_fetch_and                  arch_atomic_fetch_and
+
+#define arch_atomic_fetch_andnot_relaxed       arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_andnot_acquire       arch_atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release       arch_atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot               arch_atomic_fetch_andnot
+
+#define arch_atomic_fetch_or_relaxed           arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire           arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release           arch_atomic_fetch_or_release
+#define arch_atomic_fetch_or                   arch_atomic_fetch_or
+
+#define arch_atomic_fetch_xor_relaxed          arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire          arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release          arch_atomic_fetch_xor_release
+#define arch_atomic_fetch_xor                  arch_atomic_fetch_xor
+
+#define arch_atomic_xchg_relaxed(v, new) \
+       arch_xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg_acquire(v, new) \
+       arch_xchg_acquire(&((v)->counter), (new))
+#define arch_atomic_xchg_release(v, new) \
+       arch_xchg_release(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+       arch_xchg(&((v)->counter), (new))
+
+#define arch_atomic_cmpxchg_relaxed(v, old, new) \
+       arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_acquire(v, old, new) \
+       arch_cmpxchg_acquire(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_release(v, old, new) \
+       arch_cmpxchg_release(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg(v, old, new) \
+       arch_cmpxchg(&((v)->counter), (old), (new))
+
+#define arch_atomic_andnot                     arch_atomic_andnot
 
 /*
- * 64-bit atomic operations.
+ * 64-bit arch_atomic operations.
  */
-#define ATOMIC64_INIT                  ATOMIC_INIT
-#define atomic64_read                  atomic_read
-#define atomic64_set                   atomic_set
-
-#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
-#define atomic64_add_return_acquire    atomic64_add_return_acquire
-#define atomic64_add_return_release    atomic64_add_return_release
-#define atomic64_add_return            atomic64_add_return
-
-#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire    atomic64_sub_return_acquire
-#define atomic64_sub_return_release    atomic64_sub_return_release
-#define atomic64_sub_return            atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire     atomic64_fetch_add_acquire
-#define atomic64_fetch_add_release     atomic64_fetch_add_release
-#define atomic64_fetch_add             atomic64_fetch_add
-
-#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire     atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_release     atomic64_fetch_sub_release
-#define atomic64_fetch_sub             atomic64_fetch_sub
-
-#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire     atomic64_fetch_and_acquire
-#define atomic64_fetch_and_release     atomic64_fetch_and_release
-#define atomic64_fetch_and             atomic64_fetch_and
-
-#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_acquire  atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_release  atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot          atomic64_fetch_andnot
-
-#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire      atomic64_fetch_or_acquire
-#define atomic64_fetch_or_release      atomic64_fetch_or_release
-#define atomic64_fetch_or              atomic64_fetch_or
-
-#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire     atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_release     atomic64_fetch_xor_release
-#define atomic64_fetch_xor             atomic64_fetch_xor
-
-#define atomic64_xchg_relaxed          atomic_xchg_relaxed
-#define atomic64_xchg_acquire          atomic_xchg_acquire
-#define atomic64_xchg_release          atomic_xchg_release
-#define atomic64_xchg                  atomic_xchg
-
-#define atomic64_cmpxchg_relaxed       atomic_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire       atomic_cmpxchg_acquire
-#define atomic64_cmpxchg_release       atomic_cmpxchg_release
-#define atomic64_cmpxchg               atomic_cmpxchg
-
-#define atomic64_andnot                        atomic64_andnot
-
-#define atomic64_dec_if_positive       atomic64_dec_if_positive
+#define ATOMIC64_INIT                          ATOMIC_INIT
+#define arch_atomic64_read                     arch_atomic_read
+#define arch_atomic64_set                      arch_atomic_set
+
+#define arch_atomic64_add_return_relaxed       arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire       arch_atomic64_add_return_acquire
+#define arch_atomic64_add_return_release       arch_atomic64_add_return_release
+#define arch_atomic64_add_return               arch_atomic64_add_return
+
+#define arch_atomic64_sub_return_relaxed       arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire       arch_atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release       arch_atomic64_sub_return_release
+#define arch_atomic64_sub_return               arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed                arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire                arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release                arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_add                        arch_atomic64_fetch_add
+
+#define arch_atomic64_fetch_sub_relaxed                arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire                arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release                arch_atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub                        arch_atomic64_fetch_sub
+
+#define arch_atomic64_fetch_and_relaxed                arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire                arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release                arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_and                        arch_atomic64_fetch_and
+
+#define arch_atomic64_fetch_andnot_relaxed     arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_andnot_acquire     arch_atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release     arch_atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot             arch_atomic64_fetch_andnot
+
+#define arch_atomic64_fetch_or_relaxed         arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire         arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release         arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_or                 arch_atomic64_fetch_or
+
+#define arch_atomic64_fetch_xor_relaxed                arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire                arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release                arch_atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor                        arch_atomic64_fetch_xor
+
+#define arch_atomic64_xchg_relaxed             arch_atomic_xchg_relaxed
+#define arch_atomic64_xchg_acquire             arch_atomic_xchg_acquire
+#define arch_atomic64_xchg_release             arch_atomic_xchg_release
+#define arch_atomic64_xchg                     arch_atomic_xchg
+
+#define arch_atomic64_cmpxchg_relaxed          arch_atomic_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire          arch_atomic_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release          arch_atomic_cmpxchg_release
+#define arch_atomic64_cmpxchg                  arch_atomic_cmpxchg
+
+#define arch_atomic64_andnot                   arch_atomic64_andnot
+
+#define arch_atomic64_dec_if_positive          arch_atomic64_dec_if_positive
+
+#include <asm-generic/atomic-instrumented.h>
 
 #endif
 #endif
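
The net effect of the atomic.h changes above: arm64 now implements its atomics under the arch_ prefix and pulls in <asm-generic/atomic-instrumented.h> at the very end, which layers KASAN-checked atomic_*() wrappers over the raw arch_ ops. Below is a minimal, userspace-compilable sketch of that layering; the atomic_t, kasan_check_read() and plain-load definitions are simplified stand-ins, not the kernel's:

/* Sketch: instrumented wrapper layered over a raw arch_ op. */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

/* Architecture layer: raw, uninstrumented read (cf. arch_atomic_read above). */
static inline int arch_atomic_read(const atomic_t *v)
{
	return v->counter;		/* the kernel wraps this in READ_ONCE() */
}

/* Stand-in for the KASAN hook; the real one validates the memory access. */
static inline void kasan_check_read(const volatile void *p, unsigned int size)
{
	(void)p; (void)size;
}

/* Instrumented wrapper, shaped like the generated atomic-instrumented.h code. */
static inline int atomic_read(const atomic_t *v)
{
	kasan_check_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

int main(void)
{
	atomic_t v = { .counter = 42 };
	printf("%d\n", atomic_read(&v));	/* check first, then raw read */
	return 0;
}
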
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b990..e321293 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@
 
 #define ATOMIC_OP(op, asm_op)                                          \
 __LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                                \
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))                   \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                             \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)           \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))         \
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))    \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))              \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)            \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))            \
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))       \
 {                                                                      \
        unsigned long tmp;                                              \
        int val, result;                                                \
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))           \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
 
 #define ATOMIC_OPS(...)                                                        \
        ATOMIC_OP(__VA_ARGS__)                                          \
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)                                                \
 __LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                   \
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))              \
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                      \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))    \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))       \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)          \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))       \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))  \
 {                                                                      \
        long result, val;                                               \
        unsigned long tmp;                                              \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))    \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
 
 #define ATOMIC64_OPS(...)                                              \
        ATOMIC64_OP(__VA_ARGS__)                                        \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP
 
 __LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
        long result;
        unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 
        return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)             \
 __LL_SC_INLINE u##sz                                                   \
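
The atomic_ll_sc.h changes are purely mechanical: every function stamped out by the ATOMIC_OP()/ATOMIC64_OP() macro families moves into the arch_ namespace. For readers unfamiliar with this stamp-out-a-family idiom, here is a toy, userspace-compilable sketch; the toy_atomic_* names and the plain += / -= bodies stand in for the real LL/SC assembly loops:

#include <stdio.h>

/*
 * Toy version of the ATOMIC_OP() pattern: one macro expands to a full
 * function definition per operation, so add/sub/or can share one body.
 */
#define TOY_ATOMIC_OP(op, c_op)				\
static inline void toy_atomic_##op(int i, int *v)	\
{							\
	*v c_op i;	/* kernel: LL/SC retry loop */	\
}

TOY_ATOMIC_OP(add, +=)
TOY_ATOMIC_OP(sub, -=)
TOY_ATOMIC_OP(or,  |=)

int main(void)
{
	int v = 1;
	toy_atomic_add(4, &v);
	toy_atomic_sub(2, &v);
	printf("%d\n", v);	/* prints 3 */
	return 0;
}
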
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index a424355..9256a39 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -25,9 +25,9 @@
 #error "please don't include this file directly"
 #endif
 
-#define __LL_SC_ATOMIC(op)     __LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op)     __LL_SC_CALL(arch_atomic_##op)
 #define ATOMIC_OP(op, asm_op)                                          \
-static inline void atomic_##op(int i, atomic_t *v)                     \
+static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                   \
-static inline int atomic_fetch_##op##name(int i, atomic_t *v)          \
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v)     \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                          \
-static inline int atomic_add_return##name(int i, atomic_t *v)          \
+static inline int arch_atomic_add_return##name(int i, atomic_t *v)     \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC_OP_ADD_RETURN
 
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)                           \
-static inline int atomic_fetch_and##name(int i, atomic_t *v)           \
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v)      \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_AND
 
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
 {
        register int w0 asm ("w0") = i;
        register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 }
 
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                          \
-static inline int atomic_sub_return##name(int i, atomic_t *v)          \
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v)     \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC_OP_SUB_RETURN
 
 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)                           \
-static inline int atomic_fetch_sub##name(int i, atomic_t *v)           \
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v)      \
 {                                                                      \
        register int w0 asm ("w0") = i;                                 \
        register atomic_t *x1 asm ("x1") = v;                           \
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 #undef ATOMIC_FETCH_OP_SUB
 #undef __LL_SC_ATOMIC
 
-#define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)                                                \
-static inline void atomic64_##op(long i, atomic64_t *v)                        \
+static inline void arch_atomic64_##op(long i, atomic64_t *v)           \
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                 \
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)    \
+static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
-static inline long atomic64_add_return##name(long i, atomic64_t *v)    \
+static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_ADD_RETURN
 
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(long i, atomic64_t *v)
 {
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v)     \
+static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)        \
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(long i, atomic64_t *v)
 {
        register long x0 asm ("x0") = i;
        register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
-static inline long atomic64_sub_return##name(long i, atomic64_t *v)    \
+static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                         \
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)     \
+static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)        \
 {                                                                      \
        register long x0 asm ("x0") = i;                                \
        register atomic64_t *x1 asm ("x1") = v;                         \
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        register long x0 asm ("x0") = (long)v;
 
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3f9376f..e6ea0f4 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
 })
 
 /* xchg */
-#define xchg_relaxed(...)      __xchg_wrapper(    , __VA_ARGS__)
-#define xchg_acquire(...)      __xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...)      __xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...)              __xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...) __xchg_wrapper(    , __VA_ARGS__)
+#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...)         __xchg_wrapper( _mb, __VA_ARGS__)
 
 #define __CMPXCHG_GEN(sfx)                                             \
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
 })
 
 /* cmpxchg */
-#define cmpxchg_relaxed(...)   __cmpxchg_wrapper(    , __VA_ARGS__)
-#define cmpxchg_acquire(...)   __cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...)   __cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...)           __cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local          cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...)      __cmpxchg_wrapper(    , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...)      __cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...)      __cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...)              __cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local             arch_cmpxchg_relaxed
 
 /* cmpxchg64 */
-#define cmpxchg64_relaxed      cmpxchg_relaxed
-#define cmpxchg64_acquire      cmpxchg_acquire
-#define cmpxchg64_release      cmpxchg_release
-#define cmpxchg64              cmpxchg
-#define cmpxchg64_local                cmpxchg_local
+#define arch_cmpxchg64_relaxed         arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire         arch_cmpxchg_acquire
+#define arch_cmpxchg64_release         arch_cmpxchg_release
+#define arch_cmpxchg64                 arch_cmpxchg
+#define arch_cmpxchg64_local           arch_cmpxchg_local
 
 /* cmpxchg_double */
 #define system_has_cmpxchg_double()     1
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);      \
 })
 
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-                                    (unsigned long)(n1), (unsigned long)(n2), \
-                                    ptr1); \
-       __ret; \
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                                \
+({                                                                             \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),  \
+                                    (unsigned long)(n1), (unsigned long)(n2),  \
+                                    ptr1);                                     \
+       __ret;                                                                  \
 })
 
-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-       int __ret;\
-       __cmpxchg_double_check(ptr1, ptr2); \
-       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-                                 (unsigned long)(n1), (unsigned long)(n2), \
-                                 ptr1); \
-       __ret; \
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)                  \
+({                                                                             \
+       int __ret;                                                              \
+       __cmpxchg_double_check(ptr1, ptr2);                                     \
+       __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),     \
+                                 (unsigned long)(n1), (unsigned long)(n2),     \
+                                 ptr1);                                        \
+       __ret;                                                                  \
 })
 
 #define __CMPWAIT_CASE(w, sfx, sz)                                     \
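
As with the atomics, the cmpxchg.h rename only prefixes the public macros with arch_; the underlying suffix-pasting dispatch through __xchg_wrapper()/__cmpxchg_wrapper() is unchanged. A self-contained sketch of that dispatch idiom follows; the three __toy_xchg* bodies are placeholders (the real arm64 code selects an implementation by operand size and emits LL/SC or LSE instructions with the matching barriers):

#include <stdio.h>

/* One implementation per memory-ordering flavour. */
static int __toy_xchg(int *p, int x)     { int old = *p; *p = x; return old; }
static int __toy_xchg_acq(int *p, int x) { return __toy_xchg(p, x); /* + acquire */ }
static int __toy_xchg_mb(int *p, int x)  { return __toy_xchg(p, x); /* + full barrier */ }

/* Paste the ordering suffix onto the implementation name. */
#define __toy_xchg_wrapper(sfx, ptr, x)	__toy_xchg##sfx(ptr, x)

#define toy_xchg_relaxed(ptr, x)	__toy_xchg_wrapper(    , ptr, x)
#define toy_xchg_acquire(ptr, x)	__toy_xchg_wrapper(_acq, ptr, x)
#define toy_xchg(ptr, x)		__toy_xchg_wrapper( _mb, ptr, x)

int main(void)
{
	int v = 1;
	int old = toy_xchg(&v, 2);		/* full-barrier flavour */
	old = toy_xchg_acquire(&v, 3);		/* acquire flavour */
	old = toy_xchg_relaxed(&v, 4);		/* relaxed flavour */
	printf("%d %d\n", old, v);		/* prints "3 4" */
	return 0;
}
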
diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h
index eee31a9..e9c1a02 100644
--- a/arch/arm64/include/asm/sync_bitops.h
+++ b/arch/arm64/include/asm/sync_bitops.h
  * ops which are SMP safe even on a UP kernel.
  */
 
-#define sync_set_bit(nr, p)            set_bit(nr, p)
-#define sync_clear_bit(nr, p)          clear_bit(nr, p)
-#define sync_change_bit(nr, p)         change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p)   test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p)        test_and_change_bit(nr, p)
-#define sync_test_bit(nr, addr)                test_bit(nr, addr)
-#define sync_cmpxchg                   cmpxchg
+#define sync_set_bit(nr, p)                    set_bit(nr, p)
+#define sync_clear_bit(nr, p)                  clear_bit(nr, p)
+#define sync_change_bit(nr, p)                 change_bit(nr, p)
+#define sync_test_and_set_bit(nr, p)           test_and_set_bit(nr, p)
+#define sync_test_and_clear_bit(nr, p)         test_and_clear_bit(nr, p)
+#define sync_test_and_change_bit(nr, p)                test_and_change_bit(nr, p)
+#define sync_test_bit(nr, addr)                        test_bit(nr, addr)
+#define arch_sync_cmpxchg                      arch_cmpxchg
 
 #endif
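
Everything from here on is the newly generated asm-generic/atomic-instrumented.h. One idiom repeats for every optional operation: the wrapper is emitted only under #if defined(arch_atomic_<op>...), and each wrapper is followed by a self-referential '#define atomic_<op> atomic_<op>' so that later preprocessor logic can test with #ifdef which operations exist. A compact sketch of that guard-and-self-define idiom, using hypothetical toy_* names:

#include <stdio.h>

#define arch_toy_inc(v)	(++*(v))	/* pretend the arch provides this op */

#if defined(arch_toy_inc)
static inline void toy_inc(int *v)
{
	/* instrumentation hook goes here (kasan_check_write() in-kernel) */
	arch_toy_inc(v);
}
#define toy_inc toy_inc	/* self-define: lets later "#ifdef toy_inc" tests succeed */
#endif

int main(void)
{
	int v = 0;
#ifdef toy_inc			/* true because of the self-define above */
	toy_inc(&v);
#endif
	printf("%d\n", v);	/* prints 1 */
	return 0;
}
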
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 0d4b1d3..b8f5b35 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
 /*
  * This file provides wrappers with KASAN instrumentation for atomic operations.
  * To use this functionality an arch's atomic.h file needs to define all
 * arch_ variants (e.g. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
  * double instrumentation.
  */
+#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+static inline int
+atomic_read(const atomic_t *v)
+{
+       kasan_check_read(v, sizeof(*v));
+       return arch_atomic_read(v);
+}
+#define atomic_read atomic_read
+
+#if defined(arch_atomic_read_acquire)
+static inline int
+atomic_read_acquire(const atomic_t *v)
+{
+       kasan_check_read(v, sizeof(*v));
+       return arch_atomic_read_acquire(v);
+}
+#define atomic_read_acquire atomic_read_acquire
+#endif
+
+static inline void
+atomic_set(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_set(v, i);
+}
+#define atomic_set atomic_set
+
+#if defined(arch_atomic_set_release)
+static inline void
+atomic_set_release(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_set_release(v, i);
+}
+#define atomic_set_release atomic_set_release
+#endif
+
+static inline void
+atomic_add(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_add(i, v);
+}
+#define atomic_add atomic_add
+
+#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_return(i, v);
+}
+#define atomic_add_return atomic_add_return
+#endif
+
+#if defined(arch_atomic_add_return_acquire)
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_return_acquire(i, v);
+}
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
+
+#if defined(arch_atomic_add_return_release)
+static inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_return_release(i, v);
+}
+#define atomic_add_return_release atomic_add_return_release
+#endif
+
+#if defined(arch_atomic_add_return_relaxed)
+static inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_return_relaxed(i, v);
+}
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#endif
+
+#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
+static inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_add(i, v);
+}
+#define atomic_fetch_add atomic_fetch_add
+#endif
+
+#if defined(arch_atomic_fetch_add_acquire)
+static inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_acquire(i, v);
+}
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
+#endif
+
+#if defined(arch_atomic_fetch_add_release)
+static inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_release(i, v);
+}
+#define atomic_fetch_add_release atomic_fetch_add_release
+#endif
+
+#if defined(arch_atomic_fetch_add_relaxed)
+static inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#endif
+
+static inline void
+atomic_sub(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_sub(i, v);
+}
+#define atomic_sub atomic_sub
+
+#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_sub_return(i, v);
+}
+#define atomic_sub_return atomic_sub_return
+#endif
+
+#if defined(arch_atomic_sub_return_acquire)
+static inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_sub_return_acquire(i, v);
+}
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#endif
+
+#if defined(arch_atomic_sub_return_release)
+static inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_sub_return_release(i, v);
+}
+#define atomic_sub_return_release atomic_sub_return_release
+#endif
+
+#if defined(arch_atomic_sub_return_relaxed)
+static inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_sub_return_relaxed(i, v);
+}
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#endif
+
+#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
+static inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub(i, v);
+}
+#define atomic_fetch_sub atomic_fetch_sub
+#endif
+
+#if defined(arch_atomic_fetch_sub_acquire)
+static inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_acquire(i, v);
+}
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#endif
+
+#if defined(arch_atomic_fetch_sub_release)
+static inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_release(i, v);
+}
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#endif
+
+#if defined(arch_atomic_fetch_sub_relaxed)
+static inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#endif
+
+#if defined(arch_atomic_inc)
+static inline void
+atomic_inc(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_inc(v);
+}
+#define atomic_inc atomic_inc
+#endif
+
+#if defined(arch_atomic_inc_return)
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_return(v);
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#if defined(arch_atomic_inc_return_acquire)
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_return_acquire(v);
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#if defined(arch_atomic_inc_return_release)
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_return_release(v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#if defined(arch_atomic_inc_return_relaxed)
+static inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_return_relaxed(v);
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#endif
+
+#if defined(arch_atomic_fetch_inc)
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc(v);
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#if defined(arch_atomic_fetch_inc_acquire)
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_acquire(v);
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#if defined(arch_atomic_fetch_inc_release)
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_release(v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#if defined(arch_atomic_fetch_inc_relaxed)
+static inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_inc_relaxed(v);
+}
+#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+#endif
+
+#if defined(arch_atomic_dec)
+static inline void
+atomic_dec(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_dec(v);
+}
+#define atomic_dec atomic_dec
+#endif
+
+#if defined(arch_atomic_dec_return)
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_return(v);
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#if defined(arch_atomic_dec_return_acquire)
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_return_acquire(v);
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#if defined(arch_atomic_dec_return_release)
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_return_release(v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#if defined(arch_atomic_dec_return_relaxed)
+static inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_return_relaxed(v);
+}
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#endif
+
+#if defined(arch_atomic_fetch_dec)
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec(v);
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#if defined(arch_atomic_fetch_dec_acquire)
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_acquire(v);
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic_fetch_dec_release)
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_release(v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#if defined(arch_atomic_fetch_dec_relaxed)
+static inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_dec_relaxed(v);
+}
+#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+#endif
+
+static inline void
+atomic_and(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_and(i, v);
+}
+#define atomic_and atomic_and
+
+#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
+static inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_and(i, v);
+}
+#define atomic_fetch_and atomic_fetch_and
+#endif
+
+#if defined(arch_atomic_fetch_and_acquire)
+static inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_acquire(i, v);
+}
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#endif
+
+#if defined(arch_atomic_fetch_and_release)
+static inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_release(i, v);
+}
+#define atomic_fetch_and_release atomic_fetch_and_release
+#endif
+
+#if defined(arch_atomic_fetch_and_relaxed)
+static inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#endif
+
+#if defined(arch_atomic_andnot)
+static inline void
+atomic_andnot(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_andnot(i, v);
+}
+#define atomic_andnot atomic_andnot
+#endif
+
+#if defined(arch_atomic_fetch_andnot)
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot(i, v);
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#if defined(arch_atomic_fetch_andnot_acquire)
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_acquire(i, v);
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#if defined(arch_atomic_fetch_andnot_release)
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_release(i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#if defined(arch_atomic_fetch_andnot_relaxed)
+static inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#endif
+
+static inline void
+atomic_or(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_or(i, v);
+}
+#define atomic_or atomic_or
+
+#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
+static inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_or(i, v);
+}
+#define atomic_fetch_or atomic_fetch_or
+#endif
+
+#if defined(arch_atomic_fetch_or_acquire)
+static inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_acquire(i, v);
+}
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#endif
+
+#if defined(arch_atomic_fetch_or_release)
+static inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_release(i, v);
+}
+#define atomic_fetch_or_release atomic_fetch_or_release
+#endif
+
+#if defined(arch_atomic_fetch_or_relaxed)
+static inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#endif
+
+static inline void
+atomic_xor(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic_xor(i, v);
+}
+#define atomic_xor atomic_xor
+
+#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
+static inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor(i, v);
+}
+#define atomic_fetch_xor atomic_fetch_xor
+#endif
+
+#if defined(arch_atomic_fetch_xor_acquire)
+static inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_acquire(i, v);
+}
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#endif
+
+#if defined(arch_atomic_fetch_xor_release)
+static inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_release(i, v);
+}
+#define atomic_fetch_xor_release atomic_fetch_xor_release
+#endif
+
+#if defined(arch_atomic_fetch_xor_relaxed)
+static inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#endif
+
+#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_xchg(v, i);
+}
+#define atomic_xchg atomic_xchg
+#endif
+
+#if defined(arch_atomic_xchg_acquire)
+static inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_xchg_acquire(v, i);
+}
+#define atomic_xchg_acquire atomic_xchg_acquire
+#endif
+
+#if defined(arch_atomic_xchg_release)
+static inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_xchg_release(v, i);
+}
+#define atomic_xchg_release atomic_xchg_release
+#endif
+
+#if defined(arch_atomic_xchg_relaxed)
+static inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_xchg_relaxed(v, i);
+}
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#endif
+
+#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg(v, old, new);
+}
+#define atomic_cmpxchg atomic_cmpxchg
+#endif
+
+#if defined(arch_atomic_cmpxchg_acquire)
+static inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_cmpxchg_release)
+static inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_release(v, old, new);
+}
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_cmpxchg_relaxed)
+static inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_try_cmpxchg)
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg(v, old, new);
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_acquire)
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_release)
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+static inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       kasan_check_write(v, sizeof(*v));
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_sub_and_test)
+static inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_sub_and_test(i, v);
+}
+#define atomic_sub_and_test atomic_sub_and_test
+#endif
+
+#if defined(arch_atomic_dec_and_test)
+static inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_and_test(v);
+}
+#define atomic_dec_and_test atomic_dec_and_test
+#endif
+
+#if defined(arch_atomic_inc_and_test)
+static inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_and_test(v);
+}
+#define atomic_inc_and_test atomic_inc_and_test
+#endif
+
+#if defined(arch_atomic_add_negative)
+static inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_negative(i, v);
+}
+#define atomic_add_negative atomic_add_negative
+#endif
+
+#if defined(arch_atomic_fetch_add_unless)
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_fetch_add_unless(v, a, u);
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+#endif
+
+#if defined(arch_atomic_add_unless)
+static inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_add_unless(v, a, u);
+}
+#define atomic_add_unless atomic_add_unless
+#endif
+
+#if defined(arch_atomic_inc_not_zero)
+static inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_not_zero(v);
+}
+#define atomic_inc_not_zero atomic_inc_not_zero
+#endif
+
+#if defined(arch_atomic_inc_unless_negative)
+static inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_inc_unless_negative(v);
+}
+#define atomic_inc_unless_negative atomic_inc_unless_negative
+#endif
+
+#if defined(arch_atomic_dec_unless_positive)
+static inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_unless_positive(v);
+}
+#define atomic_dec_unless_positive atomic_dec_unless_positive
+#endif
+
+#if defined(arch_atomic_dec_if_positive)
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic_dec_if_positive(v);
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+#endif
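
Every atomic_t wrapper above follows one generated shape; a condensed
sketch of the pattern (hand-written paraphrase, not literal script
output):

/*
 *   #if defined(arch_atomic_FOO)
 *   static inline <ret> atomic_FOO(<args>)
 *   {
 *           kasan_check_write(v, sizeof(*v));   <- instrument the access
 *           return arch_atomic_FOO(<args>);     <- defer to the arch op
 *   }
 *   #define atomic_FOO atomic_FOO               <- advertise availability
 *   #endif
 *
 * The self-referential define lets later layers test whether atomic_FOO
 * exists with a plain #if !defined(atomic_FOO), keeping the optional ops
 * optional all the way up the stack.
 */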
+
+static inline s64
+atomic64_read(const atomic64_t *v)
+{
+       kasan_check_read(v, sizeof(*v));
+       return arch_atomic64_read(v);
+}
+#define atomic64_read atomic64_read
+
+#if defined(arch_atomic64_read_acquire)
+static inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+       kasan_check_read(v, sizeof(*v));
+       return arch_atomic64_read_acquire(v);
+}
+#define atomic64_read_acquire atomic64_read_acquire
+#endif
+
+static inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic64_set(v, i);
+}
+#define atomic64_set atomic64_set
+
+#if defined(arch_atomic64_set_release)
+static inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic64_set_release(v, i);
+}
+#define atomic64_set_release atomic64_set_release
+#endif
+
+static inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic64_add(i, v);
+}
+#define atomic64_add atomic64_add
+
+#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
+static inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_add_return(i, v);
+}
+#define atomic64_add_return atomic64_add_return
+#endif
+
+#if defined(arch_atomic64_add_return_acquire)
+static inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_add_return_acquire(i, v);
+}
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#endif
+
+#if defined(arch_atomic64_add_return_release)
+static inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_add_return_release(i, v);
+}
+#define atomic64_add_return_release atomic64_add_return_release
+#endif
+
+#if defined(arch_atomic64_add_return_relaxed)
+static inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_add_return_relaxed(i, v);
+}
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#endif
+
+#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
+static inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add(i, v);
+}
+#define atomic64_fetch_add atomic64_fetch_add
+#endif
+
+#if defined(arch_atomic64_fetch_add_acquire)
+static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_acquire(i, v);
+}
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_add_release)
+static inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_release(i, v);
+}
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#endif
+
+#if defined(arch_atomic64_fetch_add_relaxed)
+static inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#endif
+
+static inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       arch_atomic64_sub(i, v);
+}
+#define atomic64_sub atomic64_sub
 
-#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
-#define _LINUX_ATOMIC_INSTRUMENTED_H
+#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
+static inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_sub_return(i, v);
+}
+#define atomic64_sub_return atomic64_sub_return
+#endif
 
-#include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#if defined(arch_atomic64_sub_return_acquire)
+static inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_acquire(i, v);
+}
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#endif
 
-static __always_inline int atomic_read(const atomic_t *v)
+#if defined(arch_atomic64_sub_return_release)
+static inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
 {
-       kasan_check_read(v, sizeof(*v));
-       return arch_atomic_read(v);
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_release(i, v);
 }
+#define atomic64_sub_return_release atomic64_sub_return_release
+#endif
 
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+#if defined(arch_atomic64_sub_return_relaxed)
+static inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
 {
-       kasan_check_read(v, sizeof(*v));
-       return arch_atomic64_read(v);
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_sub_return_relaxed(i, v);
 }
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#endif
 
-static __always_inline void atomic_set(atomic_t *v, int i)
+#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
+static inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_set(v, i);
+       return arch_atomic64_fetch_sub(i, v);
 }
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
 
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+#if defined(arch_atomic64_fetch_sub_acquire)
+static inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_set(v, i);
+       return arch_atomic64_fetch_sub_acquire(i, v);
 }
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#endif
 
-static __always_inline int atomic_xchg(atomic_t *v, int i)
+#if defined(arch_atomic64_fetch_sub_release)
+static inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_xchg(v, i);
+       return arch_atomic64_fetch_sub_release(i, v);
 }
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#endif
 
-static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
+#if defined(arch_atomic64_fetch_sub_relaxed)
+static inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_xchg(v, i);
+       return arch_atomic64_fetch_sub_relaxed(i, v);
 }
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif
 
-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#if defined(arch_atomic64_inc)
+static inline void
+atomic64_inc(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_cmpxchg(v, old, new);
+       arch_atomic64_inc(v);
 }
+#define atomic64_inc atomic64_inc
+#endif
 
-static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+#if defined(arch_atomic64_inc_return)
+static inline s64
+atomic64_inc_return(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_cmpxchg(v, old, new);
+       return arch_atomic64_inc_return(v);
 }
+#define atomic64_inc_return atomic64_inc_return
+#endif
 
-#ifdef arch_atomic_try_cmpxchg
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+#if defined(arch_atomic64_inc_return_acquire)
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       kasan_check_read(old, sizeof(*old));
-       return arch_atomic_try_cmpxchg(v, old, new);
+       return arch_atomic64_inc_return_acquire(v);
 }
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
 #endif
 
-#ifdef arch_atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+#if defined(arch_atomic64_inc_return_release)
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       kasan_check_read(old, sizeof(*old));
-       return arch_atomic64_try_cmpxchg(v, old, new);
+       return arch_atomic64_inc_return_release(v);
 }
+#define atomic64_inc_return_release atomic64_inc_return_release
 #endif
 
-#ifdef arch_atomic_fetch_add_unless
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+#if defined(arch_atomic64_inc_return_relaxed)
+static inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_add_unless(v, a, u);
+       return arch_atomic64_inc_return_relaxed(v);
 }
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
 #endif
 
-#ifdef arch_atomic64_fetch_add_unless
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+#if defined(arch_atomic64_fetch_inc)
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add_unless(v, a, u);
+       return arch_atomic64_fetch_inc(v);
 }
+#define atomic64_fetch_inc atomic64_fetch_inc
 #endif
 
-#ifdef arch_atomic_inc
-#define atomic_inc atomic_inc
-static __always_inline void atomic_inc(atomic_t *v)
+#if defined(arch_atomic64_fetch_inc_acquire)
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_inc(v);
+       return arch_atomic64_fetch_inc_acquire(v);
 }
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
 #endif
 
-#ifdef arch_atomic64_inc
-#define atomic64_inc atomic64_inc
-static __always_inline void atomic64_inc(atomic64_t *v)
+#if defined(arch_atomic64_fetch_inc_release)
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_inc(v);
+       return arch_atomic64_fetch_inc_release(v);
 }
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
 #endif
 
-#ifdef arch_atomic_dec
-#define atomic_dec atomic_dec
-static __always_inline void atomic_dec(atomic_t *v)
+#if defined(arch_atomic64_fetch_inc_relaxed)
+static inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_dec(v);
+       return arch_atomic64_fetch_inc_relaxed(v);
 }
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
 #endif
 
-#ifdef atch_atomic64_dec
-#define atomic64_dec
-static __always_inline void atomic64_dec(atomic64_t *v)
+#if defined(arch_atomic64_dec)
+static inline void
+atomic64_dec(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic64_dec(v);
 }
+#define atomic64_dec atomic64_dec
 #endif
 
-static __always_inline void atomic_add(int i, atomic_t *v)
+#if defined(arch_atomic64_dec_return)
+static inline s64
+atomic64_dec_return(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_add(i, v);
+       return arch_atomic64_dec_return(v);
 }
+#define atomic64_dec_return atomic64_dec_return
+#endif
 
-static __always_inline void atomic64_add(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_dec_return_acquire)
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_add(i, v);
+       return arch_atomic64_dec_return_acquire(v);
 }
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
 
-static __always_inline void atomic_sub(int i, atomic_t *v)
+#if defined(arch_atomic64_dec_return_release)
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_sub(i, v);
+       return arch_atomic64_dec_return_release(v);
 }
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
 
-static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_dec_return_relaxed)
+static inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_sub(i, v);
+       return arch_atomic64_dec_return_relaxed(v);
 }
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#endif
 
-static __always_inline void atomic_and(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_dec)
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_and(i, v);
+       return arch_atomic64_fetch_dec(v);
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#if defined(arch_atomic64_fetch_dec_acquire)
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_acquire(v);
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_dec_release)
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_release(v);
 }
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#if defined(arch_atomic64_fetch_dec_relaxed)
+static inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+#endif
 
-static __always_inline void atomic64_and(s64 i, atomic64_t *v)
+static inline void
+atomic64_and(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        arch_atomic64_and(i, v);
 }
+#define atomic64_and atomic64_and
 
-static __always_inline void atomic_or(int i, atomic_t *v)
+#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
+static inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_or(i, v);
+       return arch_atomic64_fetch_and(i, v);
 }
+#define atomic64_fetch_and atomic64_fetch_and
+#endif
 
-static __always_inline void atomic64_or(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_and_acquire)
+static inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_or(i, v);
+       return arch_atomic64_fetch_and_acquire(i, v);
 }
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#endif
 
-static __always_inline void atomic_xor(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_and_release)
+static inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic_xor(i, v);
+       return arch_atomic64_fetch_and_release(i, v);
 }
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#endif
 
-static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_and_relaxed)
+static inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       arch_atomic64_xor(i, v);
+       return arch_atomic64_fetch_and_relaxed(i, v);
 }
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#endif
 
-#ifdef arch_atomic_inc_return
-#define atomic_inc_return atomic_inc_return
-static __always_inline int atomic_inc_return(atomic_t *v)
+#if defined(arch_atomic64_andnot)
+static inline void
+atomic64_andnot(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_inc_return(v);
+       arch_atomic64_andnot(i, v);
 }
+#define atomic64_andnot atomic64_andnot
 #endif
 
-#ifdef arch_atomic64_in_return
-#define atomic64_inc_return atomic64_inc_return
-static __always_inline s64 atomic64_inc_return(atomic64_t *v)
+#if defined(arch_atomic64_fetch_andnot)
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_inc_return(v);
+       return arch_atomic64_fetch_andnot(i, v);
 }
+#define atomic64_fetch_andnot atomic64_fetch_andnot
 #endif
 
-#ifdef arch_atomic_dec_return
-#define atomic_dec_return atomic_dec_return
-static __always_inline int atomic_dec_return(atomic_t *v)
+#if defined(arch_atomic64_fetch_andnot_acquire)
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_dec_return(v);
+       return arch_atomic64_fetch_andnot_acquire(i, v);
 }
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
 #endif
 
-#ifdef arch_atomic64_dec_return
-#define atomic64_dec_return atomic64_dec_return
-static __always_inline s64 atomic64_dec_return(atomic64_t *v)
+#if defined(arch_atomic64_fetch_andnot_release)
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_dec_return(v);
+       return arch_atomic64_fetch_andnot_release(i, v);
 }
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
 #endif
 
-#ifdef arch_atomic64_inc_not_zero
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+static inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_inc_not_zero(v);
+       return arch_atomic64_fetch_andnot_relaxed(i, v);
 }
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
 #endif
 
-#ifdef arch_atomic64_dec_if_positive
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline void
+atomic64_or(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_dec_if_positive(v);
+       arch_atomic64_or(i, v);
+}
+#define atomic64_or atomic64_or
+
+#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
+static inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_or(i, v);
 }
+#define atomic64_fetch_or atomic64_fetch_or
 #endif
 
-#ifdef arch_atomic_dec_and_test
-#define atomic_dec_and_test atomic_dec_and_test
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
+#if defined(arch_atomic64_fetch_or_acquire)
+static inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_dec_and_test(v);
+       return arch_atomic64_fetch_or_acquire(i, v);
 }
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
 #endif
 
-#ifdef arch_atomic64_dec_and_test
-#define atomic64_dec_and_test atomic64_dec_and_test
-static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
+#if defined(arch_atomic64_fetch_or_release)
+static inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_dec_and_test(v);
+       return arch_atomic64_fetch_or_release(i, v);
 }
+#define atomic64_fetch_or_release atomic64_fetch_or_release
 #endif
 
-#ifdef arch_atomic_inc_and_test
-#define atomic_inc_and_test atomic_inc_and_test
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
+#if defined(arch_atomic64_fetch_or_relaxed)
+static inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_inc_and_test(v);
+       return arch_atomic64_fetch_or_relaxed(i, v);
 }
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
 #endif
 
-#ifdef arch_atomic64_inc_and_test
-#define atomic64_inc_and_test atomic64_inc_and_test
-static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
+static inline void
+atomic64_xor(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_inc_and_test(v);
+       arch_atomic64_xor(i, v);
+}
+#define atomic64_xor atomic64_xor
+
+#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
+static inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_xor(i, v);
 }
+#define atomic64_fetch_xor atomic64_fetch_xor
 #endif
 
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_xor_acquire)
+static inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_add_return(i, v);
+       return arch_atomic64_fetch_xor_acquire(i, v);
 }
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#endif
 
-static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_fetch_xor_release)
+static inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_add_return(i, v);
+       return arch_atomic64_fetch_xor_release(i, v);
 }
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#endif
 
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+#if defined(arch_atomic64_fetch_xor_relaxed)
+static inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_sub_return(i, v);
+       return arch_atomic64_fetch_xor_relaxed(i, v);
 }
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
 
-static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
+static inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_sub_return(i, v);
+       return arch_atomic64_xchg(v, i);
 }
+#define atomic64_xchg atomic64_xchg
+#endif
 
-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+#if defined(arch_atomic64_xchg_acquire)
+static inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_add(i, v);
+       return arch_atomic64_xchg_acquire(v, i);
 }
+#define atomic64_xchg_acquire atomic64_xchg_acquire
+#endif
 
-static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_xchg_release)
+static inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_add(i, v);
+       return arch_atomic64_xchg_release(v, i);
 }
+#define atomic64_xchg_release atomic64_xchg_release
+#endif
 
-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+#if defined(arch_atomic64_xchg_relaxed)
+static inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_sub(i, v);
+       return arch_atomic64_xchg_relaxed(v, i);
 }
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#endif
 
-static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
+static inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_sub(i, v);
+       return arch_atomic64_cmpxchg(v, old, new);
 }
+#define atomic64_cmpxchg atomic64_cmpxchg
+#endif
 
-static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+#if defined(arch_atomic64_cmpxchg_acquire)
+static inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_and(i, v);
+       return arch_atomic64_cmpxchg_acquire(v, old, new);
 }
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#endif
 
-static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_cmpxchg_release)
+static inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_and(i, v);
+       return arch_atomic64_cmpxchg_release(v, old, new);
 }
+#define atomic64_cmpxchg_release atomic64_cmpxchg_release
+#endif
 
-static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+#if defined(arch_atomic64_cmpxchg_relaxed)
+static inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_or(i, v);
+       return arch_atomic64_cmpxchg_relaxed(v, old, new);
 }
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#endif
 
-static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_try_cmpxchg)
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_or(i, v);
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg(v, old, new);
 }
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
 
-static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_fetch_xor(i, v);
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_acquire(v, old, new);
 }
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
 
-static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_try_cmpxchg_release)
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic64_fetch_xor(i, v);
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_release(v, old, new);
 }
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
 
-#ifdef arch_atomic_sub_and_test
-#define atomic_sub_and_test atomic_sub_and_test
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+static inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_sub_and_test(i, v);
+       kasan_check_write(old, sizeof(*old));
+       return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
 }
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
 #endif
 
-#ifdef arch_atomic64_sub_and_test
-#define atomic64_sub_and_test atomic64_sub_and_test
-static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_sub_and_test)
+static inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_sub_and_test(i, v);
 }
+#define atomic64_sub_and_test atomic64_sub_and_test
 #endif
 
-#ifdef arch_atomic_add_negative
-#define atomic_add_negative atomic_add_negative
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+#if defined(arch_atomic64_dec_and_test)
+static inline bool
+atomic64_dec_and_test(atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
-       return arch_atomic_add_negative(i, v);
+       return arch_atomic64_dec_and_test(v);
 }
+#define atomic64_dec_and_test atomic64_dec_and_test
 #endif
 
-#ifdef arch_atomic64_add_negative
-#define atomic64_add_negative atomic64_add_negative
-static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+#if defined(arch_atomic64_inc_and_test)
+static inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_inc_and_test(v);
+}
+#define atomic64_inc_and_test atomic64_inc_and_test
+#endif
+
+#if defined(arch_atomic64_add_negative)
+static inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
 {
        kasan_check_write(v, sizeof(*v));
        return arch_atomic64_add_negative(i, v);
 }
+#define atomic64_add_negative atomic64_add_negative
+#endif
+
+#if defined(arch_atomic64_fetch_add_unless)
+static inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_fetch_add_unless(v, a, u);
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+#if defined(arch_atomic64_add_unless)
+static inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_add_unless(v, a, u);
+}
+#define atomic64_add_unless atomic64_add_unless
+#endif
+
+#if defined(arch_atomic64_inc_not_zero)
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_inc_not_zero(v);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
+
+#if defined(arch_atomic64_inc_unless_negative)
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_inc_unless_negative(v);
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
 #endif
 
-#define xchg(ptr, new)                                                 \
+#if defined(arch_atomic64_dec_unless_positive)
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_dec_unless_positive(v);
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#if defined(arch_atomic64_dec_if_positive)
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+       kasan_check_write(v, sizeof(*v));
+       return arch_atomic64_dec_if_positive(v);
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
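
atomic64_dec_if_positive() returns the decremented value, or a negative
number when the counter was already zero and was left untouched, which
makes it a natural one-shot gate. A hypothetical caller, for illustration
only:

static atomic64_t budget = ATOMIC64_INIT(16);

static int consume_one(void)
{
	/* decrements only if the result stays >= 0 */
	if (atomic64_dec_if_positive(&budget) < 0)
		return -EBUSY;	/* budget was already exhausted */
	return 0;
}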
+
+#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
+#define xchg(ptr, ...)                                         \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_xchg(__ai_ptr, __VA_ARGS__);                               \
+})
+#endif
+
+#if defined(arch_xchg_acquire)
+#define xchg_acquire(ptr, ...)                                         \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_xchg_acquire(__ai_ptr, __VA_ARGS__);                               \
+})
+#endif
+
+#if defined(arch_xchg_release)
+#define xchg_release(ptr, ...)                                         \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_xchg_release(__ai_ptr, __VA_ARGS__);                               \
+})
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define xchg_relaxed(ptr, ...)                                         \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_xchg_relaxed(__ai_ptr, __VA_ARGS__);                               \
+})
+#endif
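
The xchg()/cmpxchg() wrappers capture 'ptr' in __ai_ptr so the argument is
evaluated exactly once even though the macro body names it three times. A
standalone GCC sketch of the same statement-expression idiom (userspace,
deliberately non-atomic, just to show the single evaluation):

#include <stdio.h>

#define eval_once_swap(ptr, new)			\
({							\
	typeof(ptr) __p = (ptr);	/* 'ptr' evaluated once */ \
	typeof(*__p) __old = *__p;			\
	*__p = (new);					\
	__old;						\
})

int main(void)
{
	int a[2] = { 1, 2 };
	int i = 0;
	int old = eval_once_swap(&a[i++], 9);

	printf("%d %d\n", old, i);	/* prints "1 1": i++ ran once */
	return 0;
}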
+
+#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
+#define cmpxchg(ptr, ...)                                              \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_xchg(__ai_ptr, (new));                                     \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg(__ai_ptr, __VA_ARGS__);                            \
 })
+#endif
+
+#if defined(arch_cmpxchg_acquire)
+#define cmpxchg_acquire(ptr, ...)                                              \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__);                            \
+})
+#endif
+
+#if defined(arch_cmpxchg_release)
+#define cmpxchg_release(ptr, ...)                                              \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg_release(__ai_ptr, __VA_ARGS__);                            \
+})
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define cmpxchg_relaxed(ptr, ...)                                              \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__);                            \
+})
+#endif
 
-#define cmpxchg(ptr, old, new)                                         \
+#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
+#define cmpxchg64(ptr, ...)                                            \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_cmpxchg(__ai_ptr, (old), (new));                           \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg64(__ai_ptr, __VA_ARGS__);                          \
 })
+#endif
+
+#if defined(arch_cmpxchg64_acquire)
+#define cmpxchg64_acquire(ptr, ...)                                            \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__);                          \
+})
+#endif
+
+#if defined(arch_cmpxchg64_release)
+#define cmpxchg64_release(ptr, ...)                                            \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__);                          \
+})
+#endif
 
-#define sync_cmpxchg(ptr, old, new)                                    \
+#if defined(arch_cmpxchg64_relaxed)
+#define cmpxchg64_relaxed(ptr, ...)                                            \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_sync_cmpxchg(__ai_ptr, (old), (new));                      \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__);                          \
 })
+#endif
 
-#define cmpxchg_local(ptr, old, new)                                   \
+#define cmpxchg_local(ptr, ...)                                                \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_cmpxchg_local(__ai_ptr, (old), (new));                     \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg_local(__ai_ptr, __VA_ARGS__);                              \
 })
 
-#define cmpxchg64(ptr, old, new)                                       \
+#define cmpxchg64_local(ptr, ...)                                              \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_cmpxchg64(__ai_ptr, (old), (new));                         \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__);                            \
 })
 
-#define cmpxchg64_local(ptr, old, new)                                 \
+#define sync_cmpxchg(ptr, ...)                                         \
 ({                                                                     \
        typeof(ptr) __ai_ptr = (ptr);                                   \
-       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));                 \
-       arch_cmpxchg64_local(__ai_ptr, (old), (new));                   \
+       kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));         \
+       arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__);                               \
 })
 
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)                         \
+#define cmpxchg_double(ptr, ...)                                               \
 ({                                                                     \
-       typeof(p1) __ai_p1 = (p1);                                      \
-       kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));               \
-       arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2));     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));             \
+       arch_cmpxchg_double(__ai_ptr, __VA_ARGS__);                             \
 })
 
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)                           \
-({                                                                             \
-       typeof(p1) __ai_p1 = (p1);                                              \
-       kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));                       \
-       arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2));       \
+
+#define cmpxchg_double_local(ptr, ...)                                         \
+({                                                                     \
+       typeof(ptr) __ai_ptr = (ptr);                                   \
+       kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));             \
+       arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__);                               \
 })
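
cmpxchg_double() and cmpxchg_double_local() operate on two adjacent
machine words, which is why these two wrappers check 2 * sizeof(*__ai_ptr)
bytes rather than one word. A hypothetical layout, for illustration only:

struct two_words {
	unsigned long first;	/* __ai_ptr points here... */
	unsigned long second;	/* ...and the KASAN check covers this too */
};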
 
-#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
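
Taken together, the instrumented header gives KASAN visibility into every
atomic access: plain loads are checked as reads, and all stores and RMW
ops as writes. A deliberately buggy illustration (not from this patch) of
what the checks catch:

static void kasan_uaf_demo(void)
{
	atomic_t *counter = kmalloc(sizeof(*counter), GFP_KERNEL);

	kfree(counter);
	/* atomic_inc() now calls kasan_check_write() first, so KASAN
	 * reports a write to a freed object here */
	atomic_inc(counter);
}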
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 87d1447..a833d38 100644 (file)
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
 #ifndef _ASM_GENERIC_ATOMIC_LONG_H
 #define _ASM_GENERIC_ATOMIC_LONG_H
-/*
- * Copyright (C) 2005 Silicon Graphics, Inc.
- *     Christoph Lameter
- *
- * Allows to provide arch independent atomic definitions without the need to
- * edit all arch specific atomic.h files.
- */
 
 #include <asm/types.h>
 
-/*
- * Suppport for atomic_long_t
- *
- * Casts for parameters are avoided for existing atomic functions in order to
- * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
- * macros of a platform may have.
- */
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
+#endif
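
The typedef block above is what makes atomic_long_t code portable across
word sizes. A hypothetical caller, for illustration: the same source
compiles to atomic64_* ops on 64-bit kernels and atomic_* ops on 32-bit
ones:

static atomic_long_t nr_objects = ATOMIC_LONG_INIT(0);

static void note_alloc(void)
{
	atomic_long_inc(&nr_objects);
}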
 
-#if BITS_PER_LONG == 64
+#ifdef CONFIG_64BIT
 
-typedef atomic64_t atomic_long_t;
+static inline long
+atomic_long_read(const atomic_long_t *v)
+{
+       return atomic64_read(v);
+}
 
-#define ATOMIC_LONG_INIT(i)    ATOMIC64_INIT(i)
-#define ATOMIC_LONG_PFX(x)     atomic64 ## x
-#define ATOMIC_LONG_TYPE       s64
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return atomic64_read_acquire(v);
+}
 
-#else
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+       atomic64_set(v, i);
+}
 
-typedef atomic_t atomic_long_t;
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+       atomic64_set_release(v, i);
+}
 
-#define ATOMIC_LONG_INIT(i)    ATOMIC_INIT(i)
-#define ATOMIC_LONG_PFX(x)     atomic ## x
-#define ATOMIC_LONG_TYPE       int
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+       atomic64_add(i, v);
+}
 
-#endif
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return atomic64_add_return(i, v);
+}
+
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_add_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return atomic64_add_return_release(i, v);
+}
+
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_add_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+       atomic64_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return atomic64_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return atomic64_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+       atomic64_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+       return atomic64_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return atomic64_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return atomic64_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return atomic64_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return atomic64_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return atomic64_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return atomic64_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return atomic64_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+       atomic64_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+       return atomic64_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return atomic64_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return atomic64_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return atomic64_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return atomic64_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return atomic64_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return atomic64_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return atomic64_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+       atomic64_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+       atomic64_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+       atomic64_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_or_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_or_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_or_relaxed(i, v);
+}
+
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+       atomic64_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_xor_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_xor_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return atomic64_fetch_xor_relaxed(i, v);
+}
+
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return atomic64_xchg(v, i);
+}
+
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+       return atomic64_xchg_acquire(v, i);
+}
+
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return atomic64_xchg_release(v, i);
+}
+
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+       return atomic64_xchg_relaxed(v, i);
+}
+
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return atomic64_cmpxchg(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+       return atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return atomic64_cmpxchg_release(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+       return atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
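
The (s64 *) casts above are sound because this branch only compiles under
CONFIG_64BIT, where 'long' and 's64' have the same width; a hypothetical
compile-time check of that assumption, placed in any function:

	BUILD_BUG_ON(sizeof(long) != sizeof(s64));

would hold on every build that reaches this code.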
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return atomic64_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return atomic64_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return atomic64_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return atomic64_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return atomic64_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return atomic64_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+       return atomic64_inc_not_zero(v);
+}
+
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return atomic64_inc_unless_negative(v);
+}
+
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return atomic64_dec_unless_positive(v);
+}
+
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static inline long
+atomic_long_read(const atomic_long_t *v)
+{
+       return atomic_read(v);
+}
+
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+       return atomic_read_acquire(v);
+}
+
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+       atomic_set(v, i);
+}
+
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+       atomic_set_release(v, i);
+}
+
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+       atomic_add(i, v);
+}
+
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+       return atomic_add_return(i, v);
+}
+
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+       return atomic_add_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+       return atomic_add_return_release(i, v);
+}
+
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_add_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+       return atomic_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+       atomic_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+       return atomic_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+       return atomic_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+       return atomic_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+       return atomic_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+       atomic_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+       return atomic_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+       return atomic_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+       return atomic_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+       return atomic_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+       return atomic_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+       return atomic_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+       return atomic_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+       return atomic_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+       atomic_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+       return atomic_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+       return atomic_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+       return atomic_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+       return atomic_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+       return atomic_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+       return atomic_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+       return atomic_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+       return atomic_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+       atomic_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+       return atomic_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+       atomic_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+       return atomic_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_andnot_relaxed(i, v);
+}
 
-#define ATOMIC_LONG_READ_OP(mo)                                                \
-static inline long atomic_long_read##mo(const atomic_long_t *l)                \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       return (long)ATOMIC_LONG_PFX(_read##mo)(v);                     \
-}
-ATOMIC_LONG_READ_OP()
-ATOMIC_LONG_READ_OP(_acquire)
-
-#undef ATOMIC_LONG_READ_OP
-
-#define ATOMIC_LONG_SET_OP(mo)                                         \
-static inline void atomic_long_set##mo(atomic_long_t *l, long i)       \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       ATOMIC_LONG_PFX(_set##mo)(v, i);                                \
-}
-ATOMIC_LONG_SET_OP()
-ATOMIC_LONG_SET_OP(_release)
-
-#undef ATOMIC_LONG_SET_OP
-
-#define ATOMIC_LONG_ADD_SUB_OP(op, mo)                                 \
-static inline long                                                     \
-atomic_long_##op##_return##mo(long i, atomic_long_t *l)                        \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);         \
-}
-ATOMIC_LONG_ADD_SUB_OP(add,)
-ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(add, _release)
-ATOMIC_LONG_ADD_SUB_OP(sub,)
-ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(sub, _release)
-
-#undef ATOMIC_LONG_ADD_SUB_OP
-
-#define atomic_long_cmpxchg_relaxed(l, old, new) \
-       (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (old), (new)))
-#define atomic_long_cmpxchg_acquire(l, old, new) \
-       (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (old), (new)))
-#define atomic_long_cmpxchg_release(l, old, new) \
-       (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (old), (new)))
-#define atomic_long_cmpxchg(l, old, new) \
-       (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
-
-
-#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
-       (ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_acquire(l, old, new) \
-       (ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_release(l, old, new) \
-       (ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                          (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg(l, old, new) \
-       (ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
-                                      (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-
-
-#define atomic_long_xchg_relaxed(v, new) \
-       (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_acquire(v, new) \
-       (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_release(v, new) \
-       (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg(v, new) \
-       (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-
-static __always_inline void atomic_long_inc(atomic_long_t *l)
-{
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-       ATOMIC_LONG_PFX(_inc)(v);
-}
-
-static __always_inline void atomic_long_dec(atomic_long_t *l)
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       atomic_or(i, v);
+}
 
-       ATOMIC_LONG_PFX(_dec)(v);
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+       return atomic_fetch_or(i, v);
 }
 
-#define ATOMIC_LONG_FETCH_OP(op, mo)                                   \
-static inline long                                                     \
-atomic_long_fetch_##op##mo(long i, atomic_long_t *l)                   \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v);            \
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_or_acquire(i, v);
 }
 
-ATOMIC_LONG_FETCH_OP(add, )
-ATOMIC_LONG_FETCH_OP(add, _relaxed)
-ATOMIC_LONG_FETCH_OP(add, _acquire)
-ATOMIC_LONG_FETCH_OP(add, _release)
-ATOMIC_LONG_FETCH_OP(sub, )
-ATOMIC_LONG_FETCH_OP(sub, _relaxed)
-ATOMIC_LONG_FETCH_OP(sub, _acquire)
-ATOMIC_LONG_FETCH_OP(sub, _release)
-ATOMIC_LONG_FETCH_OP(and, )
-ATOMIC_LONG_FETCH_OP(and, _relaxed)
-ATOMIC_LONG_FETCH_OP(and, _acquire)
-ATOMIC_LONG_FETCH_OP(and, _release)
-ATOMIC_LONG_FETCH_OP(andnot, )
-ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
-ATOMIC_LONG_FETCH_OP(andnot, _acquire)
-ATOMIC_LONG_FETCH_OP(andnot, _release)
-ATOMIC_LONG_FETCH_OP(or, )
-ATOMIC_LONG_FETCH_OP(or, _relaxed)
-ATOMIC_LONG_FETCH_OP(or, _acquire)
-ATOMIC_LONG_FETCH_OP(or, _release)
-ATOMIC_LONG_FETCH_OP(xor, )
-ATOMIC_LONG_FETCH_OP(xor, _relaxed)
-ATOMIC_LONG_FETCH_OP(xor, _acquire)
-ATOMIC_LONG_FETCH_OP(xor, _release)
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_or_release(i, v);
+}
 
-#undef ATOMIC_LONG_FETCH_OP
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_or_relaxed(i, v);
+}
 
-#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo)                                   \
-static inline long                                                     \
-atomic_long_fetch_##op##mo(atomic_long_t *l)                           \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v);               \
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+       atomic_xor(i, v);
 }
 
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+       return atomic_fetch_xor(i, v);
+}
 
-#undef ATOMIC_LONG_FETCH_INC_DEC_OP
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+       return atomic_fetch_xor_acquire(i, v);
+}
 
-#define ATOMIC_LONG_OP(op)                                             \
-static __always_inline void                                            \
-atomic_long_##op(long i, atomic_long_t *l)                             \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       ATOMIC_LONG_PFX(_##op)(i, v);                                   \
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+       return atomic_fetch_xor_release(i, v);
 }
 
-ATOMIC_LONG_OP(add)
-ATOMIC_LONG_OP(sub)
-ATOMIC_LONG_OP(and)
-ATOMIC_LONG_OP(andnot)
-ATOMIC_LONG_OP(or)
-ATOMIC_LONG_OP(xor)
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+       return atomic_fetch_xor_relaxed(i, v);
+}
 
-#undef ATOMIC_LONG_OP
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+       return atomic_xchg(v, i);
+}
 
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       return atomic_xchg_acquire(v, i);
+}
 
-       return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+       return atomic_xchg_release(v, i);
 }
 
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       return atomic_xchg_relaxed(v, i);
+}
 
-       return ATOMIC_LONG_PFX(_dec_and_test)(v);
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+       return atomic_cmpxchg(v, old, new);
 }
 
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       return atomic_cmpxchg_acquire(v, old, new);
+}
 
-       return ATOMIC_LONG_PFX(_inc_and_test)(v);
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+       return atomic_cmpxchg_release(v, old, new);
 }
 
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       return atomic_cmpxchg_relaxed(v, old, new);
+}
 
-       return ATOMIC_LONG_PFX(_add_negative)(i, v);
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+       return atomic_try_cmpxchg(v, (int *)old, new);
 }
 
-#define ATOMIC_LONG_INC_DEC_OP(op, mo)                                 \
-static inline long                                                     \
-atomic_long_##op##_return##mo(atomic_long_t *l)                                \
-{                                                                      \
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
-                                                                       \
-       return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v);            \
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+       return atomic_try_cmpxchg_acquire(v, (int *)old, new);
 }
-ATOMIC_LONG_INC_DEC_OP(inc,)
-ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_INC_DEC_OP(dec,)
-ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_INC_DEC_OP(dec, _release)
 
-#undef ATOMIC_LONG_INC_DEC_OP
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+       return atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+       return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+       return atomic_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+       return atomic_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+       return atomic_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+       return atomic_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+       return atomic_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+       return atomic_add_unless(v, a, u);
+}
 
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
 {
-       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+       return atomic_inc_not_zero(v);
+}
 
-       return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+       return atomic_inc_unless_negative(v);
 }
 
-#define atomic_long_inc_not_zero(l) \
-       ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+       return atomic_dec_unless_positive(v);
+}
 
-#define atomic_long_cond_read_relaxed(v, c) \
-       ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
-#define atomic_long_cond_read_acquire(v, c) \
-       ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+       return atomic_dec_if_positive(v);
+}
 
-#endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
+#endif /* CONFIG_64BIT */
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
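For orientation: with this rewrite, atomic_long_t resolves to atomic64_t on 64-bit kernels and to atomic_t in the !CONFIG_64BIT branch shown above, so callers always get a machine-word-sized counter. A minimal usage sketch, assuming kernel context; nr_events is hypothetical, and ATOMIC_LONG_INIT is assumed to be provided by this header:

/*
 * Hedged usage sketch, not part of the patch: a hypothetical event
 * counter sized to the machine word. On 64-bit kernels these calls
 * resolve to atomic64_*(); on 32-bit, to the atomic_*() wrappers
 * above.
 */
static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

static void record_event(void)
{
        atomic_long_inc(&nr_events);
}

static long drain_events(void)
{
        /* Atomically read and reset the counter. */
        return atomic_long_xchg(&nr_events, 0);
}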
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
new file mode 100644 (file)
index 0000000..1c02c01
--- /dev/null
@@ -0,0 +1,2294 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#ifndef xchg_relaxed
+#define xchg_relaxed           xchg
+#define xchg_acquire           xchg
+#define xchg_release           xchg
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define xchg_acquire(...) \
+       __atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define xchg_release(...) \
+       __atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define xchg(...) \
+       __atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+
+#endif /* xchg_relaxed */
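This block sets the pattern for the whole file: when an architecture defines only the fully ordered xchg(), the _relaxed/_acquire/_release names simply alias it; when it defines xchg_relaxed(), the stronger orderings are synthesized from the relaxed form by the __atomic_op_*() wrappers. Those wrappers live in include/linux/atomic.h, not in this hunk; they look roughly like the following sketch:

/* Sketch of the __atomic_op_*() helpers, shown for orientation only. */
#define __atomic_op_acquire(op, args...)                                \
({                                                                      \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);          \
        __atomic_acquire_fence();                                       \
        __ret;                                                          \
})

#define __atomic_op_release(op, args...)                                \
({                                                                      \
        __atomic_release_fence();                                       \
        op##_relaxed(args);                                             \
})

#define __atomic_op_fence(op, args...)                                  \
({                                                                      \
        typeof(op##_relaxed(args)) __ret;                               \
        __atomic_pre_full_fence();                                      \
        __ret = op##_relaxed(args);                                     \
        __atomic_post_full_fence();                                     \
        __ret;                                                          \
})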
+
+#ifndef cmpxchg_relaxed
+#define cmpxchg_relaxed                cmpxchg
+#define cmpxchg_acquire                cmpxchg
+#define cmpxchg_release                cmpxchg
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define cmpxchg_acquire(...) \
+       __atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define cmpxchg_release(...) \
+       __atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define cmpxchg(...) \
+       __atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg_relaxed */
+
+#ifndef cmpxchg64_relaxed
+#define cmpxchg64_relaxed              cmpxchg64
+#define cmpxchg64_acquire              cmpxchg64
+#define cmpxchg64_release              cmpxchg64
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define cmpxchg64_acquire(...) \
+       __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define cmpxchg64_release(...) \
+       __atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define cmpxchg64(...) \
+       __atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+
+#endif /* cmpxchg64_relaxed */
+
+#ifndef atomic_read_acquire
+static inline int
+atomic_read_acquire(const atomic_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define atomic_read_acquire atomic_read_acquire
+#endif
+
+#ifndef atomic_set_release
+static inline void
+atomic_set_release(atomic_t *v, int i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define atomic_set_release atomic_set_release
+#endif
+
+#ifndef atomic_add_return_relaxed
+#define atomic_add_return_acquire atomic_add_return
+#define atomic_add_return_release atomic_add_return
+#define atomic_add_return_relaxed atomic_add_return
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
+
+#ifndef atomic_add_return_release
+static inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_add_return_relaxed(i, v);
+}
+#define atomic_add_return_release atomic_add_return_release
+#endif
+
+#ifndef atomic_add_return
+static inline int
+atomic_add_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_add_return atomic_add_return
+#endif
+
+#endif /* atomic_add_return_relaxed */
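The three fallbacks above illustrate the fence discipline used throughout the file: _acquire runs the relaxed op and then an acquire fence, _release runs a release fence and then the relaxed op, and the fully ordered form brackets the relaxed op with full fences. A hedged sketch of why the distinction matters, assuming kernel context, a single producer/consumer pair, and hypothetical payload/seq variables:

/*
 * The _release op orders the payload store before the counter
 * update; the _acquire read orders the counter load before the
 * payload load, so the consumer is guaranteed to see the payload.
 */
static int payload;
static atomic_t seq = ATOMIC_INIT(0);

static void producer(int val)
{
        payload = val;                          /* A */
        atomic_add_return_release(1, &seq);     /* B: ordered after A */
}

static int consumer(void)
{
        while (atomic_read_acquire(&seq) == 0)  /* C: ordered before D */
                cpu_relax();
        return payload;                         /* D: sees A */
}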
+
+#ifndef atomic_fetch_add_relaxed
+#define atomic_fetch_add_acquire atomic_fetch_add
+#define atomic_fetch_add_release atomic_fetch_add
+#define atomic_fetch_add_relaxed atomic_fetch_add
+#else /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_fetch_add_acquire
+static inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
+#endif
+
+#ifndef atomic_fetch_add_release
+static inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_add_relaxed(i, v);
+}
+#define atomic_fetch_add_release atomic_fetch_add_release
+#endif
+
+#ifndef atomic_fetch_add
+static inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_add atomic_fetch_add
+#endif
+
+#endif /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_sub_return_relaxed
+#define atomic_sub_return_acquire atomic_sub_return
+#define atomic_sub_return_release atomic_sub_return
+#define atomic_sub_return_relaxed atomic_sub_return
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+static inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#endif
+
+#ifndef atomic_sub_return_release
+static inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_sub_return_relaxed(i, v);
+}
+#define atomic_sub_return_release atomic_sub_return_release
+#endif
+
+#ifndef atomic_sub_return
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_sub_return atomic_sub_return
+#endif
+
+#endif /* atomic_sub_return_relaxed */
+
+#ifndef atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_acquire atomic_fetch_sub
+#define atomic_fetch_sub_release atomic_fetch_sub
+#define atomic_fetch_sub_relaxed atomic_fetch_sub
+#else /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_fetch_sub_acquire
+static inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#endif
+
+#ifndef atomic_fetch_sub_release
+static inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_sub_relaxed(i, v);
+}
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#endif
+
+#ifndef atomic_fetch_sub
+static inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_sub atomic_fetch_sub
+#endif
+
+#endif /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_inc
+static inline void
+atomic_inc(atomic_t *v)
+{
+       atomic_add(1, v);
+}
+#define atomic_inc atomic_inc
+#endif
+
+#ifndef atomic_inc_return_relaxed
+#ifdef atomic_inc_return
+#define atomic_inc_return_acquire atomic_inc_return
+#define atomic_inc_return_release atomic_inc_return
+#define atomic_inc_return_relaxed atomic_inc_return
+#endif /* atomic_inc_return */
+
+#ifndef atomic_inc_return
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+       return atomic_add_return(1, v);
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#ifndef atomic_inc_return_acquire
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+       return atomic_add_return_acquire(1, v);
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#ifndef atomic_inc_return_release
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+       return atomic_add_return_release(1, v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#ifndef atomic_inc_return_relaxed
+static inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+       return atomic_add_return_relaxed(1, v);
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#endif
+
+#else /* atomic_inc_return_relaxed */
+
+#ifndef atomic_inc_return_acquire
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+       int ret = atomic_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#ifndef atomic_inc_return_release
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_inc_return_relaxed(v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#ifndef atomic_inc_return
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#endif /* atomic_inc_return_relaxed */
+
+#ifndef atomic_fetch_inc_relaxed
+#ifdef atomic_fetch_inc
+#define atomic_fetch_inc_acquire atomic_fetch_inc
+#define atomic_fetch_inc_release atomic_fetch_inc
+#define atomic_fetch_inc_relaxed atomic_fetch_inc
+#endif /* atomic_fetch_inc */
+
+#ifndef atomic_fetch_inc
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+       return atomic_fetch_add(1, v);
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#ifndef atomic_fetch_inc_acquire
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+       return atomic_fetch_add_acquire(1, v);
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#ifndef atomic_fetch_inc_release
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+       return atomic_fetch_add_release(1, v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#ifndef atomic_fetch_inc_relaxed
+static inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+       return atomic_fetch_add_relaxed(1, v);
+}
+#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+#endif
+
+#else /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_fetch_inc_acquire
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+       int ret = atomic_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#ifndef atomic_fetch_inc_release
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_inc_relaxed(v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#ifndef atomic_fetch_inc
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#endif /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_dec
+static inline void
+atomic_dec(atomic_t *v)
+{
+       atomic_sub(1, v);
+}
+#define atomic_dec atomic_dec
+#endif
+
+#ifndef atomic_dec_return_relaxed
+#ifdef atomic_dec_return
+#define atomic_dec_return_acquire atomic_dec_return
+#define atomic_dec_return_release atomic_dec_return
+#define atomic_dec_return_relaxed atomic_dec_return
+#endif /* atomic_dec_return */
+
+#ifndef atomic_dec_return
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+       return atomic_sub_return(1, v);
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#ifndef atomic_dec_return_acquire
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+       return atomic_sub_return_acquire(1, v);
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#ifndef atomic_dec_return_release
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+       return atomic_sub_return_release(1, v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#ifndef atomic_dec_return_relaxed
+static inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+       return atomic_sub_return_relaxed(1, v);
+}
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#endif
+
+#else /* atomic_dec_return_relaxed */
+
+#ifndef atomic_dec_return_acquire
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+       int ret = atomic_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#ifndef atomic_dec_return_release
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_dec_return_relaxed(v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#ifndef atomic_dec_return
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#endif /* atomic_dec_return_relaxed */
+
+#ifndef atomic_fetch_dec_relaxed
+#ifdef atomic_fetch_dec
+#define atomic_fetch_dec_acquire atomic_fetch_dec
+#define atomic_fetch_dec_release atomic_fetch_dec
+#define atomic_fetch_dec_relaxed atomic_fetch_dec
+#endif /* atomic_fetch_dec */
+
+#ifndef atomic_fetch_dec
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+       return atomic_fetch_sub(1, v);
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#ifndef atomic_fetch_dec_acquire
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+       return atomic_fetch_sub_acquire(1, v);
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#ifndef atomic_fetch_dec_release
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+       return atomic_fetch_sub_release(1, v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#ifndef atomic_fetch_dec_relaxed
+static inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+       return atomic_fetch_sub_relaxed(1, v);
+}
+#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+#endif
+
+#else /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_dec_acquire
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+       int ret = atomic_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#ifndef atomic_fetch_dec_release
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_dec_relaxed(v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#ifndef atomic_fetch_dec
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#endif /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_and_relaxed
+#define atomic_fetch_and_acquire atomic_fetch_and
+#define atomic_fetch_and_release atomic_fetch_and
+#define atomic_fetch_and_relaxed atomic_fetch_and
+#else /* atomic_fetch_and_relaxed */
+
+#ifndef atomic_fetch_and_acquire
+static inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#endif
+
+#ifndef atomic_fetch_and_release
+static inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_and_relaxed(i, v);
+}
+#define atomic_fetch_and_release atomic_fetch_and_release
+#endif
+
+#ifndef atomic_fetch_and
+static inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_and atomic_fetch_and
+#endif
+
+#endif /* atomic_fetch_and_relaxed */
+
+#ifndef atomic_andnot
+static inline void
+atomic_andnot(int i, atomic_t *v)
+{
+       atomic_and(~i, v);
+}
+#define atomic_andnot atomic_andnot
+#endif
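atomic_andnot(i, v) atomically clears the bits of i in *v; where an architecture has no native instruction for it, the fallback above is simply atomic_and() with the complement. A small sketch with hypothetical flag bits (FLAG_BUSY and FLAG_DIRTY are illustrative; BIT() is assumed from linux/bits.h):

/* Hypothetical flag word; the FLAG_* values are illustrative only. */
#define FLAG_BUSY       BIT(0)
#define FLAG_DIRTY      BIT(1)

static atomic_t flags;

static void mark_idle_and_clean(void)
{
        /* Clear both flags in a single atomic RMW. */
        atomic_andnot(FLAG_BUSY | FLAG_DIRTY, &flags);
}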
+
+#ifndef atomic_fetch_andnot_relaxed
+#ifdef atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
+
+#ifndef atomic_fetch_andnot
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+       return atomic_fetch_and(~i, v);
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#ifndef atomic_fetch_andnot_acquire
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       return atomic_fetch_and_acquire(~i, v);
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#ifndef atomic_fetch_andnot_release
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       return atomic_fetch_and_release(~i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#ifndef atomic_fetch_andnot_relaxed
+static inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+       return atomic_fetch_and_relaxed(~i, v);
+}
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#endif
+
+#else /* atomic_fetch_andnot_relaxed */
+
+#ifndef atomic_fetch_andnot_acquire
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#ifndef atomic_fetch_andnot_release
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_andnot_relaxed(i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#ifndef atomic_fetch_andnot
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#endif /* atomic_fetch_andnot_relaxed */
+
+#ifndef atomic_fetch_or_relaxed
+#define atomic_fetch_or_acquire atomic_fetch_or
+#define atomic_fetch_or_release atomic_fetch_or
+#define atomic_fetch_or_relaxed atomic_fetch_or
+#else /* atomic_fetch_or_relaxed */
+
+#ifndef atomic_fetch_or_acquire
+static inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#endif
+
+#ifndef atomic_fetch_or_release
+static inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_or_relaxed(i, v);
+}
+#define atomic_fetch_or_release atomic_fetch_or_release
+#endif
+
+#ifndef atomic_fetch_or
+static inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_or atomic_fetch_or
+#endif
+
+#endif /* atomic_fetch_or_relaxed */
+
+#ifndef atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_acquire atomic_fetch_xor
+#define atomic_fetch_xor_release atomic_fetch_xor
+#define atomic_fetch_xor_relaxed atomic_fetch_xor
+#else /* atomic_fetch_xor_relaxed */
+
+#ifndef atomic_fetch_xor_acquire
+static inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+       int ret = atomic_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#endif
+
+#ifndef atomic_fetch_xor_release
+static inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+       __atomic_release_fence();
+       return atomic_fetch_xor_relaxed(i, v);
+}
+#define atomic_fetch_xor_release atomic_fetch_xor_release
+#endif
+
+#ifndef atomic_fetch_xor
+static inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_fetch_xor atomic_fetch_xor
+#endif
+
+#endif /* atomic_fetch_xor_relaxed */
+
+#ifndef atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg
+#define atomic_xchg_release atomic_xchg
+#define atomic_xchg_relaxed atomic_xchg
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+static inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+       int ret = atomic_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_xchg_acquire atomic_xchg_acquire
+#endif
+
+#ifndef atomic_xchg_release
+static inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+       __atomic_release_fence();
+       return atomic_xchg_relaxed(v, i);
+}
+#define atomic_xchg_release atomic_xchg_release
+#endif
+
+#ifndef atomic_xchg
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_xchg atomic_xchg
+#endif
+
+#endif /* atomic_xchg_relaxed */
+
+#ifndef atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg
+#define atomic_cmpxchg_release atomic_cmpxchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+static inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+       int ret = atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#endif
+
+#ifndef atomic_cmpxchg_release
+static inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+       __atomic_release_fence();
+       return atomic_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#endif
+
+#ifndef atomic_cmpxchg
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       int ret;
+       __atomic_pre_full_fence();
+       ret = atomic_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_cmpxchg atomic_cmpxchg
+#endif
+
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_try_cmpxchg_relaxed
+#ifdef atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
+#ifndef atomic_try_cmpxchg
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = atomic_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#ifndef atomic_try_cmpxchg_acquire
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = atomic_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic_try_cmpxchg_release
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = atomic_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#ifndef atomic_try_cmpxchg_relaxed
+static inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+       int r, o = *old;
+       r = atomic_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+#endif
+
+#else /* atomic_try_cmpxchg_relaxed */
+
+#ifndef atomic_try_cmpxchg_acquire
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+       bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic_try_cmpxchg_release
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+       __atomic_release_fence();
+       return atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#ifndef atomic_try_cmpxchg
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = atomic_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#endif /* atomic_try_cmpxchg_relaxed */
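atomic_try_cmpxchg() exists to make CAS loops cheaper to write: on failure it stores the value actually observed back through *old, so the loop body never has to re-read the variable. A hedged sketch of the canonical loop, here a hypothetical saturating increment:

/* Saturating increment, capped at INT_MAX; illustrative only. */
static int saturating_inc(atomic_t *v)
{
        int old = atomic_read(v);

        do {
                if (old == INT_MAX)
                        return old;     /* already saturated */
        } while (!atomic_try_cmpxchg(v, &old, old + 1));

        return old + 1;
}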
+
+#ifndef atomic_sub_and_test
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns true if the
+ * result is zero, or false otherwise.
+ */
+static inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+       return atomic_sub_return(i, v) == 0;
+}
+#define atomic_sub_and_test atomic_sub_and_test
+#endif
+
+#ifndef atomic_dec_and_test
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and returns true if the
+ * result is 0, or false for all other cases.
+ */
+static inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+       return atomic_dec_return(v) == 0;
+}
+#define atomic_dec_and_test atomic_dec_and_test
+#endif
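atomic_dec_and_test() is the primitive underneath open-coded reference counts: the atomic RMW guarantees that exactly one CPU observes the transition to zero, so exactly one caller runs the teardown. A hedged sketch with a hypothetical struct myobj (kfree() from linux/slab.h):

/* Hypothetical object with an open-coded reference count. */
struct myobj {
        atomic_t refcnt;
        /* ... payload ... */
};

static void myobj_put(struct myobj *obj)
{
        /* Only the dropper of the last reference frees the object. */
        if (atomic_dec_and_test(&obj->refcnt))
                kfree(obj);
}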
+
+#ifndef atomic_inc_and_test
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1 and returns true if the
+ * result is zero, or false for all other cases.
+ */
+static inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+       return atomic_inc_return(v) == 0;
+}
+#define atomic_inc_and_test atomic_inc_and_test
+#endif
+
+#ifndef atomic_add_negative
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true if the result
+ * is negative, or false when the result is greater than or
+ * equal to zero.
+ */
+static inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+#define atomic_add_negative atomic_add_negative
+#endif
+
+#ifndef atomic_fetch_add_unless
+/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+#endif
+
+#ifndef atomic_add_unless
+/**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+       return atomic_fetch_add_unless(v, a, u) != u;
+}
+#define atomic_add_unless atomic_add_unless
+#endif
+
+#ifndef atomic_inc_not_zero
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+       return atomic_add_unless(v, 1, 0);
+}
+#define atomic_inc_not_zero atomic_inc_not_zero
+#endif
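atomic_inc_not_zero() is the lookup-side counterpart of atomic_dec_and_test(): it takes a new reference only if the count has not already dropped to zero, i.e. only if teardown has not begun. Continuing the hypothetical struct myobj sketch from above:

/* Take a reference during lookup; fail if teardown already began. */
static struct myobj *myobj_tryget(struct myobj *obj)
{
        if (!atomic_inc_not_zero(&obj->refcnt))
                return NULL;    /* refcnt was zero: object is dying */
        return obj;
}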
+
+#ifndef atomic_inc_unless_negative
+static inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define atomic_inc_unless_negative atomic_inc_unless_negative
+#endif
+
+#ifndef atomic_dec_unless_positive
+static inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+       int c = atomic_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define atomic_dec_unless_positive atomic_dec_unless_positive
+#endif
+
+#ifndef atomic_dec_if_positive
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+       int dec, c = atomic_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!atomic_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+#endif
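atomic_dec_if_positive() decrements only when the result stays non-negative and returns the would-be new value, which is negative exactly when the decrement was refused. That makes it a natural non-blocking "take one permit" operation; a hedged sketch with a hypothetical permit counter:

/* Hypothetical permit pool: try to take one permit, never block. */
static bool take_permit(atomic_t *permits)
{
        /*
         * atomic_dec_if_positive() returns the decremented value,
         * or a negative value if the counter was already <= 0 and
         * was left untouched.
         */
        return atomic_dec_if_positive(permits) >= 0;
}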
+
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
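atomic_cond_read_{acquire,relaxed}() wrap smp_cond_load_{acquire,relaxed}(): they spin (using an architecture's low-power wait primitive where available) until a condition over the freshly loaded value, spelled VAL inside the expression, becomes true, and return that value. A hedged sketch waiting for a hypothetical state word to leave zero:

/*
 * Spin until *state becomes non-zero; the acquire ordering makes
 * everything published before the state change visible afterwards.
 */
static int wait_for_state(atomic_t *state)
{
        return atomic_cond_read_acquire(state, VAL != 0);
}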
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+#ifndef atomic64_read_acquire
+static inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+#define atomic64_read_acquire atomic64_read_acquire
+#endif
+
+#ifndef atomic64_set_release
+static inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+#define atomic64_set_release atomic64_set_release
+#endif
+
+#ifndef atomic64_add_return_relaxed
+#define atomic64_add_return_acquire atomic64_add_return
+#define atomic64_add_return_release atomic64_add_return
+#define atomic64_add_return_relaxed atomic64_add_return
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+static inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_add_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#endif
+
+#ifndef atomic64_add_return_release
+static inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_add_return_relaxed(i, v);
+}
+#define atomic64_add_return_release atomic64_add_return_release
+#endif
+
+#ifndef atomic64_add_return
+static inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_add_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_add_return atomic64_add_return
+#endif
+
+#endif /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_acquire atomic64_fetch_add
+#define atomic64_fetch_add_release atomic64_fetch_add
+#define atomic64_fetch_add_relaxed atomic64_fetch_add
+#else /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_fetch_add_acquire
+static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_add_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#endif
+
+#ifndef atomic64_fetch_add_release
+static inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_add_relaxed(i, v);
+}
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#endif
+
+#ifndef atomic64_fetch_add
+static inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_add_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_add atomic64_fetch_add
+#endif
+
+#endif /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_sub_return_relaxed
+#define atomic64_sub_return_acquire atomic64_sub_return
+#define atomic64_sub_return_release atomic64_sub_return
+#define atomic64_sub_return_relaxed atomic64_sub_return
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+static inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_sub_return_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#endif
+
+#ifndef atomic64_sub_return_release
+static inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_sub_return_relaxed(i, v);
+}
+#define atomic64_sub_return_release atomic64_sub_return_release
+#endif
+
+#ifndef atomic64_sub_return
+static inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_sub_return_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_sub_return atomic64_sub_return
+#endif
+
+#endif /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub
+#define atomic64_fetch_sub_release atomic64_fetch_sub
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
+#else /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_fetch_sub_acquire
+static inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_sub_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#endif
+
+#ifndef atomic64_fetch_sub_release
+static inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_sub_relaxed(i, v);
+}
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#endif
+
+#ifndef atomic64_fetch_sub
+static inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_sub_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
+
+#endif /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_inc
+static inline void
+atomic64_inc(atomic64_t *v)
+{
+       atomic64_add(1, v);
+}
+#define atomic64_inc atomic64_inc
+#endif
+
+#ifndef atomic64_inc_return_relaxed
+#ifdef atomic64_inc_return
+#define atomic64_inc_return_acquire atomic64_inc_return
+#define atomic64_inc_return_release atomic64_inc_return
+#define atomic64_inc_return_relaxed atomic64_inc_return
+#endif /* atomic64_inc_return */
+
+#ifndef atomic64_inc_return
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+       return atomic64_add_return(1, v);
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#ifndef atomic64_inc_return_acquire
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+       return atomic64_add_return_acquire(1, v);
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#ifndef atomic64_inc_return_release
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+       return atomic64_add_return_release(1, v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#ifndef atomic64_inc_return_relaxed
+static inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+       return atomic64_add_return_relaxed(1, v);
+}
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#endif
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+       s64 ret = atomic64_inc_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#ifndef atomic64_inc_return_release
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_inc_return_relaxed(v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#ifndef atomic64_inc_return
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_inc_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#endif /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_fetch_inc_relaxed
+#ifdef atomic64_fetch_inc
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc
+#define atomic64_fetch_inc_release atomic64_fetch_inc
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
+#endif /* atomic64_fetch_inc */
+
+#ifndef atomic64_fetch_inc
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+       return atomic64_fetch_add(1, v);
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#ifndef atomic64_fetch_inc_acquire
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       return atomic64_fetch_add_acquire(1, v);
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#ifndef atomic64_fetch_inc_release
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+       return atomic64_fetch_add_release(1, v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#ifndef atomic64_fetch_inc_relaxed
+static inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+       return atomic64_fetch_add_relaxed(1, v);
+}
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+#endif
+
+#else /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_fetch_inc_acquire
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_inc_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#ifndef atomic64_fetch_inc_release
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_inc_relaxed(v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#ifndef atomic64_fetch_inc
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_inc_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#endif /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_dec
+static inline void
+atomic64_dec(atomic64_t *v)
+{
+       atomic64_sub(1, v);
+}
+#define atomic64_dec atomic64_dec
+#endif
+
+#ifndef atomic64_dec_return_relaxed
+#ifdef atomic64_dec_return
+#define atomic64_dec_return_acquire atomic64_dec_return
+#define atomic64_dec_return_release atomic64_dec_return
+#define atomic64_dec_return_relaxed atomic64_dec_return
+#endif /* atomic64_dec_return */
+
+#ifndef atomic64_dec_return
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+       return atomic64_sub_return(1, v);
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#ifndef atomic64_dec_return_acquire
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+       return atomic64_sub_return_acquire(1, v);
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#ifndef atomic64_dec_return_release
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+       return atomic64_sub_return_release(1, v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#ifndef atomic64_dec_return_relaxed
+static inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+       return atomic64_sub_return_relaxed(1, v);
+}
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#endif
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+       s64 ret = atomic64_dec_return_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#ifndef atomic64_dec_return_release
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_dec_return_relaxed(v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#ifndef atomic64_dec_return
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_dec_return_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#endif /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_fetch_dec_relaxed
+#ifdef atomic64_fetch_dec
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec
+#define atomic64_fetch_dec_release atomic64_fetch_dec
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
+#endif /* atomic64_fetch_dec */
+
+#ifndef atomic64_fetch_dec
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+       return atomic64_fetch_sub(1, v);
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#ifndef atomic64_fetch_dec_acquire
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       return atomic64_fetch_sub_acquire(1, v);
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#ifndef atomic64_fetch_dec_release
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+       return atomic64_fetch_sub_release(1, v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#ifndef atomic64_fetch_dec_relaxed
+static inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+       return atomic64_fetch_sub_relaxed(1, v);
+}
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+#endif
+
+#else /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_dec_acquire
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_dec_relaxed(v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#ifndef atomic64_fetch_dec_release
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_dec_relaxed(v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#ifndef atomic64_fetch_dec
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_dec_relaxed(v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#endif /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_acquire atomic64_fetch_and
+#define atomic64_fetch_and_release atomic64_fetch_and
+#define atomic64_fetch_and_relaxed atomic64_fetch_and
+#else /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_fetch_and_acquire
+static inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_and_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#endif
+
+#ifndef atomic64_fetch_and_release
+static inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_and_relaxed(i, v);
+}
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#endif
+
+#ifndef atomic64_fetch_and
+static inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_and_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_and atomic64_fetch_and
+#endif
+
+#endif /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_andnot
+static inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+       atomic64_and(~i, v);
+}
+#define atomic64_andnot atomic64_andnot
+#endif
+
+#ifndef atomic64_fetch_andnot_relaxed
+#ifdef atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
+
+#ifndef atomic64_fetch_andnot
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       return atomic64_fetch_and(~i, v);
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#ifndef atomic64_fetch_andnot_acquire
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       return atomic64_fetch_and_acquire(~i, v);
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       return atomic64_fetch_and_release(~i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#ifndef atomic64_fetch_andnot_relaxed
+static inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+       return atomic64_fetch_and_relaxed(~i, v);
+}
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#endif
+
+#else /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_andnot_acquire
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_andnot_relaxed(i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#ifndef atomic64_fetch_andnot
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_andnot_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#endif /* atomic64_fetch_andnot_relaxed */
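+
+/*
+ * Illustrative use of the andnot ops (hypothetical caller and flag,
+ * not part of this API): atomically clear a flag bit and learn
+ * whether this caller was the one to clear it:
+ *
+ *	old = atomic64_fetch_andnot(MY_FLAG, &v);
+ *	if (old & MY_FLAG)
+ *		;	// the flag was set and we cleared it
+ */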
+
+#ifndef atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_acquire atomic64_fetch_or
+#define atomic64_fetch_or_release atomic64_fetch_or
+#define atomic64_fetch_or_relaxed atomic64_fetch_or
+#else /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_or_acquire
+static inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_or_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#endif
+
+#ifndef atomic64_fetch_or_release
+static inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_or_relaxed(i, v);
+}
+#define atomic64_fetch_or_release atomic64_fetch_or_release
+#endif
+
+#ifndef atomic64_fetch_or
+static inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_or_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_or atomic64_fetch_or
+#endif
+
+#endif /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor
+#define atomic64_fetch_xor_release atomic64_fetch_xor
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
+#else /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_fetch_xor_acquire
+static inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+       s64 ret = atomic64_fetch_xor_relaxed(i, v);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#endif
+
+#ifndef atomic64_fetch_xor_release
+static inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+       __atomic_release_fence();
+       return atomic64_fetch_xor_relaxed(i, v);
+}
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#endif
+
+#ifndef atomic64_fetch_xor
+static inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_fetch_xor_relaxed(i, v);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_fetch_xor atomic64_fetch_xor
+#endif
+
+#endif /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_xchg_relaxed
+#define atomic64_xchg_acquire atomic64_xchg
+#define atomic64_xchg_release atomic64_xchg
+#define atomic64_xchg_relaxed atomic64_xchg
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+static inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+       s64 ret = atomic64_xchg_relaxed(v, i);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_xchg_acquire atomic64_xchg_acquire
+#endif
+
+#ifndef atomic64_xchg_release
+static inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+       __atomic_release_fence();
+       return atomic64_xchg_relaxed(v, i);
+}
+#define atomic64_xchg_release atomic64_xchg_release
+#endif
+
+#ifndef atomic64_xchg
+static inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_xchg_relaxed(v, i);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_xchg atomic64_xchg
+#endif
+
+#endif /* atomic64_xchg_relaxed */
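+
+/*
+ * Illustrative use of atomic64_xchg() (hypothetical field): atomically
+ * take the accumulated value and reset it in one step, so each value
+ * is consumed by exactly one caller:
+ *
+ *	s64 pending = atomic64_xchg(&stats->pending, 0);
+ */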
+
+#ifndef atomic64_cmpxchg_relaxed
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg
+#define atomic64_cmpxchg_release atomic64_cmpxchg
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+static inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_cmpxchg_release
+static inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+       __atomic_release_fence();
+       return atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_cmpxchg_release atomic64_cmpxchg_release
+#endif
+
+#ifndef atomic64_cmpxchg
+static inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+       s64 ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_cmpxchg atomic64_cmpxchg
+#endif
+
+#endif /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_try_cmpxchg_relaxed
+#ifdef atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
+#ifndef atomic64_try_cmpxchg
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = atomic64_cmpxchg(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#ifndef atomic64_try_cmpxchg_acquire
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = atomic64_cmpxchg_acquire(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_try_cmpxchg_release
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = atomic64_cmpxchg_release(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#ifndef atomic64_try_cmpxchg_relaxed
+static inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+       s64 r, o = *old;
+       r = atomic64_cmpxchg_relaxed(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+#endif
+
+#else /* atomic64_try_cmpxchg_relaxed */
+
+#ifndef atomic64_try_cmpxchg_acquire
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_acquire_fence();
+       return ret;
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#ifndef atomic64_try_cmpxchg_release
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+       __atomic_release_fence();
+       return atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#ifndef atomic64_try_cmpxchg
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+       bool ret;
+       __atomic_pre_full_fence();
+       ret = atomic64_try_cmpxchg_relaxed(v, old, new);
+       __atomic_post_full_fence();
+       return ret;
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#endif /* atomic64_try_cmpxchg_relaxed */
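+
+/*
+ * Typical try_cmpxchg usage is a CAS loop; on failure, *old is
+ * updated to the value currently in @v, so the loop needs no explicit
+ * re-read (sketch, with a hypothetical compute_new() helper):
+ *
+ *	s64 old = atomic64_read(&v);
+ *	do {
+ *		new = compute_new(old);
+ *	} while (!atomic64_try_cmpxchg(&v, &old, new));
+ */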
+
+#ifndef atomic64_sub_and_test
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+       return atomic64_sub_return(i, v) == 0;
+}
+#define atomic64_sub_and_test atomic64_sub_and_test
+#endif
+
+#ifndef atomic64_dec_and_test
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+       return atomic64_dec_return(v) == 0;
+}
+#define atomic64_dec_and_test atomic64_dec_and_test
+#endif
+
+#ifndef atomic64_inc_and_test
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+       return atomic64_inc_return(v) == 0;
+}
+#define atomic64_inc_and_test atomic64_inc_and_test
+#endif
+
+#ifndef atomic64_add_negative
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * the result is greater than or equal to zero.
+ */
+static inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+       return atomic64_add_return(i, v) < 0;
+}
+#define atomic64_add_negative atomic64_add_negative
+#endif
+
+#ifndef atomic64_fetch_add_unless
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       s64 c = atomic64_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+#ifndef atomic64_add_unless
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+       return atomic64_fetch_add_unless(v, a, u) != u;
+}
+#define atomic64_add_unless atomic64_add_unless
+#endif
+
+#ifndef atomic64_inc_not_zero
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+       return atomic64_add_unless(v, 1, 0);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
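+
+/*
+ * Illustrative use (hypothetical refcounted object): the lookup-side
+ * reference grab, which must not resurrect an object whose count has
+ * already hit zero:
+ *
+ *	if (!atomic64_inc_not_zero(&obj->refs))
+ *		return NULL;	// already on its way to being freed
+ */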
+
+#ifndef atomic64_inc_unless_negative
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+       s64 c = atomic64_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+       s64 c = atomic64_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#ifndef atomic64_dec_if_positive
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+       s64 dec, c = atomic64_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
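+
+/*
+ * Illustrative use (hypothetical resource counter): consume one unit
+ * only if one is available; on failure the counter is left untouched:
+ *
+ *	if (atomic64_dec_if_positive(&avail) < 0)
+ *		return -EBUSY;	// nothing available
+ */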
+
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
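+
+/*
+ * The cond_read helpers spin until the condition expression becomes
+ * true; within the expression, VAL names the value just loaded from
+ * @v, e.g.:
+ *
+ *	atomic64_cond_read_acquire(&v, VAL != 0);
+ *
+ * The _acquire form additionally orders the load against all later
+ * memory accesses.
+ */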
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 1e8e88b..4c0d009 100644 (file)
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
  * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
  */
 
-#ifndef atomic_read_acquire
-#define  atomic_read_acquire(v)                smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic_set_release
-#define  atomic_set_release(v, i)      smp_store_release(&(v)->counter, (i))
-#endif
-
 /*
  * The idea here is to build acquire/release variants by adding explicit
  * barriers on top of the relaxed variant. In the case where the relaxed
        __ret;                                                          \
 })
 
-/* atomic_add_return_relaxed */
-#ifndef atomic_add_return_relaxed
-#define  atomic_add_return_relaxed     atomic_add_return
-#define  atomic_add_return_acquire     atomic_add_return
-#define  atomic_add_return_release     atomic_add_return
-
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-#define  atomic_add_return_acquire(...)                                        \
-       __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return_release
-#define  atomic_add_return_release(...)                                        \
-       __atomic_op_release(atomic_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_add_return
-#define  atomic_add_return(...)                                                \
-       __atomic_op_fence(atomic_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic_add_return_relaxed */
-
-#ifndef atomic_inc
-#define atomic_inc(v)                  atomic_add(1, (v))
-#endif
-
-/* atomic_inc_return_relaxed */
-#ifndef atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic_inc_return_relaxed(v)   atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v)   atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v)   atomic_add_return_release(1, (v))
-#else /* atomic_inc_return */
-#define  atomic_inc_return_relaxed     atomic_inc_return
-#define  atomic_inc_return_acquire     atomic_inc_return
-#define  atomic_inc_return_release     atomic_inc_return
-#endif /* atomic_inc_return */
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-#define  atomic_inc_return_acquire(...)                                        \
-       __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return_release
-#define  atomic_inc_return_release(...)                                        \
-       __atomic_op_release(atomic_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_inc_return
-#define  atomic_inc_return(...)                                                \
-       __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic_inc_return_relaxed */
-
-/* atomic_sub_return_relaxed */
-#ifndef atomic_sub_return_relaxed
-#define  atomic_sub_return_relaxed     atomic_sub_return
-#define  atomic_sub_return_acquire     atomic_sub_return
-#define  atomic_sub_return_release     atomic_sub_return
-
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-#define  atomic_sub_return_acquire(...)                                        \
-       __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return_release
-#define  atomic_sub_return_release(...)                                        \
-       __atomic_op_release(atomic_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_sub_return
-#define  atomic_sub_return(...)                                                \
-       __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic_sub_return_relaxed */
-
-#ifndef atomic_dec
-#define atomic_dec(v)                  atomic_sub(1, (v))
-#endif
-
-/* atomic_dec_return_relaxed */
-#ifndef atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
-#define atomic_dec_return_relaxed(v)   atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v)   atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v)   atomic_sub_return_release(1, (v))
-#else /* atomic_dec_return */
-#define  atomic_dec_return_relaxed     atomic_dec_return
-#define  atomic_dec_return_acquire     atomic_dec_return
-#define  atomic_dec_return_release     atomic_dec_return
-#endif /* atomic_dec_return */
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-#define  atomic_dec_return_acquire(...)                                        \
-       __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return_release
-#define  atomic_dec_return_release(...)                                        \
-       __atomic_op_release(atomic_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic_dec_return
-#define  atomic_dec_return(...)                                                \
-       __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic_dec_return_relaxed */
-
-
-/* atomic_fetch_add_relaxed */
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_relaxed       atomic_fetch_add
-#define atomic_fetch_add_acquire       atomic_fetch_add
-#define atomic_fetch_add_release       atomic_fetch_add
-
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-#define atomic_fetch_add_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add_release
-#define atomic_fetch_add_release(...)                                  \
-       __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_add
-#define atomic_fetch_add(...)                                          \
-       __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_add_relaxed */
-
-/* atomic_fetch_inc_relaxed */
-#ifndef atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(v)            atomic_fetch_add(1, (v))
-#define atomic_fetch_inc_relaxed(v)    atomic_fetch_add_relaxed(1, (v))
-#define atomic_fetch_inc_acquire(v)    atomic_fetch_add_acquire(1, (v))
-#define atomic_fetch_inc_release(v)    atomic_fetch_add_release(1, (v))
-#else /* atomic_fetch_inc */
-#define atomic_fetch_inc_relaxed       atomic_fetch_inc
-#define atomic_fetch_inc_acquire       atomic_fetch_inc
-#define atomic_fetch_inc_release       atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-#define atomic_fetch_inc_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc_release
-#define atomic_fetch_inc_release(...)                                  \
-       __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_inc
-#define atomic_fetch_inc(...)                                          \
-       __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_inc_relaxed */
-
-/* atomic_fetch_sub_relaxed */
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_relaxed       atomic_fetch_sub
-#define atomic_fetch_sub_acquire       atomic_fetch_sub
-#define atomic_fetch_sub_release       atomic_fetch_sub
-
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-#define atomic_fetch_sub_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub_release
-#define atomic_fetch_sub_release(...)                                  \
-       __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_sub
-#define atomic_fetch_sub(...)                                          \
-       __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_sub_relaxed */
-
-/* atomic_fetch_dec_relaxed */
-#ifndef atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(v)            atomic_fetch_sub(1, (v))
-#define atomic_fetch_dec_relaxed(v)    atomic_fetch_sub_relaxed(1, (v))
-#define atomic_fetch_dec_acquire(v)    atomic_fetch_sub_acquire(1, (v))
-#define atomic_fetch_dec_release(v)    atomic_fetch_sub_release(1, (v))
-#else /* atomic_fetch_dec */
-#define atomic_fetch_dec_relaxed       atomic_fetch_dec
-#define atomic_fetch_dec_acquire       atomic_fetch_dec
-#define atomic_fetch_dec_release       atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-#define atomic_fetch_dec_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec_release
-#define atomic_fetch_dec_release(...)                                  \
-       __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_dec
-#define atomic_fetch_dec(...)                                          \
-       __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_dec_relaxed */
-
-/* atomic_fetch_or_relaxed */
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_relaxed        atomic_fetch_or
-#define atomic_fetch_or_acquire        atomic_fetch_or
-#define atomic_fetch_or_release        atomic_fetch_or
-
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-#define atomic_fetch_or_acquire(...)                                   \
-       __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or_release
-#define atomic_fetch_or_release(...)                                   \
-       __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_or
-#define atomic_fetch_or(...)                                           \
-       __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_or_relaxed */
-
-/* atomic_fetch_and_relaxed */
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_relaxed       atomic_fetch_and
-#define atomic_fetch_and_acquire       atomic_fetch_and
-#define atomic_fetch_and_release       atomic_fetch_and
-
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-#define atomic_fetch_and_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and_release
-#define atomic_fetch_and_release(...)                                  \
-       __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_and
-#define atomic_fetch_and(...)                                          \
-       __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_andnot
-#define atomic_andnot(i, v)            atomic_and(~(int)(i), (v))
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(i, v)              atomic_fetch_and(~(int)(i), (v))
-#define atomic_fetch_andnot_relaxed(i, v)      atomic_fetch_and_relaxed(~(int)(i), (v))
-#define atomic_fetch_andnot_acquire(i, v)      atomic_fetch_and_acquire(~(int)(i), (v))
-#define atomic_fetch_andnot_release(i, v)      atomic_fetch_and_release(~(int)(i), (v))
-#else /* atomic_fetch_andnot */
-#define atomic_fetch_andnot_relaxed            atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire            atomic_fetch_andnot
-#define atomic_fetch_andnot_release            atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_acquire(...)                                       \
-       __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot_release
-#define atomic_fetch_andnot_release(...)                                       \
-       __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_andnot
-#define atomic_fetch_andnot(...)                                               \
-       __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_andnot_relaxed */
-
-/* atomic_fetch_xor_relaxed */
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_relaxed       atomic_fetch_xor
-#define atomic_fetch_xor_acquire       atomic_fetch_xor
-#define atomic_fetch_xor_release       atomic_fetch_xor
-
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-#define atomic_fetch_xor_acquire(...)                                  \
-       __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor_release
-#define atomic_fetch_xor_release(...)                                  \
-       __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic_fetch_xor
-#define atomic_fetch_xor(...)                                          \
-       __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic_fetch_xor_relaxed */
-
-
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define  atomic_xchg_relaxed           atomic_xchg
-#define  atomic_xchg_acquire           atomic_xchg
-#define  atomic_xchg_release           atomic_xchg
-
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-#define  atomic_xchg_acquire(...)                                      \
-       __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg_release
-#define  atomic_xchg_release(...)                                      \
-       __atomic_op_release(atomic_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_xchg
-#define  atomic_xchg(...)                                              \
-       __atomic_op_fence(atomic_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic_xchg_relaxed */
-
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define  atomic_cmpxchg_relaxed                atomic_cmpxchg
-#define  atomic_cmpxchg_acquire                atomic_cmpxchg
-#define  atomic_cmpxchg_release                atomic_cmpxchg
-
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-#define  atomic_cmpxchg_acquire(...)                                   \
-       __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg_release
-#define  atomic_cmpxchg_release(...)                                   \
-       __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic_cmpxchg
-#define  atomic_cmpxchg(...)                                           \
-       __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg
-
-#define __atomic_try_cmpxchg(type, _p, _po, _n)                                \
-({                                                                     \
-       typeof(_po) __po = (_po);                                       \
-       typeof(*(_po)) __r, __o = *__po;                                \
-       __r = atomic_cmpxchg##type((_p), __o, (_n));                    \
-       if (unlikely(__r != __o))                                       \
-               *__po = __r;                                            \
-       likely(__r == __o);                                             \
-})
-
-#define atomic_try_cmpxchg(_p, _po, _n)                __atomic_try_cmpxchg(, _p, _po, _n)
-#define atomic_try_cmpxchg_relaxed(_p, _po, _n)        __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic_try_cmpxchg_acquire(_p, _po, _n)        __atomic_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic_try_cmpxchg_release(_p, _po, _n)        __atomic_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic_try_cmpxchg */
-#define atomic_try_cmpxchg_relaxed     atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire     atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release     atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-/* cmpxchg_relaxed */
-#ifndef cmpxchg_relaxed
-#define  cmpxchg_relaxed               cmpxchg
-#define  cmpxchg_acquire               cmpxchg
-#define  cmpxchg_release               cmpxchg
-
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define  cmpxchg_acquire(...)                                          \
-       __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define  cmpxchg_release(...)                                          \
-       __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define  cmpxchg(...)                                                  \
-       __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-#endif /* cmpxchg_relaxed */
-
-/* cmpxchg64_relaxed */
-#ifndef cmpxchg64_relaxed
-#define  cmpxchg64_relaxed             cmpxchg64
-#define  cmpxchg64_acquire             cmpxchg64
-#define  cmpxchg64_release             cmpxchg64
-
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define  cmpxchg64_acquire(...)                                                \
-       __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define  cmpxchg64_release(...)                                                \
-       __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define  cmpxchg64(...)                                                        \
-       __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-#endif /* cmpxchg64_relaxed */
-
-/* xchg_relaxed */
-#ifndef xchg_relaxed
-#define  xchg_relaxed                  xchg
-#define  xchg_acquire                  xchg
-#define  xchg_release                  xchg
-
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define  xchg_acquire(...)             __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define  xchg_release(...)             __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define  xchg(...)                     __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-#endif /* xchg_relaxed */
-
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns the original value of @v.
- */
-#ifndef atomic_fetch_add_unless
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-       int c = atomic_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!atomic_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#endif
-
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool atomic_add_unless(atomic_t *v, int a, int u)
-{
-       return atomic_fetch_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-#ifndef atomic_inc_not_zero
-#define atomic_inc_not_zero(v)         atomic_add_unless((v), 1, 0)
-#endif
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic_inc_and_test
-static inline bool atomic_inc_and_test(atomic_t *v)
-{
-       return atomic_inc_return(v) == 0;
-}
-#endif
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#ifndef atomic_dec_and_test
-static inline bool atomic_dec_and_test(atomic_t *v)
-{
-       return atomic_dec_return(v) == 0;
-}
-#endif
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic_sub_and_test
-static inline bool atomic_sub_and_test(int i, atomic_t *v)
-{
-       return atomic_sub_return(i, v) == 0;
-}
-#endif
-
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#ifndef atomic_add_negative
-static inline bool atomic_add_negative(int i, atomic_t *v)
-{
-       return atomic_add_return(i, v) < 0;
-}
-#endif
-
-#ifndef atomic_inc_unless_negative
-static inline bool atomic_inc_unless_negative(atomic_t *v)
-{
-       int c = atomic_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#endif
-
-#ifndef atomic_dec_unless_positive
-static inline bool atomic_dec_unless_positive(atomic_t *v)
-{
-       int c = atomic_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#endif
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic variable, v, was not decremented.
- */
-#ifndef atomic_dec_if_positive
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
-       int dec, c = atomic_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!atomic_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#endif
-
-#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
-#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef atomic64_read_acquire
-#define  atomic64_read_acquire(v)      smp_load_acquire(&(v)->counter)
-#endif
-
-#ifndef atomic64_set_release
-#define  atomic64_set_release(v, i)    smp_store_release(&(v)->counter, (i))
-#endif
-
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define  atomic64_add_return_relaxed   atomic64_add_return
-#define  atomic64_add_return_acquire   atomic64_add_return
-#define  atomic64_add_return_release   atomic64_add_return
-
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-#define  atomic64_add_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return_release
-#define  atomic64_add_return_release(...)                              \
-       __atomic_op_release(atomic64_add_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_add_return
-#define  atomic64_add_return(...)                                      \
-       __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_inc
-#define atomic64_inc(v)                        atomic64_add(1, (v))
-#endif
-
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return
-#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#else /* atomic64_inc_return */
-#define  atomic64_inc_return_relaxed   atomic64_inc_return
-#define  atomic64_inc_return_acquire   atomic64_inc_return
-#define  atomic64_inc_return_release   atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-#define  atomic64_inc_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return_release
-#define  atomic64_inc_return_release(...)                              \
-       __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_inc_return
-#define  atomic64_inc_return(...)                                      \
-       __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_inc_return_relaxed */
-
-
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define  atomic64_sub_return_relaxed   atomic64_sub_return
-#define  atomic64_sub_return_acquire   atomic64_sub_return
-#define  atomic64_sub_return_release   atomic64_sub_return
-
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-#define  atomic64_sub_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return_release
-#define  atomic64_sub_return_release(...)                              \
-       __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_sub_return
-#define  atomic64_sub_return(...)                                      \
-       __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_dec
-#define atomic64_dec(v)                        atomic64_sub(1, (v))
-#endif
-
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return
-#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#else /* atomic64_dec_return */
-#define  atomic64_dec_return_relaxed   atomic64_dec_return
-#define  atomic64_dec_return_acquire   atomic64_dec_return
-#define  atomic64_dec_return_release   atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-#define  atomic64_dec_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return_release
-#define  atomic64_dec_return_release(...)                              \
-       __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_dec_return
-#define  atomic64_dec_return(...)                                      \
-       __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
-#endif
-#endif /* atomic64_dec_return_relaxed */
-
-
-/* atomic64_fetch_add_relaxed */
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_relaxed     atomic64_fetch_add
-#define atomic64_fetch_add_acquire     atomic64_fetch_add
-#define atomic64_fetch_add_release     atomic64_fetch_add
-
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-#define atomic64_fetch_add_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add_release
-#define atomic64_fetch_add_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_add
-#define atomic64_fetch_add(...)                                                \
-       __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_add_relaxed */
-
-/* atomic64_fetch_inc_relaxed */
-#ifndef atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(v)          atomic64_fetch_add(1, (v))
-#define atomic64_fetch_inc_relaxed(v)  atomic64_fetch_add_relaxed(1, (v))
-#define atomic64_fetch_inc_acquire(v)  atomic64_fetch_add_acquire(1, (v))
-#define atomic64_fetch_inc_release(v)  atomic64_fetch_add_release(1, (v))
-#else /* atomic64_fetch_inc */
-#define atomic64_fetch_inc_relaxed     atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire     atomic64_fetch_inc
-#define atomic64_fetch_inc_release     atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-#define atomic64_fetch_inc_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc_release
-#define atomic64_fetch_inc_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_inc
-#define atomic64_fetch_inc(...)                                                \
-       __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_inc_relaxed */
-
-/* atomic64_fetch_sub_relaxed */
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub
-#define atomic64_fetch_sub_acquire     atomic64_fetch_sub
-#define atomic64_fetch_sub_release     atomic64_fetch_sub
-
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub_release
-#define atomic64_fetch_sub_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_sub
-#define atomic64_fetch_sub(...)                                                \
-       __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_sub_relaxed */
-
-/* atomic64_fetch_dec_relaxed */
-#ifndef atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(v)          atomic64_fetch_sub(1, (v))
-#define atomic64_fetch_dec_relaxed(v)  atomic64_fetch_sub_relaxed(1, (v))
-#define atomic64_fetch_dec_acquire(v)  atomic64_fetch_sub_acquire(1, (v))
-#define atomic64_fetch_dec_release(v)  atomic64_fetch_sub_release(1, (v))
-#else /* atomic64_fetch_dec */
-#define atomic64_fetch_dec_relaxed     atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire     atomic64_fetch_dec
-#define atomic64_fetch_dec_release     atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-#define atomic64_fetch_dec_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec_release
-#define atomic64_fetch_dec_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_dec
-#define atomic64_fetch_dec(...)                                                \
-       __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_dec_relaxed */
-
-/* atomic64_fetch_or_relaxed */
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_relaxed      atomic64_fetch_or
-#define atomic64_fetch_or_acquire      atomic64_fetch_or
-#define atomic64_fetch_or_release      atomic64_fetch_or
-
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-#define atomic64_fetch_or_acquire(...)                                 \
-       __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or_release
-#define atomic64_fetch_or_release(...)                                 \
-       __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_or
-#define atomic64_fetch_or(...)                                         \
-       __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_or_relaxed */
-
-/* atomic64_fetch_and_relaxed */
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_relaxed     atomic64_fetch_and
-#define atomic64_fetch_and_acquire     atomic64_fetch_and
-#define atomic64_fetch_and_release     atomic64_fetch_and
-
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-#define atomic64_fetch_and_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and_release
-#define atomic64_fetch_and_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_and
-#define atomic64_fetch_and(...)                                                \
-       __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_andnot
-#define atomic64_andnot(i, v)          atomic64_and(~(long long)(i), (v))
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(i, v)            atomic64_fetch_and(~(long long)(i), (v))
-#define atomic64_fetch_andnot_relaxed(i, v)    atomic64_fetch_and_relaxed(~(long long)(i), (v))
-#define atomic64_fetch_andnot_acquire(i, v)    atomic64_fetch_and_acquire(~(long long)(i), (v))
-#define atomic64_fetch_andnot_release(i, v)    atomic64_fetch_and_release(~(long long)(i), (v))
-#else /* atomic64_fetch_andnot */
-#define atomic64_fetch_andnot_relaxed          atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire          atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release          atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_acquire(...)                                     \
-       __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot_release(...)                                     \
-       __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_andnot
-#define atomic64_fetch_andnot(...)                                             \
-       __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_andnot_relaxed */
-
-/* atomic64_fetch_xor_relaxed */
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor
-#define atomic64_fetch_xor_acquire     atomic64_fetch_xor
-#define atomic64_fetch_xor_release     atomic64_fetch_xor
-
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_acquire(...)                                        \
-       __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor_release
-#define atomic64_fetch_xor_release(...)                                        \
-       __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_fetch_xor
-#define atomic64_fetch_xor(...)                                                \
-       __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
-#endif
-#endif /* atomic64_fetch_xor_relaxed */
-
-
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define  atomic64_xchg_relaxed         atomic64_xchg
-#define  atomic64_xchg_acquire         atomic64_xchg
-#define  atomic64_xchg_release         atomic64_xchg
-
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-#define  atomic64_xchg_acquire(...)                                    \
-       __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg_release
-#define  atomic64_xchg_release(...)                                    \
-       __atomic_op_release(atomic64_xchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_xchg
-#define  atomic64_xchg(...)                                            \
-       __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_xchg_relaxed */
-
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define  atomic64_cmpxchg_relaxed      atomic64_cmpxchg
-#define  atomic64_cmpxchg_acquire      atomic64_cmpxchg
-#define  atomic64_cmpxchg_release      atomic64_cmpxchg
-
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-#define  atomic64_cmpxchg_acquire(...)                                 \
-       __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg_release
-#define  atomic64_cmpxchg_release(...)                                 \
-       __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef atomic64_cmpxchg
-#define  atomic64_cmpxchg(...)                                         \
-       __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
-#endif
-#endif /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg
-
-#define __atomic64_try_cmpxchg(type, _p, _po, _n)                      \
-({                                                                     \
-       typeof(_po) __po = (_po);                                       \
-       typeof(*(_po)) __r, __o = *__po;                                \
-       __r = atomic64_cmpxchg##type((_p), __o, (_n));                  \
-       if (unlikely(__r != __o))                                       \
-               *__po = __r;                                            \
-       likely(__r == __o);                                             \
-})
-
-#define atomic64_try_cmpxchg(_p, _po, _n)              __atomic64_try_cmpxchg(, _p, _po, _n)
-#define atomic64_try_cmpxchg_relaxed(_p, _po, _n)      __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
-#define atomic64_try_cmpxchg_acquire(_p, _po, _n)      __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
-#define atomic64_try_cmpxchg_release(_p, _po, _n)      __atomic64_try_cmpxchg(_release, _p, _po, _n)
-
-#else /* atomic64_try_cmpxchg */
-#define atomic64_try_cmpxchg_relaxed   atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire   atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release   atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns the original value of @v.
- */
-#ifndef atomic64_fetch_add_unless
-static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
-                                                 long long u)
-{
-       long long c = atomic64_read(v);
-
-       do {
-               if (unlikely(c == u))
-                       break;
-       } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
-       return c;
-}
-#endif
-
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
-{
-       return atomic64_fetch_add_unless(v, a, u) != u;
-}
-
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-#ifndef atomic64_inc_not_zero
-#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
-#endif
-
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic64_inc_and_test
-static inline bool atomic64_inc_and_test(atomic64_t *v)
-{
-       return atomic64_inc_return(v) == 0;
-}
-#endif
-
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-#ifndef atomic64_dec_and_test
-static inline bool atomic64_dec_and_test(atomic64_t *v)
-{
-       return atomic64_dec_return(v) == 0;
-}
-#endif
-
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-#ifndef atomic64_sub_and_test
-static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
-{
-       return atomic64_sub_return(i, v) == 0;
-}
-#endif
-
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-#ifndef atomic64_add_negative
-static inline bool atomic64_add_negative(long long i, atomic64_t *v)
-{
-       return atomic64_add_return(i, v) < 0;
-}
-#endif
-
-#ifndef atomic64_inc_unless_negative
-static inline bool atomic64_inc_unless_negative(atomic64_t *v)
-{
-       long long c = atomic64_read(v);
-
-       do {
-               if (unlikely(c < 0))
-                       return false;
-       } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
-       return true;
-}
-#endif
-
-#ifndef atomic64_dec_unless_positive
-static inline bool atomic64_dec_unless_positive(atomic64_t *v)
-{
-       long long c = atomic64_read(v);
-
-       do {
-               if (unlikely(c > 0))
-                       return false;
-       } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
-       return true;
-}
-#endif
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- *
- * The function returns the old value of *v minus 1, even if
- * the atomic64 variable, v, was not decremented.
- */
-#ifndef atomic64_dec_if_positive
-static inline long long atomic64_dec_if_positive(atomic64_t *v)
-{
-       long long dec, c = atomic64_read(v);
-
-       do {
-               dec = c - 1;
-               if (unlikely(dec < 0))
-                       break;
-       } while (!atomic64_try_cmpxchg(v, &c, dec));
-
-       return dec;
-}
-#endif
-
-#define atomic64_cond_read_relaxed(v, c)       smp_cond_load_relaxed(&(v)->counter, (c))
-#define atomic64_cond_read_acquire(v, c)       smp_cond_load_acquire(&(v)->counter, (c))
+#include <linux/atomic-fallback.h>
 
 #include <asm-generic/atomic-long.h>
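All of the open-coded ordering fallbacks deleted above now live in the generated include/linux/atomic-fallback.h. As a rough illustration (hand-expanded from the gen_xchg_fallbacks() heredoc in scripts/atomic/gen-atomic-fallback.sh further down, not pasted from the generated file), the plain xchg() family keeps exactly the shape it had here:

	#ifndef xchg_relaxed
	#define xchg_relaxed		xchg
	#define xchg_acquire		xchg
	#define xchg_release		xchg
	#else /* xchg_relaxed */

	#ifndef xchg_acquire
	#define xchg_acquire(...) \
		__atomic_op_acquire(xchg, __VA_ARGS__)
	#endif

	/* _release is emitted the same way */

	#ifndef xchg
	#define xchg(...) \
		__atomic_op_fence(xchg, __VA_ARGS__)
	#endif

	#endif /* xchg_relaxed */

The atomic_*() and atomic64_*() operations are instead generated from the scripts/atomic/atomics.tbl descriptions below.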
 
diff --git a/scripts/atomic/atomic-tbl.sh b/scripts/atomic/atomic-tbl.sh
new file mode 100755 (executable)
index 0000000..9d6be53
--- /dev/null
@@ -0,0 +1,186 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# helpers for dealing with atomics.tbl
+
+#meta_in(meta, match)
+meta_in()
+{
+       case "$1" in
+       [$2]) return 0;;
+       esac
+
+       return 1
+}
+
+#meta_has_ret(meta)
+meta_has_ret()
+{
+       meta_in "$1" "bBiIfFlR"
+}
+
+#meta_has_acquire(meta)
+meta_has_acquire()
+{
+       meta_in "$1" "BFIlR"
+}
+
+#meta_has_release(meta)
+meta_has_release()
+{
+       meta_in "$1" "BFIRs"
+}
+
+#meta_has_relaxed(meta)
+meta_has_relaxed()
+{
+       meta_in "$1" "BFIR"
+}
+
+#find_fallback_template(pfx, name, sfx, order)
+find_fallback_template()
+{
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+
+       local base=""
+       local file=""
+
+       # We may have fallbacks for a specific case (e.g. read_acquire()), or
+       # an entire class, e.g. *inc*().
+       #
+       # Start at the most specific, and fall back to the most general. Once
+       # we find a specific fallback, don't bother looking for more.
+       for base in "${pfx}${name}${sfx}${order}" "${name}"; do
+               file="${ATOMICDIR}/fallbacks/${base}"
+
+               if [ -f "${file}" ]; then
+                       printf "${file}"
+                       break
+               fi
+       done
+}
+
+#gen_ret_type(meta, int)
+gen_ret_type() {
+       local meta="$1"; shift
+       local int="$1"; shift
+
+       case "${meta}" in
+       [sv]) printf "void";;
+       [bB]) printf "bool";;
+       [aiIfFlR]) printf "${int}";;
+       esac
+}
+
+#gen_ret_stmt(meta)
+gen_ret_stmt()
+{
+       if meta_has_ret "${meta}"; then
+               printf "return ";
+       fi
+}
+
+# gen_param_name(arg)
+gen_param_name()
+{
+       # strip off the leading 'c' for 'cv'
+       local name="${1#c}"
+       printf "${name#*:}"
+}
+
+# gen_param_type(arg, int, atomic)
+gen_param_type()
+{
+       local type="${1%%:*}"; shift
+       local int="$1"; shift
+       local atomic="$1"; shift
+
+       case "${type}" in
+       i) type="${int} ";;
+       p) type="${int} *";;
+       v) type="${atomic}_t *";;
+       cv) type="const ${atomic}_t *";;
+       esac
+
+       printf "${type}"
+}
+
+#gen_param(arg, int, atomic)
+gen_param()
+{
+       local arg="$1"; shift
+       local int="$1"; shift
+       local atomic="$1"; shift
+       local name="$(gen_param_name "${arg}")"
+       local type="$(gen_param_type "${arg}" "${int}" "${atomic}")"
+
+       printf "${type}${name}"
+}
+
+#gen_params(int, atomic, arg...)
+gen_params()
+{
+       local int="$1"; shift
+       local atomic="$1"; shift
+
+       while [ "$#" -gt 0 ]; do
+               gen_param "$1" "${int}" "${atomic}"
+               [ "$#" -gt 1 ] && printf ", "
+               shift;
+       done
+}
+
+#gen_args(arg...)
+gen_args()
+{
+       while [ "$#" -gt 0 ]; do
+               printf "$(gen_param_name "$1")"
+               [ "$#" -gt 1 ] && printf ", "
+               shift;
+       done
+}
+
+#gen_proto_order_variants(meta, pfx, name, sfx, ...)
+gen_proto_order_variants()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+
+       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+       if meta_has_acquire "${meta}"; then
+               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+       fi
+       if meta_has_release "${meta}"; then
+               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+       fi
+       if meta_has_relaxed "${meta}"; then
+               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+       fi
+}
+
+#gen_proto_variants(meta, name, ...)
+gen_proto_variants()
+{
+       local meta="$1"; shift
+       local name="$1"; shift
+       local pfx=""
+       local sfx=""
+
+       meta_in "${meta}" "fF" && pfx="fetch_"
+       meta_in "${meta}" "R" && sfx="_return"
+
+       gen_proto_order_variants "${meta}" "${pfx}" "${name}" "${sfx}" "$@"
+}
+
+#gen_proto(meta, ...)
+gen_proto() {
+       local meta="$1"; shift
+       for m in $(echo "${meta}" | fold -w1); do
+               gen_proto_variants "${m}" "$@"
+       done
+}
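To make the meta/arg encodings concrete: for the atomics.tbl entry `cmpxchg I v i:old i:new` (see the next file), gen_ret_type() maps `I` to the base integer type, and gen_params() maps `v` to a pointer to the atomic type and `i:old`/`i:new` to named integer arguments, so the generated prototype comes out as (hand-worked sketch for the int/atomic case, not generator output):

	int atomic_cmpxchg(atomic_t *v, int old, int new);

Because `I` is upper-case, _acquire/_release/_relaxed orderings of the same prototype are produced as well.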
diff --git a/scripts/atomic/atomics.tbl b/scripts/atomic/atomics.tbl
new file mode 100755 (executable)
index 0000000..fbee2f6
--- /dev/null
@@ -0,0 +1,41 @@
+# name meta    args...
+#
+# Where meta contains a string of variants to generate.
+# Upper-case implies _{acquire,release,relaxed} variants.
+# Valid meta values are:
+# * B/b        - bool: returns bool
+# * v  - void: returns void
+# * I/i        - int: returns base type
+# * R  - return: returns base type (has _return variants)
+# * F/f        - fetch: returns base type (has fetch_ variants)
+# * l  - load: returns base type (has _acquire order variant)
+# * s  - store: returns void (has _release order variant)
+#
+# Where args contains list of type[:name], where type is:
+# * cv - const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
+# * v  - pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
+# * i  - base type (int/s64/long)
+# * p  - pointer to base type (int/s64/long)
+#
+read                   l       cv
+set                    s       v       i
+add                    vRF     i       v
+sub                    vRF     i       v
+inc                    vRF     v
+dec                    vRF     v
+and                    vF      i       v
+andnot                 vF      i       v
+or                     vF      i       v
+xor                    vF      i       v
+xchg                   I       v       i
+cmpxchg                        I       v       i:old   i:new
+try_cmpxchg            B       v       p:old   i:new
+sub_and_test           b       i       v
+dec_and_test           b       v
+inc_and_test           b       v
+add_negative           b       i       v
+add_unless             fb      v       i:a     i:u
+inc_not_zero           b       v
+inc_unless_negative    b       v
+dec_unless_positive    b       v
+dec_if_positive                i       v
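Each line above names a whole family. Reading `add vRF i v` through the legend: `v` yields a void variant, `R` yields _return variants, `F` yields fetch_ variants, and the upper-case letters add the ordering variants. For the int/atomic instantiation that amounts to (sketch, not generator output):

	void atomic_add(int i, atomic_t *v);
	int  atomic_add_return(int i, atomic_t *v);	/* + _acquire/_release/_relaxed */
	int  atomic_fetch_add(int i, atomic_t *v);	/* + _acquire/_release/_relaxed */

with the same shapes repeated for atomic64 (s64) and atomic_long (long).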
diff --git a/scripts/atomic/check-atomics.sh b/scripts/atomic/check-atomics.sh
new file mode 100755 (executable)
index 0000000..c30101c
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check if atomic headers are up-to-date
+
+ATOMICDIR=$(dirname $0)
+ATOMICTBL=${ATOMICDIR}/atomics.tbl
+LINUXDIR=${ATOMICDIR}/../..
+
+cat <<EOF |
+gen-atomic-instrumented.sh      asm-generic/atomic-instrumented.h
+gen-atomic-long.sh              asm-generic/atomic-long.h
+gen-atomic-fallback.sh          linux/atomic-fallback.h
+EOF
+while read script header; do
+       if ! (${ATOMICDIR}/${script} ${ATOMICTBL} | diff - ${LINUXDIR}/include/${header} > /dev/null); then
+               printf "warning: include/${header} is out-of-date.\n"
+       fi
+done
diff --git a/scripts/atomic/fallbacks/acquire b/scripts/atomic/fallbacks/acquire
new file mode 100755 (executable)
index 0000000..e38871e
--- /dev/null
@@ -0,0 +1,9 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}_acquire(${params})
+{
+       ${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       __atomic_acquire_fence();
+       return ret;
+}
+EOF
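Substituting, say, atomic=atomic, pfx=fetch_, name=add, sfx="" into this template gives the usual relaxed-plus-fence construction (hand-expanded sketch):

	static inline int
	atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = atomic_fetch_add_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}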
diff --git a/scripts/atomic/fallbacks/add_negative b/scripts/atomic/fallbacks/add_negative
new file mode 100755 (executable)
index 0000000..e6f4815
--- /dev/null
@@ -0,0 +1,16 @@
+cat <<EOF
+/**
+ * ${atomic}_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline bool
+${atomic}_add_negative(${int} i, ${atomic}_t *v)
+{
+       return ${atomic}_add_return(i, v) < 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/add_unless b/scripts/atomic/fallbacks/add_unless
new file mode 100755 (executable)
index 0000000..7925338
--- /dev/null
@@ -0,0 +1,16 @@
+cat << EOF
+/**
+ * ${atomic}_add_unless - add unless the number is already a given value
+ * @v: pointer of type ${atomic}_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool
+${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+{
+       return ${atomic}_fetch_add_unless(v, a, u) != u;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/andnot b/scripts/atomic/fallbacks/andnot
new file mode 100755 (executable)
index 0000000..9f3a321
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+{
+       ${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
+}
+EOF
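For the plain void variant (meta `v`) this expands to, e.g. (sketch):

	static inline void
	atomic_andnot(int i, atomic_t *v)
	{
		atomic_and(~i, v);
	}

i.e. andnot is always reducible to and with a complemented mask.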
diff --git a/scripts/atomic/fallbacks/dec b/scripts/atomic/fallbacks/dec
new file mode 100755 (executable)
index 0000000..10bbc82
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+{
+       ${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_and_test b/scripts/atomic/fallbacks/dec_and_test
new file mode 100755 (executable)
index 0000000..0ce7103
--- /dev/null
@@ -0,0 +1,15 @@
+cat <<EOF
+/**
+ * ${atomic}_dec_and_test - decrement and test
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline bool
+${atomic}_dec_and_test(${atomic}_t *v)
+{
+       return ${atomic}_dec_return(v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_if_positive b/scripts/atomic/fallbacks/dec_if_positive
new file mode 100755 (executable)
index 0000000..c52eace
--- /dev/null
@@ -0,0 +1,15 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_dec_if_positive(${atomic}_t *v)
+{
+       ${int} dec, c = ${atomic}_read(v);
+
+       do {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+       } while (!${atomic}_try_cmpxchg(v, &c, dec));
+
+       return dec;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/dec_unless_positive b/scripts/atomic/fallbacks/dec_unless_positive
new file mode 100755 (executable)
index 0000000..8a2578f
--- /dev/null
@@ -0,0 +1,14 @@
+cat <<EOF
+static inline bool
+${atomic}_dec_unless_positive(${atomic}_t *v)
+{
+       ${int} c = ${atomic}_read(v);
+
+       do {
+               if (unlikely(c > 0))
+                       return false;
+       } while (!${atomic}_try_cmpxchg(v, &c, c - 1));
+
+       return true;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/fence b/scripts/atomic/fallbacks/fence
new file mode 100755 (executable)
index 0000000..82f68fa
--- /dev/null
@@ -0,0 +1,11 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}(${params})
+{
+       ${ret} ret;
+       __atomic_pre_full_fence();
+       ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+       __atomic_post_full_fence();
+       return ret;
+}
+EOF
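This is the fully-ordered fallback built from a relaxed primitive; for atomic_fetch_add it expands to (sketch):

	static inline int
	atomic_fetch_add(int i, atomic_t *v)
	{
		int ret;
		__atomic_pre_full_fence();
		ret = atomic_fetch_add_relaxed(i, v);
		__atomic_post_full_fence();
		return ret;
	}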
diff --git a/scripts/atomic/fallbacks/fetch_add_unless b/scripts/atomic/fallbacks/fetch_add_unless
new file mode 100755 (executable)
index 0000000..d2c091d
--- /dev/null
@@ -0,0 +1,23 @@
+cat << EOF
+/**
+ * ${atomic}_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type ${atomic}_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns the original value of @v.
+ */
+static inline ${int}
+${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+{
+       ${int} c = ${atomic}_read(v);
+
+       do {
+               if (unlikely(c == u))
+                       break;
+       } while (!${atomic}_try_cmpxchg(v, &c, c + a));
+
+       return c;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc b/scripts/atomic/fallbacks/inc
new file mode 100755 (executable)
index 0000000..f866b3a
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
+{
+       ${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_and_test b/scripts/atomic/fallbacks/inc_and_test
new file mode 100755 (executable)
index 0000000..4e20688
--- /dev/null
@@ -0,0 +1,15 @@
+cat <<EOF
+/**
+ * ${atomic}_inc_and_test - increment and test
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+${atomic}_inc_and_test(${atomic}_t *v)
+{
+       return ${atomic}_inc_return(v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_not_zero b/scripts/atomic/fallbacks/inc_not_zero
new file mode 100755 (executable)
index 0000000..a7c45c8
--- /dev/null
@@ -0,0 +1,14 @@
+cat <<EOF
+/**
+ * ${atomic}_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+static inline bool
+${atomic}_inc_not_zero(${atomic}_t *v)
+{
+       return ${atomic}_add_unless(v, 1, 0);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/inc_unless_negative b/scripts/atomic/fallbacks/inc_unless_negative
new file mode 100755 (executable)
index 0000000..0c266e7
--- /dev/null
@@ -0,0 +1,14 @@
+cat <<EOF
+static inline bool
+${atomic}_inc_unless_negative(${atomic}_t *v)
+{
+       ${int} c = ${atomic}_read(v);
+
+       do {
+               if (unlikely(c < 0))
+                       return false;
+       } while (!${atomic}_try_cmpxchg(v, &c, c + 1));
+
+       return true;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/read_acquire b/scripts/atomic/fallbacks/read_acquire
new file mode 100755 (executable)
index 0000000..75863b5
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_read_acquire(const ${atomic}_t *v)
+{
+       return smp_load_acquire(&(v)->counter);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/release b/scripts/atomic/fallbacks/release
new file mode 100755 (executable)
index 0000000..3f628a3
--- /dev/null
@@ -0,0 +1,8 @@
+cat <<EOF
+static inline ${ret}
+${atomic}_${pfx}${name}${sfx}_release(${params})
+{
+       __atomic_release_fence();
+       ${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+}
+EOF
diff --git a/scripts/atomic/fallbacks/set_release b/scripts/atomic/fallbacks/set_release
new file mode 100755 (executable)
index 0000000..45bb5e0
--- /dev/null
@@ -0,0 +1,7 @@
+cat <<EOF
+static inline void
+${atomic}_set_release(${atomic}_t *v, ${int} i)
+{
+       smp_store_release(&(v)->counter, i);
+}
+EOF
diff --git a/scripts/atomic/fallbacks/sub_and_test b/scripts/atomic/fallbacks/sub_and_test
new file mode 100755 (executable)
index 0000000..289ef17
--- /dev/null
@@ -0,0 +1,16 @@
+cat <<EOF
+/**
+ * ${atomic}_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type ${atomic}_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline bool
+${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
+{
+       return ${atomic}_sub_return(i, v) == 0;
+}
+EOF
diff --git a/scripts/atomic/fallbacks/try_cmpxchg b/scripts/atomic/fallbacks/try_cmpxchg
new file mode 100755 (executable)
index 0000000..4ed85e2
--- /dev/null
@@ -0,0 +1,11 @@
+cat <<EOF
+static inline bool
+${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
+{
+       ${int} r, o = *old;
+       r = ${atomic}_cmpxchg${order}(v, o, new);
+       if (unlikely(r != o))
+               *old = r;
+       return likely(r == o);
+}
+EOF
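Expanded for the base atomic type and empty order this gives (sketch):

	static inline bool
	atomic_try_cmpxchg(atomic_t *v, int *old, int new)
	{
		int r, o = *old;
		r = atomic_cmpxchg(v, o, new);
		if (unlikely(r != o))
			*old = r;
		return likely(r == o);
	}

Because *old is updated with the observed value on failure, cmpxchg loops need no explicit re-read. That is exactly the shape used by the fetch_add_unless and inc_unless_negative fallbacks above:

	int c = atomic_read(v);
	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));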
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
new file mode 100755 (executable)
index 0000000..1bd7c17
--- /dev/null
@@ -0,0 +1,181 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
+gen_template_fallback()
+{
+       local template="$1"; shift
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+       local atomic="$1"; shift
+       local int="$1"; shift
+
+       local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+
+       local ret="$(gen_ret_type "${meta}" "${int}")"
+       local retstmt="$(gen_ret_stmt "${meta}")"
+       local params="$(gen_params "${int}" "${atomic}" "$@")"
+       local args="$(gen_args "$@")"
+
+       if [ ! -z "${template}" ]; then
+               printf "#ifndef ${atomicname}\n"
+               . ${template}
+               printf "#define ${atomicname} ${atomicname}\n"
+               printf "#endif\n\n"
+       fi
+}
+
+#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
+gen_proto_fallback()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+
+       local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+       gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+}
+
+#gen_basic_fallbacks(basename)
+gen_basic_fallbacks()
+{
+       local basename="$1"; shift
+cat << EOF
+#define ${basename}_acquire ${basename}
+#define ${basename}_release ${basename}
+#define ${basename}_relaxed ${basename}
+EOF
+}
+
+#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
+gen_proto_order_variants()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local atomic="$1"
+
+       local basename="${atomic}_${pfx}${name}${sfx}"
+
+       local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+
+       # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
+       # read_acquire and set_release need to be templated, though
+       if ! meta_has_relaxed "${meta}"; then
+               gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+               if meta_has_acquire "${meta}"; then
+                       gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+               fi
+
+               if meta_has_release "${meta}"; then
+                       gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+               fi
+
+               return
+       fi
+
+       printf "#ifndef ${basename}_relaxed\n"
+
+       if [ ! -z "${template}" ]; then
+               printf "#ifdef ${basename}\n"
+       fi
+
+       gen_basic_fallbacks "${basename}"
+
+       if [ ! -z "${template}" ]; then
+               printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
+               gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+               gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+               gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+               gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+       fi
+
+       printf "#else /* ${basename}_relaxed */\n\n"
+
+       gen_template_fallback "${ATOMICDIR}/fallbacks/acquire"  "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+       gen_template_fallback "${ATOMICDIR}/fallbacks/release"  "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+       gen_template_fallback "${ATOMICDIR}/fallbacks/fence"  "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+       printf "#endif /* ${basename}_relaxed */\n\n"
+}
+
+gen_xchg_fallbacks()
+{
+       local xchg="$1"; shift
+cat <<EOF
+#ifndef ${xchg}_relaxed
+#define ${xchg}_relaxed                ${xchg}
+#define ${xchg}_acquire                ${xchg}
+#define ${xchg}_release                ${xchg}
+#else /* ${xchg}_relaxed */
+
+#ifndef ${xchg}_acquire
+#define ${xchg}_acquire(...) \\
+       __atomic_op_acquire(${xchg}, __VA_ARGS__)
+#endif
+
+#ifndef ${xchg}_release
+#define ${xchg}_release(...) \\
+       __atomic_op_release(${xchg}, __VA_ARGS__)
+#endif
+
+#ifndef ${xchg}
+#define ${xchg}(...) \\
+       __atomic_op_fence(${xchg}, __VA_ARGS__)
+#endif
+
+#endif /* ${xchg}_relaxed */
+
+EOF
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+EOF
+
+for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+       gen_xchg_fallbacks "${xchg}"
+done
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+cat <<EOF
+#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+cat <<EOF
+#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
+#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+EOF
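For an operation that has ordering variants but no op-specific template (e.g. fetch_add), the block this script emits has roughly the following skeleton (hand-worked sketch):

	#ifndef atomic_fetch_add_relaxed
	#define atomic_fetch_add_acquire atomic_fetch_add
	#define atomic_fetch_add_release atomic_fetch_add
	#define atomic_fetch_add_relaxed atomic_fetch_add
	#else /* atomic_fetch_add_relaxed */

	#ifndef atomic_fetch_add_acquire
	static inline int
	atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = atomic_fetch_add_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}
	#define atomic_fetch_add_acquire atomic_fetch_add_acquire
	#endif

	/* _release and fully-ordered versions are emitted the same way */

	#endif /* atomic_fetch_add_relaxed */

So an architecture either defines the whole family, or defines only the _relaxed form and inherits the rest.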
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
new file mode 100755 (executable)
index 0000000..e098123
--- /dev/null
@@ -0,0 +1,182 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_param_check(arg)
+gen_param_check()
+{
+       local arg="$1"; shift
+       local type="${arg%%:*}"
+       local name="$(gen_param_name "${arg}")"
+       local rw="write"
+
+       case "${type#c}" in
+       i) return;;
+       esac
+
+       # We don't write to constant parameters
+       [ ${type#c} != ${type} ] && rw="read"
+
+       printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n"
+}
+
+#gen_params_checks(arg...)
+gen_params_checks()
+{
+       while [ "$#" -gt 0 ]; do
+               gen_param_check "$1"
+               shift;
+       done
+}
+
+# gen_guard(meta, atomic, pfx, name, sfx, order)
+gen_guard()
+{
+       local meta="$1"; shift
+       local atomic="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+
+       local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
+
+       local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
+
+       # We definitely need a preprocessor symbol for this atomic if it is an
+       # ordering variant, or if there's a generic fallback.
+       if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
+               printf "defined(${atomicname})"
+               return
+       fi
+
+       # If this is a base variant, but a relaxed variant *may* exist, then we
+       # only have a preprocessor symbol if the relaxed variant isn't defined
+       if meta_has_relaxed "${meta}"; then
+               printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
+       fi
+}
+
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
+gen_proto_order_variant()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+       local atomic="$1"; shift
+       local int="$1"; shift
+
+       local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
+
+       local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
+
+       local ret="$(gen_ret_type "${meta}" "${int}")"
+       local params="$(gen_params "${int}" "${atomic}" "$@")"
+       local checks="$(gen_params_checks "$@")"
+       local args="$(gen_args "$@")"
+       local retstmt="$(gen_ret_stmt "${meta}")"
+
+       [ ! -z "${guard}" ] && printf "#if ${guard}\n"
+
+cat <<EOF
+static inline ${ret}
+${atomicname}(${params})
+{
+${checks}
+       ${retstmt}arch_${atomicname}(${args});
+}
+#define ${atomicname} ${atomicname}
+EOF
+
+       [ ! -z "${guard}" ] && printf "#endif\n"
+
+       printf "\n"
+}
+
+gen_xchg()
+{
+       local xchg="$1"; shift
+       local mult="$1"; shift
+
+cat <<EOF
+#define ${xchg}(ptr, ...)                                              \\
+({                                                                     \\
+       typeof(ptr) __ai_ptr = (ptr);                                   \\
+       kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr));          \\
+       arch_${xchg}(__ai_ptr, __VA_ARGS__);                            \\
+})
+EOF
+}
+
+gen_optional_xchg()
+{
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local guard="defined(arch_${name}${sfx})"
+
+       [ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
+
+       printf "#if ${guard}\n"
+       gen_xchg "${name}${sfx}" ""
+       printf "#endif\n\n"
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for the actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */
+#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/kasan-checks.h>
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
+       for order in "" "_acquire" "_release" "_relaxed"; do
+               gen_optional_xchg "${xchg}" "${order}"
+       done
+done
+
+for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
+       gen_xchg "${xchg}" ""
+       printf "\n"
+done
+
+gen_xchg "cmpxchg_double" "2 * "
+
+printf "\n\n"
+
+gen_xchg "cmpxchg_double_local" "2 * "
+
+cat <<EOF
+
+#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+EOF
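Worked through by hand for atomic_add_return (args `i v`: the integer is not checked, the atomic pointer gets a write check), the emitted wrapper looks like (sketch):

	#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
	static inline int
	atomic_add_return(int i, atomic_t *v)
	{
		kasan_check_write(v, sizeof(*v));
		return arch_atomic_add_return(i, v);
	}
	#define atomic_add_return atomic_add_return
	#endif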
diff --git a/scripts/atomic/gen-atomic-long.sh b/scripts/atomic/gen-atomic-long.sh
new file mode 100755 (executable)
index 0000000..c240a72
--- /dev/null
@@ -0,0 +1,101 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+ATOMICDIR=$(dirname $0)
+
+. ${ATOMICDIR}/atomic-tbl.sh
+
+#gen_cast(arg, int, atomic)
+gen_cast()
+{
+       local arg="$1"; shift
+       local int="$1"; shift
+       local atomic="$1"; shift
+
+       [ "${arg%%:*}" = "p" ] || return
+
+       printf "($(gen_param_type "${arg}" "${int}" "${atomic}"))"
+}
+
+#gen_args_cast(int, atomic, arg...)
+gen_args_cast()
+{
+       local int="$1"; shift
+       local atomic="$1"; shift
+
+       while [ "$#" -gt 0 ]; do
+               local cast="$(gen_cast "$1" "${int}" "${atomic}")"
+               local arg="$(gen_param_name "$1")"
+               printf "${cast}${arg}"
+               [ "$#" -gt 1 ] && printf ", "
+               shift;
+       done
+}
+
+#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
+gen_proto_order_variant()
+{
+       local meta="$1"; shift
+       local name="$1$2$3$4"; shift; shift; shift; shift
+       local atomic="$1"; shift
+       local int="$1"; shift
+
+       local ret="$(gen_ret_type "${meta}" "long")"
+       local params="$(gen_params "long" "atomic_long" "$@")"
+       local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")"
+       local retstmt="$(gen_ret_stmt "${meta}")"
+
+cat <<EOF
+static inline ${ret}
+atomic_long_${name}(${params})
+{
+       ${retstmt}${atomic}_${name}(${argscast});
+}
+
+EOF
+}
+
+cat << EOF
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by $0
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _ASM_GENERIC_ATOMIC_LONG_H
+#define _ASM_GENERIC_ATOMIC_LONG_H
+
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire  atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i)            ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire  atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed  atomic_cond_read_relaxed
+#endif
+
+#ifdef CONFIG_64BIT
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
+done
+
+cat <<EOF
+#else /* CONFIG_64BIT */
+
+EOF
+
+grep '^[a-z]' "$1" | while read name meta args; do
+       gen_proto "${meta}" "${name}" "atomic" "int" ${args}
+done
+
+cat <<EOF
+#endif /* CONFIG_64BIT */
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+EOF
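On a CONFIG_64BIT build the per-op wrappers reduce to plain forwarders, e.g. for add_return (sketch):

	static inline long
	atomic_long_add_return(long i, atomic_long_t *v)
	{
		return atomic64_add_return(i, v);
	}

Only `p` (pointer-to-base-type) arguments grow a cast via gen_args_cast(); `i` and `v` arguments pass straight through, relying on atomic_long_t being a typedef of atomic64_t (or of atomic_t on 32-bit).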