ARC: cmpxchg/xchg: implement relaxed variants (LLSC config only)
arch/arc/include/asm/cmpxchg.h (linux-2.6-microblaze.git)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * if (*ptr == @old)
 *      *ptr = @new
 */
#define __cmpxchg(ptr, old, new)                                        \
({                                                                      \
        __typeof__(*(ptr)) _prev;                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock  %0, [%1] \n"                                     \
        "       brne   %0, %2, 2f       \n"                             \
        "       scond  %3, [%1] \n"                                     \
        "       bnz     1b              \n"                             \
        "2:                             \n"                             \
        : "=&r"(_prev)  /* Early clobber prevents reg reuse */          \
        : "r"(ptr),     /* Not "m": llock only supports reg */          \
          "ir"(old),                                                    \
          "r"(new)      /* Not "ir": scond can't take LIMM */           \
        : "cc",                                                         \
          "memory");    /* gcc knows memory is clobbered */             \
                                                                        \
        _prev;                                                          \
})

#define arch_cmpxchg_relaxed(ptr, old, new)                             \
({                                                                      \
        __typeof__(ptr) _p_ = (ptr);                                    \
        __typeof__(*(ptr)) _o_ = (old);                                 \
        __typeof__(*(ptr)) _n_ = (new);                                 \
        __typeof__(*(ptr)) _prev_;                                      \
                                                                        \
        switch (sizeof(*(_p_))) {                                       \
        case 4:                                                         \
                _prev_ = __cmpxchg(_p_, _o_, _n_);                      \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        _prev_;                                                         \
})
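
/*
 * Editor's illustrative sketch, not part of the upstream header: a typical
 * caller wraps cmpxchg in a retry loop, re-reading the location until the
 * compare-and-swap succeeds. The function name is hypothetical and
 * READ_ONCE() comes from <linux/compiler.h>, which this file does not
 * include itself.
 */
#if 0
static inline int example_fetch_add_relaxed(int *p, int inc)
{
        int old, prev;

        do {
                old = READ_ONCE(*p);
                prev = arch_cmpxchg_relaxed(p, old, old + inc);
        } while (prev != old);  /* lost the race: someone else updated *p */

        return old;
}
#endif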

#else

#define arch_cmpxchg(ptr, old, new)                                     \
({                                                                      \
        volatile __typeof__(ptr) _p_ = (ptr);                           \
        __typeof__(*(ptr)) _o_ = (old);                                 \
        __typeof__(*(ptr)) _n_ = (new);                                 \
        __typeof__(*(ptr)) _prev_;                                      \
        unsigned long __flags;                                          \
                                                                        \
        BUILD_BUG_ON(sizeof(*(_p_)) != 4);                              \
                                                                        \
        /*                                                              \
         * spin lock/unlock provide the needed smp_mb() before/after    \
         */                                                             \
        atomic_ops_lock(__flags);                                       \
        _prev_ = *_p_;                                                  \
        if (_prev_ == _o_)                                              \
                *_p_ = _n_;                                             \
        atomic_ops_unlock(__flags);                                     \
        _prev_;                                                         \
})

#endif

/*
 * atomic_cmpxchg is the same as cmpxchg
 *   LLSC: only differs in data-type, semantics are exactly the same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))

/*
 * xchg
 */
#ifdef CONFIG_ARC_HAS_LLSC

#define __xchg(ptr, val)                                                \
({                                                                      \
        __asm__ __volatile__(                                           \
        "       ex  %0, [%1]    \n"     /* swap new value into *ptr */  \
        : "+r"(val)                                                     \
        : "r"(ptr)                                                      \
        : "memory");                                                    \
        val;            /* old value read back by EX */                 \
})

#define arch_xchg_relaxed(ptr, val)                                     \
({                                                                      \
        __typeof__(ptr) _p_ = (ptr);                                    \
        __typeof__(*(ptr)) _val_ = (val);                               \
                                                                        \
        switch (sizeof(*(_p_))) {                                       \
        case 4:                                                         \
                _val_ = __xchg(_p_, _val_);                             \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        _val_;                                                          \
})
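
/*
 * Editor's illustrative sketch, not part of the upstream header: unlike
 * cmpxchg, xchg unconditionally swaps in the new value and hands back the
 * old one in a single atomic EX, e.g. to claim a pending item. The type and
 * function names below are hypothetical.
 */
#if 0
struct example_item;

static inline struct example_item *example_claim(struct example_item **slot)
{
        /* Swap in NULL; whatever was queued before now belongs to us. */
        return arch_xchg_relaxed(slot, NULL);
}
#endif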

#else  /* !CONFIG_ARC_HAS_LLSC */

/*
 * The EX instruction is baseline and present in !LLSC configs too. But in
 * this regime it still needs to take the @atomic_ops_lock spinlock to allow
 * interop with cmpxchg(), which uses that spinlock in !LLSC
 * (llist.h uses xchg and cmpxchg on the same data)
 */

#define arch_xchg(ptr, val)                                             \
({                                                                      \
        __typeof__(ptr) _p_ = (ptr);                                    \
        __typeof__(*(ptr)) _val_ = (val);                               \
                                                                        \
        unsigned long __flags;                                          \
                                                                        \
        atomic_ops_lock(__flags);                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "       ex  %0, [%1]    \n"                                     \
        : "+r"(_val_)                                                   \
        : "r"(_p_)                                                      \
        : "memory");                                                    \
                                                                        \
        atomic_ops_unlock(__flags);                                     \
        _val_;                                                          \
})
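
/*
 * Editor's illustrative sketch of the interop requirement noted above:
 * llist-style code pushes entries with cmpxchg() and detaches the whole
 * list with xchg(). In the !LLSC regime both must serialize on
 * atomic_ops_lock, otherwise the locked read-modify-write in cmpxchg()
 * could race with a lockless EX and lose an update. The type and function
 * names are hypothetical; READ_ONCE() is from <linux/compiler.h>.
 */
#if 0
struct example_node { struct example_node *next; };

static inline void example_push(struct example_node **head,
                                struct example_node *n)
{
        struct example_node *first;

        do {
                first = READ_ONCE(*head);
                n->next = first;
        } while (arch_cmpxchg(head, first, n) != first);
}

static inline struct example_node *example_del_all(struct example_node **head)
{
        return arch_xchg(head, NULL);   /* detach the entire list atomically */
}
#endif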

#endif

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQ, so there is no way an atomic_xchg()
 *         from a different context could clobber them. atomic_xchg() itself
 *         would be a single insn, so it can't be clobbered by others. Thus
 *         no serialization is required when atomic_xchg is involved.
 */
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
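
/*
 * Editor's illustrative sketch: a common arch_atomic_xchg() consumer reads
 * a counter and resets it in one atomic step. The function name is
 * hypothetical; atomic_t comes from <linux/types.h>, included above.
 */
#if 0
static inline int example_read_and_reset(atomic_t *stat)
{
        return arch_atomic_xchg(stat, 0);       /* old count; counter is now 0 */
}
#endif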

#endif /* __ASM_ARC_CMPXCHG_H */