/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)          { (i) }
#define ATOMIC64_INIT(i)        { (i) }

#define atomic_read(v)          READ_ONCE((v)->counter)
#define atomic64_read(v)        READ_ONCE((v)->counter)

#define atomic_set(v,i)         WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)       WRITE_ONCE(((v)->counter), (i))

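/*
 * ATOMIC_OP()/ATOMIC_FETCH_OP() build the operations that have no
 * single-instruction form out of a compare-and-exchange retry loop:
 * read the current counter, compute the new value, and retry the
 * acquire-ordered cmpxchg until no other CPU has updated the counter
 * in the meantime.  ATOMIC_OP() returns the new value,
 * ATOMIC_FETCH_OP() the old one.
 */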
#define ATOMIC_OP(op, c_op)                                             \
static __inline__ int                                                   \
ia64_atomic_##op (int i, atomic_t *v)                                   \
{                                                                       \
        __s32 old, new;                                                 \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
                CMPXCHG_BUGCHECK(v);                                    \
                old = atomic_read(v);                                   \
                new = old c_op i;                                       \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return new;                                                     \
}

#define ATOMIC_FETCH_OP(op, c_op)                                       \
static __inline__ int                                                   \
ia64_atomic_fetch_##op (int i, atomic_t *v)                             \
{                                                                       \
        __s32 old, new;                                                 \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
                CMPXCHG_BUGCHECK(v);                                    \
                old = atomic_read(v);                                   \
                new = old c_op i;                                       \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return old;                                                     \
}

#define ATOMIC_OPS(op, c_op)                                            \
        ATOMIC_OP(op, c_op)                                             \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

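/*
 * The ia64 fetchadd4/fetchadd8 instructions can only encode an
 * immediate increment of +/-1, +/-4, +/-8 or +/-16.  The macros below
 * therefore use the single-instruction fetch-and-add when @i is a
 * compile-time constant from that set, and fall back to the cmpxchg
 * loops generated above for every other value.
 *
 * A minimal sketch of the two paths (the names below are illustrative
 * only, not part of this header):
 */
#if 0	/* usage sketch, not compiled */
static atomic_t example_counter = ATOMIC_INIT(0);

static void example_paths(void)
{
        atomic_add_return(4, &example_counter); /* constant 4: fetchadd path */
        atomic_add_return(5, &example_counter); /* 5 not encodable: cmpxchg loop */
}
#endif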
#define atomic_add_return(i,v)                                          \
({                                                                      \
        int __ia64_aar_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic_add(__ia64_aar_i, v);                     \
})

#define atomic_sub_return(i,v)                                          \
({                                                                      \
        int __ia64_asr_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic_sub(__ia64_asr_i, v);                     \
})

#define atomic_fetch_add(i,v)                                           \
({                                                                      \
        int __ia64_aar_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
                : ia64_atomic_fetch_add(__ia64_aar_i, v);               \
})

#define atomic_fetch_sub(i,v)                                           \
({                                                                      \
        int __ia64_asr_i = (i);                                         \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
                : ia64_atomic_fetch_sub(__ia64_asr_i, v);               \
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)  (void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)   ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)    ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)   ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

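/*
 * A minimal usage sketch of the 32-bit API above (names and values
 * are illustrative only, not part of this header):
 */
#if 0	/* usage sketch, not compiled */
static atomic_t example_flags = ATOMIC_INIT(0);

static void example_usage(void)
{
        int old;

        atomic_set(&example_flags, 1);
        old = atomic_fetch_or(0x4, &example_flags); /* old == 1, counter now 0x5 */
        old = atomic_add_return(8, &example_flags); /* returns the new value, 0xd */
}
#endif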
#define ATOMIC64_OP(op, c_op)                                           \
static __inline__ long                                                  \
ia64_atomic64_##op (__s64 i, atomic64_t *v)                             \
{                                                                       \
        __s64 old, new;                                                 \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
                CMPXCHG_BUGCHECK(v);                                    \
                old = atomic64_read(v);                                 \
                new = old c_op i;                                       \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return new;                                                     \
}

#define ATOMIC64_FETCH_OP(op, c_op)                                     \
static __inline__ long                                                  \
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)                       \
{                                                                       \
        __s64 old, new;                                                 \
        CMPXCHG_BUGCHECK_DECL                                           \
                                                                        \
        do {                                                            \
                CMPXCHG_BUGCHECK(v);                                    \
                old = atomic64_read(v);                                 \
                new = old c_op i;                                       \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return old;                                                     \
}

#define ATOMIC64_OPS(op, c_op)                                          \
        ATOMIC64_OP(op, c_op)                                           \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)                                        \
({                                                                      \
        long __ia64_aar_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)       \
                : ia64_atomic64_add(__ia64_aar_i, v);                   \
})

#define atomic64_sub_return(i,v)                                        \
({                                                                      \
        long __ia64_asr_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)      \
                : ia64_atomic64_sub(__ia64_asr_i, v);                   \
})

#define atomic64_fetch_add(i,v)                                         \
({                                                                      \
        long __ia64_aar_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
             || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
             || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
                : ia64_atomic64_fetch_add(__ia64_aar_i, v);             \
})

#define atomic64_fetch_sub(i,v)                                         \
({                                                                      \
        long __ia64_asr_i = (i);                                        \
        (__builtin_constant_p(i)                                        \
         && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
             || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
             || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
             || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
                : ia64_atomic64_fetch_sub(__ia64_asr_i, v);             \
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)       (void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)        (void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)       (void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)  ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
        (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
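
/*
 * A minimal sketch of the usual add-unless idiom (the function name
 * is illustrative only, not part of this header): take a reference
 * unless the count has already dropped to zero.  __atomic_add_unless()
 * returns the old value, so a non-zero return means the increment
 * happened.
 */
#if 0	/* usage sketch, not compiled */
static int example_get_unless_zero(atomic_t *refs)
{
        return __atomic_add_unless(refs, 1, 0) != 0;
}
#endif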

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

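/*
 * Atomically decrement @v only if the result stays non-negative.
 * Returns the decremented value; a negative return means @v was
 * already <= 0 and was left unmodified.
 */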
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
        long c, old, dec;
        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)      (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)        (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)        (atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)                 (void)atomic_add_return((i), (v))
#define atomic_sub(i,v)                 (void)atomic_sub_return((i), (v))
#define atomic_inc(v)                   atomic_add(1, (v))
#define atomic_dec(v)                   atomic_sub(1, (v))

#define atomic64_add(i,v)               (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)               (void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))
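
/*
 * Sketch of the classic reference-count teardown idiom built on
 * atomic_dec_and_test() (the names are illustrative only, not part
 * of this header):
 */
#if 0	/* usage sketch, not compiled */
static void example_put(atomic_t *refs)
{
        if (atomic_dec_and_test(refs)) {
                /* last reference dropped: release the object here */
        }
}
#endif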

#endif /* _ASM_IA64_ATOMIC_H */