/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()                                        \
        __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()                                        \
        __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
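
/*
 * The generic atomic layer combines these fences with the _relaxed
 * operations below to build the _acquire/_release/full-barrier
 * variants. Roughly (illustrative sketch of the generated fallbacks
 * under include/linux/atomic/):
 *
 *      ret = arch_atomic_add_return_relaxed(i, v);
 *      __atomic_acquire_fence();       // yields the _acquire variant
 */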

static __inline__ int arch_atomic_read(const atomic_t *v)
{
        int t;

        __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

        return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}

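/*
 * A plain aligned load or store is single-copy atomic on powerpc, so
 * arch_atomic_read()/arch_atomic_set() above need no special sequence;
 * the %U/%X modifiers together with UPD_CONSTR only let the compiler
 * pick update-form and indexed addressing where the toolchain allows.
 *
 * The read-modify-write operations below use a load-reserve/
 * store-conditional loop instead: lwarx loads the word and takes a
 * reservation, stwcx. stores only if the reservation is still held,
 * and "bne- 1b" retries the sequence when the reservation was lost.
 */
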
#define ATOMIC_OP(op, asm_op)                                           \
static __inline__ void arch_atomic_##op(int a, atomic_t *v)             \
{                                                                       \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%3         # atomic_" #op "\n"                     \
        #asm_op " %0,%2,%0\n"                                           \
"       stwcx.  %0,0,%3 \n"                                             \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{                                                                       \
        int t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%3         # atomic_" #op "_return_relaxed\n"      \
        #asm_op " %0,%2,%0\n"                                           \
"       stwcx.  %0,0,%3\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return t;                                                       \
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)                             \
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)  \
{                                                                       \
        int res, t;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     lwarx   %0,0,%4         # atomic_fetch_" #op "_relaxed\n"       \
        #asm_op " %1,%3,%0\n"                                           \
"       stwcx.  %1,0,%4\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return res;                                                     \
}

#define ATOMIC_OPS(op, asm_op)                                          \
        ATOMIC_OP(op, asm_op)                                           \
        ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
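
/*
 * ATOMIC_OPS(add, add) and ATOMIC_OPS(sub, subf) generate
 * arch_atomic_{add,sub}(), arch_atomic_{add,sub}_return_relaxed() and
 * arch_atomic_fetch_{add,sub}_relaxed(). Note the operand order of
 * subf ("subtract from"): subf rt,ra,rb computes rb - ra, so the
 * template's "subf %0,%2,%0" yields counter - a as intended.
 */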

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)                                          \
        ATOMIC_OP(op, asm_op)                                           \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed  arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void arch_atomic_inc(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc\n"
"       addic   %0,%0,1\n"
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define arch_atomic_inc arch_atomic_inc
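
/*
 * The inc/dec variants here and below use addic, which updates the
 * carry (CA) bit in the XER register; hence "xer" must appear in the
 * clobber list alongside "cc".
 */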

static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_inc_return_relaxed\n"
"       addic   %0,%0,1\n"
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void arch_atomic_dec(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec\n"
"       addic   %0,%0,-1\n"
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define arch_atomic_dec arch_atomic_dec

static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%2         # atomic_dec_return_relaxed\n"
"       addic   %0,%0,-1\n"
"       stwcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed

#define arch_atomic_cmpxchg(v, o, n) \
        (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
        arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic_cmpxchg_acquire(v, o, n) \
        arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic_xchg(v, new) \
        (arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
        arch_xchg_relaxed(&((v)->counter), (new))

/*
 * Don't override the generic atomic_try_cmpxchg_acquire: this variant
 * adds a lock hint to the lwarx, which may not be wanted in the
 * generic _acquire case (no other _acquire variant uses the hint, so
 * it would be a surprise).
 */
static __always_inline bool
arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
        int r, o = *old;

        __asm__ __volatile__ (
"1:\t"  PPC_LWARX(%0,0,%2,1) "  # atomic_try_cmpxchg_lock       \n"
"       cmpw    0,%0,%3                                                 \n"
"       bne-    2f                                                      \n"
"       stwcx.  %4,0,%2                                                 \n"
"       bne-    1b                                                      \n"
"\t"    PPC_ACQUIRE_BARRIER "                                           \n"
"2:                                                                     \n"
        : "=&r" (r), "+m" (v->counter)
        : "r" (&v->counter), "r" (o), "r" (new)
        : "cr0", "memory");

        if (unlikely(r != o))
                *old = r;
        return likely(r == o);
}
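
/*
 * Illustrative use (sketch; lock->val and LOCKED_VAL are hypothetical
 * names): a spinlock fast path that wants the lock hint on the
 * load-reserve:
 *
 *      int old = 0;
 *      if (arch_atomic_try_cmpxchg_lock(&lock->val, &old, LOCKED_VAL))
 *              return;         // acquired; acquire barrier already done
 *      // old holds the value observed; fall back to the slow path
 */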

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%1         # atomic_fetch_add_unless\n\
        cmpw    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
"       stwcx.  %0,0,%1 \n\
        bne-    1b \n"
        PPC_ATOMIC_EXIT_BARRIER
"       subf    %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
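
/*
 * Example: arch_atomic_fetch_add_unless(v, 1, 0) increments @v only
 * if it was non-zero and returns the old value, i.e. the classic
 * "take a reference only while one is still held" pattern.
 */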

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
        int t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     lwarx   %0,0,%2         # atomic_inc_not_zero\n\
        cmpwi   0,%0,0\n\
        beq-    2f\n\
        addic   %1,%0,1\n"
"       stwcx.  %1,0,%2\n\
        bne-    1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1;
}
#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
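
/*
 * Typical use (sketch; obj/refcnt are illustrative names): the lookup
 * side of a reference-counted object, taking a reference only while
 * the object is still live:
 *
 *      if (!arch_atomic_inc_not_zero(&obj->refcnt))
 *              obj = NULL;     // already on its way to being freed
 */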
287
288 /*
289  * Atomically test *v and decrement if it is greater than 0.
290  * The function returns the old value of *v minus 1, even if
291  * the atomic variable, v, was not decremented.
292  */
293 static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
294 {
295         int t;
296
297         __asm__ __volatile__(
298         PPC_ATOMIC_ENTRY_BARRIER
299 "1:     lwarx   %0,0,%1         # atomic_dec_if_positive\n\
300         cmpwi   %0,1\n\
301         addi    %0,%0,-1\n\
302         blt-    2f\n"
303 "       stwcx.  %0,0,%1\n\
304         bne-    1b"
305         PPC_ATOMIC_EXIT_BARRIER
306         "\n\
307 2:"     : "=&b" (t)
308         : "r" (&v->counter)
309         : "cc", "memory");
310
311         return t;
312 }
313 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
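
/*
 * Note the "=&b" constraint above: "b" restricts the temporary to a
 * base register (not r0), because "addi %0,%0,-1" would read the
 * literal 0 rather than the register if r0 were chosen.
 */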

#ifdef __powerpc64__
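
/*
 * Native 64-bit atomics exist only on 64-bit CPUs; ppc32 kernels use
 * the generic spinlock-based atomic64 library (GENERIC_ATOMIC64)
 * instead. The implementations below mirror the 32-bit ones, using
 * ld/std and ldarx/stdcx. in place of the word-sized forms.
 */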

#define ATOMIC64_INIT(i)        { (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"UPD_CONSTR(v->counter));

        return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)                                         \
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)         \
{                                                                       \
        s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%3         # atomic64_" #op "\n"                   \
        #asm_op " %0,%2,%0\n"                                           \
"       stdcx.  %0,0,%3 \n"                                             \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
static inline s64                                                       \
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)               \
{                                                                       \
        s64 t;                                                          \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%3         # atomic64_" #op "_return_relaxed\n"    \
        #asm_op " %0,%2,%0\n"                                           \
"       stdcx.  %0,0,%3\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (t), "+m" (v->counter)                                  \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return t;                                                       \
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                           \
static inline s64                                                       \
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)                \
{                                                                       \
        s64 res, t;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
"1:     ldarx   %0,0,%4         # atomic64_fetch_" #op "_relaxed\n"     \
        #asm_op " %1,%3,%0\n"                                           \
"       stdcx.  %1,0,%4\n"                                              \
"       bne-    1b\n"                                                   \
        : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
        : "r" (a), "r" (&v->counter)                                    \
        : "cc");                                                        \
                                                                        \
        return res;                                                     \
}

#define ATOMIC64_OPS(op, asm_op)                                        \
        ATOMIC64_OP(op, asm_op)                                         \
        ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)                                        \
        ATOMIC64_OP(op, asm_op)                                         \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_inc\n"
"       addic   %0,%0,1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_inc_return_relaxed\n"
"       addic   %0,%0,1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_dec\n"
"       addic   %0,%0,-1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
"1:     ldarx   %0,0,%2         # atomic64_dec_return_relaxed\n"
"       addic   %0,%0,-1\n"
"       stdcx.  %0,0,%2\n"
"       bne-    1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable @v was not decremented.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 t;

        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
        stdcx.  %0,0,%1\n\
        bne-    1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"     : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
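
/*
 * Unlike the 32-bit version, this relies on the record form addic.,
 * which both decrements and sets CR0 from the result, so "blt- 2f"
 * skips the store when the decremented value went negative. addic.
 * also updates CA, hence the "xer" clobber.
 */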

#define arch_atomic64_cmpxchg(v, o, n) \
        (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
        arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
#define arch_atomic64_cmpxchg_acquire(v, o, n) \
        arch_cmpxchg_acquire(&((v)->counter), (o), (n))

#define arch_atomic64_xchg(v, new) \
        (arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
        arch_xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%1         # atomic64_fetch_add_unless\n\
        cmpd    0,%0,%3 \n\
        beq     2f \n\
        add     %0,%2,%0 \n"
"       stdcx.  %0,0,%1 \n\
        bne-    1b \n"
        PPC_ATOMIC_EXIT_BARRIER
"       subf    %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
        s64 t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1:     ldarx   %0,0,%2         # atomic64_inc_not_zero\n\
        cmpdi   0,%0,0\n\
        beq-    2f\n\
        addic   %1,%0,1\n\
        stdcx.  %1,0,%2\n\
        bne-    1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */