/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

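/*
 * SPINLOCK_LOCKVAL is this CPU's lock-owner value, kept in the lowcore.
 * It is computed as cpu + 1 (see arch_spin_lockval() below), so that a
 * lock word of 0 always means "unlocked" and a non-zero value
 * identifies the owning CPU.
 */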
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

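/*
 * arch_vcpu_is_preempted() reports whether the given (virtual) CPU has
 * been preempted by the hypervisor; lock waiters use this to avoid
 * spinning on a lock holder that currently cannot make progress.
 */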
bool arch_vcpu_is_preempted(int cpu);

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

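/*
 * These arch_* primitives are normally reached through the generic
 * <linux/spinlock.h> wrappers rather than called directly, e.g.:
 *
 *      DEFINE_SPINLOCK(lock);
 *
 *      spin_lock(&lock);
 *      ... critical section ...
 *      spin_unlock(&lock);
 */
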
void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
        return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

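/*
 * Lock acquisition is split into a fast path and a slow path: the
 * inline compare-and-swap in arch_spin_trylock_once() is attempted
 * first, and only on contention does the out-of-line
 * arch_spin_lock_wait() spin/queue code run.
 */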
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

/* The flags argument is ignored; interrupts stay disabled while spinning. */
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}
#define arch_spin_lock_flags    arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

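/*
 * Unlocking is a single halfword store of zero into the low-order
 * (owner) half of the lock word, which leaves any waiter state in the
 * upper halfword intact.  When facility 49 is installed, the
 * ALTERNATIVE patches in a NIAI ("next instruction access intent")
 * hint so the CPU can optimize cache-line access for the releasing
 * store, speeding up lock handover under contention.
 */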
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        asm_inline volatile(
                ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=R" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

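/*
 * Layout of rw->cnts: the lower 16 bits count active readers; a writer
 * acquires the lock by changing cnts from 0 to the 0x30000 write bias.
 * Any bit set in 0xffff0000 indicates that a writer holds or is
 * waiting for the lock and sends readers to the slow path.
 */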
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __atomic_add(1, &rw->cnts);
        if (old & 0xffff0000)
                arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
                arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __atomic_add_barrier(-0x30000, &rw->cnts);
}

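/*
 * These rwlock trylock variants make a single attempt based on a
 * snapshot of the counter and never spin; on failure the caller
 * decides whether to retry or back off.
 */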
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return (!(old & 0xffff0000) &&
                __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */