arch/powerpc/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * An int is used, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN      1
#endif
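
/*
 * struct paca_struct (asm/paca.h) keeps lock_token (the constant 0x8000) and
 * paca_index (the logical CPU number) in adjacent 16-bit fields whose order
 * is swapped by endianness, so the 32-bit load above yields 0x8000 in the
 * upper half-word and the CPU number in the lower half-word either way.
 */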

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
        if (!static_branch_unlikely(&shared_processor))
                return false;
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
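
/*
 * Usage sketch: generic spin-wait code consults vcpu_is_preempted() so it
 * can stop burning cycles once the CPU it is waiting on has lost its
 * physical processor.  The helper below is hypothetical and only shows the
 * pattern; real callers go through the generic locking code.
 */
static inline void example_wait_for_owner(int owner_cpu, volatile int *done)
{
        while (!*done) {
                if (vcpu_is_preempted(owner_cpu))
                        break;          /* owner is not running; stop spinning */
                cpu_relax();
        }
}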

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        smp_mb();
        return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}
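
/*
 * Conceptually, the lwarx/stwcx. loop above is a word-sized compare-and-swap:
 * claim the lock only if the word is 0 and return the value that was seen.
 * A plain C sketch of the same logic (illustration only, using the GCC
 * __atomic builtins rather than the kernel's primitives):
 */
static inline unsigned long example_spin_trylock_cas(arch_spinlock_t *lock,
                                                     unsigned int token)
{
        unsigned int old = 0;

        /* If slock == 0, store our token; acquire ordering on success. */
        if (__atomic_compare_exchange_n(&lock->slock, &old, token, false,
                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                return 0;               /* got the lock */
        return old;                     /* non-zero: the current holder's token */
}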

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
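
/*
 * A rough sketch of what the SPLPAR yield helpers declared below do with
 * that encoding (the real implementations live in arch/powerpc/lib/locks.c;
 * example_confer_to_holder() is a hypothetical stand-in for the H_CONFER
 * hypercall wrapper): recover the holder's CPU number from the lock word,
 * check whether that virtual processor is preempted, and if so hand it the
 * rest of our timeslice.
 */
void example_confer_to_holder(unsigned int holder_cpu);        /* hypothetical */

static inline void example_yield_to_holder(arch_spinlock_t *lock)
{
        unsigned int lock_value = lock->slock;
        unsigned int holder_cpu = lock_value & 0xffff;  /* CPU number, low bits */

        if (lock_value == 0)
                return;                         /* lock was just released */
        if (!vcpu_is_preempted(holder_cpu))
                return;                         /* holder is running; keep spinning */
        example_confer_to_holder(holder_cpu);   /* give it our timeslice */
}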

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_SPLPAR
        return static_branch_unlikely(&shared_processor);
#else
        return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
        if (is_shared_processor())
                splpar_spin_yield(lock);
        else
                barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
        if (is_shared_processor())
                splpar_rw_yield(lock);
        else
                barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
}
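
/*
 * Usage sketch: callers normally reach these entry points through the
 * generic spin_lock()/spin_unlock() wrappers, but the pairing and the
 * ordering guarantees can be shown directly.  The lock and counter below
 * are hypothetical.
 */
static inline void example_counter_inc(arch_spinlock_t *lock,
                                       unsigned long *counter)
{
        arch_spin_lock(lock);           /* acquire barrier after taking the lock */
        (*counter)++;                   /* protected update */
        arch_spin_unlock(lock);         /* release barrier, then clear slock */
}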

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}
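
/*
 * The rwlock word is a signed count: 0 means unlocked, a positive value is
 * the number of readers, and a writer stores the (negative) WRLOCK_TOKEN.
 * A conceptual C sketch of the reader fast path above (illustration only,
 * using the GCC __atomic builtins rather than lwarx/stwcx.):
 */
static inline long example_read_trylock_cas(arch_rwlock_t *rw)
{
        int old = READ_ONCE(rw->lock);

        while (old >= 0) {
                /* Try to bump the reader count; acquire ordering on success. */
                if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, false,
                                                __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                        return old + 1;         /* > 0: read lock taken */
                /* CAS failed: old now holds the value actually observed */
        }
        return old + 1;                         /* <= 0: a writer holds the lock */
}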

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
}

#define arch_spin_relax(lock)   spin_yield(lock)
#define arch_read_relax(lock)   rw_yield(lock)
#define arch_write_relax(lock)  rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()
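
/*
 * Sketch only, with hypothetical flags: lock acquisition above gives only
 * acquire semantics, so the store to *this_cpu_flag can still be reordered
 * after the load of *other_cpu_flag.  Callers that rely on that store->load
 * ordering (the scheduler's wakeup path is the classic user) issue
 * smp_mb__after_spinlock() once the lock is held.
 */
static inline int example_store_then_check(arch_spinlock_t *lock,
                                           int *this_cpu_flag,
                                           int *other_cpu_flag)
{
        int seen;

        WRITE_ONCE(*this_cpu_flag, 1);
        arch_spin_lock(lock);
        smp_mb__after_spinlock();       /* order the store above vs. the load below */
        seen = READ_ONCE(*other_cpu_flag);
        arch_spin_unlock(lock);
        return seen;
}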

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */