/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * A plain int is used, as a full 64-bit word is not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

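/*
 * Illustrative values (added sketch, not part of the original header):
 * with the tokens above, an unlocked lock reads as 0 and a lock held by
 * (virtual) CPU 3 reads as 0x80000003:
 *
 *	lock.slock == 0x00000000	-> free
 *	lock.slock == 0x80000003	-> held by CPU 3
 */
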
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}

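/*
 * Hypothetical caller (illustration only, not part of this header):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		// lock acquired; do the critical section
 *		arch_spin_unlock(&lock);
 *	}
 */
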
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor. Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held. Conveniently, we have a word in the paca that holds this
 * value.
 */

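/*
 * Sketch of the resulting protocol (added illustration): a spinning
 * vCPU can recover the holder's CPU number from the lock word and have
 * splpar_spin_yield() confer the rest of its timeslice to that vCPU:
 *
 *	holder_cpu = lock->slock & 0xffff;	// yy from 0x800000yy
 *	// ... hypervisor call directing our cycles at holder_cpu ...
 */
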
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

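/*
 * Note on the flags dance above (added comment): while the lock is
 * contended we restore the caller's original (possibly IRQ-enabled)
 * flags so pending interrupts can be serviced during the busy-wait,
 * then re-disable them (flags_dis) before retrying the acquisition.
 */
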
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

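/*
 * Lock word encoding used below (summary added for illustration):
 *
 *	rw->lock == 0		-> free
 *	rw->lock == N, N > 0	-> held by N readers
 *	rw->lock < 0		-> held by one writer (WRLOCK_TOKEN)
 */
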
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

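/*
 * Hypothetical caller (illustration only): pair each trylock with the
 * matching unlock:
 *
 *	if (arch_read_trylock(&rw)) {
 *		// read-side critical section
 *		arch_read_unlock(&rw);
 *	}
 */
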
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */