/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
7 #ifndef _ASM_RISCV_SPINLOCK_H
8 #define _ASM_RISCV_SPINLOCK_H
10 #include <linux/kernel.h>
11 #include <asm/current.h>
12 #include <asm/fence.h>
/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */
18 /* FIXME: Replace this with a ticket lock, like MIPS. */
20 #define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
22 static inline void arch_spin_unlock(arch_spinlock_t *lock)
24 smp_store_release(&lock->lock, 0);
27 static inline int arch_spin_trylock(arch_spinlock_t *lock)
31 __asm__ __volatile__ (
32 " amoswap.w %0, %2, %1\n"
34 : "=r" (busy), "+A" (lock->lock)
41 static inline void arch_spin_lock(arch_spinlock_t *lock)
44 if (arch_spin_is_locked(lock))
47 if (arch_spin_trylock(lock))
52 /***********************************************************/
54 static inline void arch_read_lock(arch_rwlock_t *lock)
65 : "+A" (lock->lock), "=&r" (tmp)
69 static inline void arch_write_lock(arch_rwlock_t *lock)
80 : "+A" (lock->lock), "=&r" (tmp)
84 static inline int arch_read_trylock(arch_rwlock_t *lock)
96 : "+A" (lock->lock), "=&r" (busy)
102 static inline int arch_write_trylock(arch_rwlock_t *lock)
106 __asm__ __volatile__(
112 RISCV_ACQUIRE_BARRIER
114 : "+A" (lock->lock), "=&r" (busy)
120 static inline void arch_read_unlock(arch_rwlock_t *lock)
122 __asm__ __volatile__(
123 RISCV_RELEASE_BARRIER
124 " amoadd.w x0, %1, %0\n"
130 static inline void arch_write_unlock(arch_rwlock_t *lock)
132 smp_store_release(&lock->lock, 0);
135 #endif /* _ASM_RISCV_SPINLOCK_H */