/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__
#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>
typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;
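/*
 * The 32-bit lock word packs two 16-bit tickets: 'next' is the ticket
 * handed out to the next arriving CPU, 'owner' is the ticket currently
 * being served, and the lock is free iff owner == next. Worked example:
 * starting from __val == 0, three CPUs take tickets 0, 1 and 2 (leaving
 * next == 3) and enter the critical section in that order as each
 * unlock increments owner.
 */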
#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = (hyp_spinlock_t){ .__val = 0 };				\
} while (0)
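/*
 * Typical usage, as a sketch only ('hyp_pool_lock' and the critical
 * section are hypothetical, not part of this header). The lock must be
 * initialised before first use, after which lock/unlock pairs protect
 * the shared data:
 *
 *	static hyp_spinlock_t hyp_pool_lock;
 *
 *	hyp_spin_lock_init(&hyp_pool_lock);
 *	...
 *	hyp_spin_lock(&hyp_pool_lock);
 *	... critical section: at most one CPU at EL2 runs this ...
 *	hyp_spin_unlock(&hyp_pool_lock);
 */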
static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}
static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
	hyp_spinlock_t lockval = READ_ONCE(*lock);

	return lockval.owner != lockval.next;
}
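/*
 * Note: hyp_spin_is_locked() relies on READ_ONCE() snapshotting the
 * whole 32-bit word, so 'owner' and 'next' come from a single load and
 * are mutually consistent; comparing fields read by two separate loads
 * could race with a concurrent lock/unlock.
 */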
#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * The __pkvm_init() path accesses protected data-structures without
	 * holding locks as the other CPUs are guaranteed to not enter EL2
	 * concurrently at this point in time. The point at which EL2 is
	 * initialized on all CPUs is reflected in the 'pkvm' static key, so
	 * wait until it is set before checking the lock state.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif
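/*
 * Sketch of intended usage ('__hyp_pool_free' and 'pool->lock' are
 * hypothetical names): internal helpers that rely on their caller
 * holding a lock can document and, under CONFIG_NVHE_EL2_DEBUG,
 * enforce that requirement:
 *
 *	static void __hyp_pool_free(struct hyp_pool *pool, void *p)
 *	{
 *		hyp_assert_lock_held(&pool->lock);
 *		...
 *	}
 */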
#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */