#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include "mcs_spinlock.h"

/*
 * An MCS like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
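
/*
 * Note: struct optimistic_spin_node and struct optimistic_spin_queue are
 * declared in the locking headers. The fields relied upon below are the
 * node's ->next and ->prev queue links, its ->locked hand-off flag and its
 * ->cpu (encoded CPU number), plus the queue's atomic ->tail holding the
 * encoded CPU number of the last queued node (or OSQ_UNLOCKED_VAL).
 */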

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
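
/*
 * Example: CPU 0 is encoded as 1 and CPU 1 as 2, so OSQ_UNLOCKED_VAL (0)
 * can never collide with a valid CPU and unambiguously means "no CPU".
 */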

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in queue, then the 'old' value will be
	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
	 * we're currently last in queue, then the queue will then become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		next = xchg(&node->next, NULL);
		if (next)
			break;

		arch_mutex_cpu_relax();
	}

	return next;
}
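
/*
 * osq_lock() returns true once we own the lock: either immediately because
 * the queue was empty, or after spinning until our predecessor sets our
 * ->locked flag. It returns false if we bail out because the CPU needs to
 * reschedule, after first unlinking our node from the queue.
 */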

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;
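
	/*
	 * Publish ourselves as the new queue tail. If there was no previous
	 * tail the lock was uncontended and we own it immediately.
	 */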
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	ACCESS_ONCE(prev->next) = node;

	/*
	 * Normally @prev is untouchable after the above store; because at that
	 * moment unlock can proceed and wipe the node element from stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!smp_load_acquire(&node->locked)) {
		/*
		 * If we need to reschedule bail... so we can block.
		 */
		if (need_resched())
			goto unqueue;

		arch_mutex_cpu_relax();
	}
	return true;

unqueue:
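	/*
	 * We were asked to reschedule; unlink ourselves from the queue before
	 * returning false so the caller can block. This is done in three
	 * steps: A (stabilize @prev), B (stabilize @next) and C (unlink).
	 */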

	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		arch_mutex_cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = ACCESS_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	ACCESS_ONCE(next->prev) = prev;
	ACCESS_ONCE(prev->next) = next;

	return false;
}
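
/*
 * osq_unlock() releases the queue: clear the tail if we are the only node
 * queued, otherwise hand the lock to the next queued node by setting its
 * ->locked flag.
 */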

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		ACCESS_ONCE(next->locked) = 1;
		return;
	}
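
	/*
	 * Our successor has either not finished linking itself in yet or is
	 * busy unqueueing, so @node->next was still NULL. Wait for a stable
	 * next pointer (or for the queue to drain) and hand over if one shows
	 * up.
	 */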
	next = osq_wait_next(lock, node, NULL);
	if (next)
		ACCESS_ONCE(next->locked) = 1;
}