/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                      is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg of the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL during this small window, hence this can be a transitional
 * state.
 *
 * (**) There is a small window in which bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

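/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): since the waiters state lives in bit 0 of lock->owner,
 * decoding the owner is a single mask operation, the inverse of
 * rt_mutex_set_owner() below. The rt_mutex_owner() helper in
 * rtmutex_common.h works along these lines:
 *
 *        static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 *        {
 *                return (struct task_struct *)
 *                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 *        }
 */
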
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release if the architecture supports
 * cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

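/*
 * Editor's sketch: with the cmpxchg fast path available, an uncontended
 * acquire is a single atomic transition of lock->owner from NULL to
 * current, and release is the inverse, e.g. in rt_mutex_fastlock()
 * further below:
 *
 *        if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
 *                return 0;        (fast acquire succeeded)
 *
 * As soon as bit 0 of lock->owner is set, the cmpxchg against a clean
 * NULL or task pointer fails and everything falls back to the slow
 * path under lock->wait_lock.
 */
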
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return (left->task->dl.deadline < right->task->dl.deadline);

        return 0;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                lock->waiters_leftmost = &waiter->tree_entry;

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        if (lock->waiters_leftmost == &waiter->tree_entry)
                lock->waiters_leftmost = rb_next(&waiter->tree_entry);

        rb_erase(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        int leftmost = 1;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                task->pi_waiters_leftmost = &waiter->pi_tree_entry;

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->prio,
                   task->normal_prio);
}

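/*
 * Worked example (editor's addition): a SCHED_NORMAL task with
 * normal_prio 120 that has a SCHED_FIFO top pi waiter of prio 98 gets
 * min(98, 120) == 98, i.e. it runs boosted (lower value means higher
 * priority). Once its pi waiter tree empties, rt_mutex_getprio()
 * returns 120 again and the boost is undone.
 */
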
struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return NULL;

        return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
        if (!task_has_pi_waiters(task))
                return 0;

        return task_top_pi_waiter(task)->task->prio <= newprio;
}

/*
 * Adjust the priority of a task after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio || dl_prio(prio))
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of the task. We do not use the spin_xx_mutex() variants here as we
 * are outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Drops the task's usage count by one, which may free the task.
 *
 * @task: the task owning the mutex (owner) for which a chain walk is
 *        probably needed
 * @deadlock_detect: do we have to carry out deadlock detection?
 * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
 *             things for a task that has just got its priority adjusted, and
 *             is waiting on a mutex)
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *               its priority to the mutex owner (can be NULL in the case
 *               depicted above or if the top waiter has gone away and we are
 *               actually deboosting the owner)
 * @top_task: the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task cannot go away as we did a get_task_struct() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or whether the state of the chain has changed while
         * we dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);

        /* Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue above changed the top waiter, then we need
                 * to wake the new top waiter up to try to get the lock.
                 */
                if (top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
                goto out_put_task;
        }
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                rt_mutex_dequeue_pi(task, top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
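
/*
 * Scenario sketch (editor's addition): let T1 (prio 120) own L1, let
 * T2 (prio 110) own L2 while blocked on L1, and let a high priority
 * T3 (prio 90) block on L2. task_blocks_on_rt_mutex() boosts T2 to 90
 * and, since T2->pi_blocked_on is set, triggers a chain walk on T1:
 * each step requeues the waiter on the next lock, adjusts that lock
 * owner's priority and advances, so T1 also ends up at prio 90. The
 * walk terminates at max_lock_depth, at an unowned lock, or when no
 * further priority adjustment is needed.
 */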

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                struct rt_mutex_waiter *waiter)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled: when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release / acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner to RT_MUTEX_HAS_WAITERS
         * in the case the lock is not contended any more. This is
         * fixed up when we take the ownership. This is the
         * transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock))
                return 0;

        /*
         * The task will get the lock when one of these conditions holds:
         * 1) there is no other waiter
         * 2) it has a higher priority than all waiters
         * 3) it is the top waiter
         */
        if (rt_mutex_has_waiters(lock)) {
                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                return 0;
                }
        }

        if (waiter || rt_mutex_has_waiters(lock)) {
                unsigned long flags;
                struct rt_mutex_waiter *top;

                raw_spin_lock_irqsave(&task->pi_lock, flags);

                /* remove the queued waiter. */
                if (waiter) {
                        rt_mutex_dequeue(lock, waiter);
                        task->pi_blocked_on = NULL;
                }

                /*
                 * We have to enqueue the top waiter (if it exists) into
                 * the task->pi_waiters tree.
                 */
                if (rt_mutex_has_waiters(lock)) {
                        top = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(task, top);
                }
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, task);

        rt_mutex_deadlock_account_lock(lock, task);

        return 1;
}
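
/*
 * Example (editor's addition): a prio 90 task calling
 * try_to_take_rt_mutex() on an unowned lock whose top queued waiter
 * has prio 110 satisfies condition 2) and takes the lock ahead of the
 * queued waiter. A second prio 110 task, by contrast, fails the
 * priority check above and returns 0 unless it is itself the top
 * waiter.
 */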

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
        waiter->prio = task->prio;

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        rt_mutex_enqueue(lock, waiter);

        task->pi_blocked_on = waiter;

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        if (!owner)
                return 0;

        if (waiter == rt_mutex_top_waiter(lock)) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * Gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         task);

        raw_spin_lock(&lock->wait_lock);

        return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter tree and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        rt_mutex_dequeue_pi(current, waiter);

        rt_mutex_set_owner(lock, NULL);

        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        wake_up_process(waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held, after try_to_take_rt_mutex()
 * has just failed.
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);

        if (!owner)
                return;

        if (first) {
                raw_spin_lock_irqsave(&owner->pi_lock, flags);

                rt_mutex_dequeue_pi(owner, waiter);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        rt_mutex_enqueue_pi(owner, next);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        raw_spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        raw_spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case the task was granted a new priority.
 *
 * Called from sched_setscheduler()
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || (waiter->prio == task->prio &&
                        !dl_prio(task->prio))) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:                the rt_mutex to take
 * @state:               the state the task should block in (TASK_INTERRUPTIBLE
 *                       or TASK_UNINTERRUPTIBLE)
 * @timeout:             the pre-initialized and started timer, or NULL for none
 * @waiter:              the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
                    struct hrtimer_sleeper *timeout,
                    struct rt_mutex_waiter *waiter)
{
        int ret = 0;

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock, current, waiter))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout; both are ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                raw_spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(waiter);

                schedule_rt_mutex(lock);

                raw_spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        return ret;
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);

        raw_spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Set up the timer when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        raw_spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {
                ret = try_to_take_rt_mutex(lock, current, NULL);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                raw_spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        raw_spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

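/*
 * Usage sketch (editor's addition; DEFINE_RT_MUTEX() comes from
 * <linux/rtmutex.h>, and the caller must be in a sleepable context):
 *
 *        static DEFINE_RT_MUTEX(example_lock);
 *
 *        static void example_critical_section(void)
 *        {
 *                rt_mutex_lock(&example_lock);
 *                (critical section; waiters boost our priority)
 *                rt_mutex_unlock(&example_lock);
 *        }
 */
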
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                                 int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

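/*
 * Usage sketch (editor's addition, reusing the hypothetical
 * example_lock from above): callers must be prepared for -EINTR:
 *
 *        ret = rt_mutex_lock_interruptible(&example_lock, 0);
 *        if (ret)
 *                return ret;        (-EINTR: interrupted by a signal)
 */
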
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptibly, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

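/*
 * Usage sketch (editor's addition): the timer setup is the caller's
 * job, roughly as the PI-futex code does it; treat the exact sequence
 * as an assumption, not a prescription:
 *
 *        struct hrtimer_sleeper timeout;
 *
 *        hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
 *                              HRTIMER_MODE_ABS);
 *        hrtimer_init_sleeper(&timeout, current);
 *        hrtimer_set_expires(&timeout.timer, expiry);
 *        ret = rt_mutex_timed_lock(&example_lock, &timeout, 0);
 *        destroy_hrtimer_on_stack(&timeout.timer);
 */
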
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 * @name: the debug name of the lock
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        raw_spin_lock_init(&lock->wait_lock);
        lock->waiters = RB_ROOT;
        lock->waiters_leftmost = NULL;

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

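/*
 * Usage sketch (editor's addition): dynamic initialization goes
 * through the rt_mutex_init() wrapper in <linux/rtmutex.h>, which
 * supplies the debug name when CONFIG_DEBUG_RT_MUTEXES is enabled and
 * ends up here:
 *
 *        struct rt_mutex *lock = kmalloc(sizeof(*lock), GFP_KERNEL);
 *
 *        if (lock)
 *                rt_mutex_init(lock);
 *
 * Statically allocated locks use DEFINE_RT_MUTEX() instead.
 */
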
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:       the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:       the rt_mutex to be unlocked
 * @proxy_owner: the task on whose behalf the lock is released
 *
 * No locking. Caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:               the rt_mutex to take
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @task:               the task to prepare
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
                              struct task_struct *task, int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        if (try_to_take_rt_mutex(lock, task, NULL)) {
                raw_spin_unlock(&lock->wait_lock);
                return 1;
        }

        ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

        if (ret && !rt_mutex_owner(lock)) {
                /*
                 * Reset the return value. We might have
                 * returned with -EDEADLK and the owner
                 * released the lock while we were walking the
                 * pi chain.  Let the waiter sort it out.
                 */
                ret = 0;
        }

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        raw_spin_unlock(&lock->wait_lock);

        debug_rt_mutex_print_deadlock(waiter);

        return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:               the rt_mutex we were woken on
 * @to:                 the timeout, NULL if none. The hrtimer should already
 *                      have been started.
 * @waiter:             the pre-initialized rt_mutex_waiter
 * @detect_deadlock:    perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
                               struct rt_mutex_waiter *waiter,
                               int detect_deadlock)
{
        int ret;

        raw_spin_lock(&lock->wait_lock);

        set_current_state(TASK_INTERRUPTIBLE);

        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

        set_current_state(TASK_RUNNING);

        if (unlikely(ret))
                remove_waiter(lock, waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        raw_spin_unlock(&lock->wait_lock);

        return ret;
}
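
/*
 * Flow sketch (editor's addition): for FUTEX_REQUEUE_PI the two proxy
 * calls bracket the wait of the requeued task:
 *
 *        requeueing side:                    woken waiter side:
 *        rt_mutex_start_proxy_lock()
 *          returns 1 (lock acquired for
 *          task) or 0 (task blocked)
 *                                            rt_mutex_finish_proxy_lock()
 *                                              sleeps in __rt_mutex_slowlock()
 *                                              and returns 0, -EINTR or
 *                                              -ETIMEDOUT
 *
 * See the call sites in kernel/futex.c for the real usage.
 */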