// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 *  Copyright (C) IBM Corporation, 2009
 *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
int __read_mostly futex_cmpxchg_enabled;
#endif
/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
        struct futex_hash_bucket *queues;
        unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)
/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
        struct fault_attr attr;

        bool ignore_private;
} fail_futex = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
        return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

bool should_fail_futex(bool fshared)
{
        if (fail_futex.ignore_private && !fshared)
                return false;

        return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
        umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
        struct dentry *dir;

        dir = fault_create_debugfs_attr("fail_futex", NULL,
                                        &fail_futex.attr);
        if (IS_ERR(dir))
                return PTR_ERR(dir);

        debugfs_create_bool("ignore-private", mode, dir,
                            &fail_futex.ignore_private);
        return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#endif /* CONFIG_FAIL_FUTEX */
/**
 * futex_hash - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
struct futex_hash_bucket *futex_hash(union futex_key *key)
{
        u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
                          key->both.offset);

        return &futex_queues[hash & (futex_hashsize - 1)];
}
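
/*
 * Illustrative sketch (not compiled): because futex_hashsize is always a
 * power of two, masking with (futex_hashsize - 1) is equivalent to the
 * modulo reduction one would otherwise write:
 *
 *	hash % futex_hashsize == hash & (futex_hashsize - 1)
 *
 * e.g. with futex_hashsize == 256, a hash of 0x12345678 selects bucket
 * 0x78. The key words up to both.offset feed jhash2() as u32 words and the
 * offset itself is the initial value, so two futexes in the same page
 * (same key words, different offset) generally land in different buckets.
 */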
/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
                  int flags, u64 range_ns)
{
        if (!time)
                return NULL;

        hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
                                      CLOCK_REALTIME : CLOCK_MONOTONIC,
                                      HRTIMER_MODE_ABS);
        /*
         * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
         * effectively the same as calling hrtimer_set_expires().
         */
        hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

        return timeout;
}
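
/*
 * Typical usage sketch (assumed caller pattern, mirroring the wait paths):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(time, &timeout, flags, 0);
 *	...
 *	if (to) {
 *		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
 *		... // block until woken or the timer fires
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 *
 * A NULL @time means "no timeout", and the caller must then skip both the
 * start and the teardown.
 */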
/*
 * Generate a machine wide unique identifier for this inode.
 *
 * This relies on u64 not wrapping in the life-time of the machine; which with
 * 1ns resolution means almost 585 years.
 *
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 *
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 *
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
 */
static u64 get_inode_sequence_number(struct inode *inode)
{
        static atomic64_t i_seq;
        u64 old;

        /* Does the inode already have a sequence number? */
        old = atomic64_read(&inode->i_sequence);
        if (likely(old))
                return old;

        for (;;) {
                u64 new = atomic64_add_return(1, &i_seq);

                if (WARN_ON_ONCE(!new))
                        continue;

                old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
                if (old)
                        return old;
                return new;
        }
}
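
/*
 * Race sketch (illustrative): two tasks may observe i_sequence == 0 and
 * both draw a fresh number from i_seq. Only one cmpxchg from 0 succeeds;
 * the loser sees the winner's value as 'old' and returns that, so every
 * caller agrees on a single identifier for the inode:
 *
 *	task A: new = 1, cmpxchg(0 -> 1) succeeds, returns 1
 *	task B: new = 2, cmpxchg(0 -> 2) fails (old == 1), returns 1
 *
 * The unused draw (2) is simply leaked from the counter, which is fine
 * given the 585-year wrap argument above.
 */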
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings (when @fshared), the key is:
 *
 *   ( inode->i_sequence, page->index, offset_within_page )
 *
 * [ also see get_inode_sequence_number() ]
 *
 * For private mappings (or when !@fshared), the key is:
 *
 *   ( current->mm, address, 0 )
 *
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
                  enum futex_access rw)
{
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
        struct page *page, *tail;
        struct address_space *mapping;
        int err, ro = 0;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = address % PAGE_SIZE;
        if (unlikely((address % sizeof(u32)) != 0))
                return -EINVAL;
        address -= key->both.offset;

        if (unlikely(!access_ok(uaddr, sizeof(u32))))
                return -EFAULT;

        if (unlikely(should_fail_futex(fshared)))
                return -EFAULT;

        /*
         * PROCESS_PRIVATE futexes are fast.
         * As the mm cannot disappear under us and the 'key' only needs
         * virtual address, we don't even have to find the underlying vma.
         * Note : We do have to check 'uaddr' is a valid user address,
         *        but access_ok() should be faster than find_vma()
         */
        if (!fshared) {
                key->private.mm = mm;
                key->private.address = address;
                return 0;
        }

again:
        /* Ignore any VERIFY_READ mapping (futex common case) */
        if (unlikely(should_fail_futex(true)))
                return -EFAULT;

        err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
        /*
         * If write access is not required (eg. FUTEX_WAIT), try
         * and get read-only access.
         */
        if (err == -EFAULT && rw == FUTEX_READ) {
                err = get_user_pages_fast(address, 1, 0, &page);
                ro = 1;
        }
        if (err < 0)
                return err;
        else
                err = 0;

        /*
         * The treatment of mapping from this point on is critical. The page
         * lock protects many things but in this context the page lock
         * stabilizes mapping, prevents inode freeing in the shared
         * file-backed region case and guards against movement to swap cache.
         *
         * Strictly speaking the page lock is not needed in all cases being
         * considered here and page lock forces unnecessary serialization.
         * From this point on, mapping will be re-verified if necessary and
         * page lock will be acquired only if it is unavoidable.
         *
         * Mapping checks require the head page for any compound page so the
         * head page and mapping is looked up now. For anonymous pages, it
         * does not matter if the page splits in the future as the key is
         * based on the address. For filesystem-backed pages, the tail is
         * required as the index of the page determines the key. For
         * base pages, there is no tail page and tail == page.
         */
        tail = page;
        page = compound_head(page);
        mapping = READ_ONCE(page->mapping);

        /*
         * If page->mapping is NULL, then it cannot be a PageAnon
         * page; but it might be the ZERO_PAGE or in the gate area or
         * in a special mapping (all cases which we are happy to fail);
         * or it may have been a good file page when get_user_pages_fast
         * found it, but truncated or holepunched or subjected to
         * invalidate_complete_page2 before we got the page lock (also
         * cases which we are happy to fail). And we hold a reference,
         * so refcount care in invalidate_complete_page's remove_mapping
         * prevents drop_caches from setting mapping to NULL beneath us.
         *
         * The case we do have to guard against is when memory pressure made
         * shmem_writepage move it from filecache to swapcache beneath us:
         * an unlikely race, but we do need to retry for page->mapping.
         */
        if (unlikely(!mapping)) {
                int shmem_swizzled;

                /*
                 * Page lock is required to identify which special case above
                 * applies. If this is really a shmem page then the page lock
                 * will prevent unexpected transitions.
                 */
                lock_page(page);
                shmem_swizzled = PageSwapCache(page) || page->mapping;
                unlock_page(page);
                put_page(page);

                if (shmem_swizzled)
                        goto again;

                return -EFAULT;
        }

        /*
         * Private mappings are handled in a simple way.
         *
         * If the futex key is stored on an anonymous page, then the associated
         * object is the mm which is implicitly pinned by the calling process.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
        if (PageAnon(page)) {
                /*
                 * A RO anonymous page will never change and thus doesn't make
                 * sense for futex operations.
                 */
                if (unlikely(should_fail_futex(true)) || ro) {
                        err = -EFAULT;
                        goto out;
                }

                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;

        } else {
                struct inode *inode;

                /*
                 * The associated futex object in this case is the inode and
                 * the page->mapping must be traversed. Ordinarily this should
                 * be stabilised under page lock but it's not strictly
                 * necessary in this case as we just want to pin the inode, not
                 * update the radix tree or anything like that.
                 *
                 * The RCU read lock is taken as the inode is finally freed
                 * under RCU. If the mapping still matches expectations then the
                 * mapping->host can be safely accessed as being a valid inode.
                 */
                rcu_read_lock();

                if (READ_ONCE(page->mapping) != mapping) {
                        rcu_read_unlock();
                        put_page(page);

                        goto again;
                }

                inode = READ_ONCE(mapping->host);
                if (!inode) {
                        rcu_read_unlock();
                        put_page(page);

                        goto again;
                }

                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.i_seq = get_inode_sequence_number(inode);
                key->shared.pgoff = page_to_pgoff(tail);
                rcu_read_unlock();
        }

out:
        put_page(page);
        return err;
}
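
/*
 * Key flavour sketch (illustrative values): for a PROCESS_PRIVATE futex at
 * userspace address 0x7f0000001004 the key ends up as
 *
 *	.private = { .mm = current->mm, .address = 0x7f0000001000 },
 *	.both.offset = 0x4
 *
 * while the same word in a MAP_SHARED file mapping yields
 *
 *	.shared = { .i_seq = <inode sequence>, .pgoff = <page index> },
 *	.both.offset = 0x4 | FUT_OFF_INODE
 *
 * so two processes mapping the same file page at different virtual
 * addresses still produce keys that futex_match() considers equal.
 */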
/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
int fault_in_user_writeable(u32 __user *uaddr)
{
        struct mm_struct *mm = current->mm;
        int ret;

        mmap_read_lock(mm);
        ret = fixup_user_fault(mm, (unsigned long)uaddr,
                               FAULT_FLAG_WRITE, NULL);
        mmap_read_unlock(mm);

        return ret < 0 ? ret : 0;
}
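
/*
 * Usage sketch (assumed caller pattern): atomic futex ops run with
 * pagefaults disabled and report -EFAULT instead of faulting. Callers
 * then drop their locks, fault the page in writable, and retry:
 *
 *	ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
 *	if (ret == -EFAULT) {
 *		if (fault_in_user_writeable(uaddr))
 *			return -EFAULT;
 *		goto retry;
 *	}
 */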
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex futex_q's)
 *
 * Must be called with the hb lock held.
 */
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
{
        struct futex_q *this;

        plist_for_each_entry(this, &hb->chain, list) {
                if (futex_match(&this->key, key))
                        return this;
        }
        return NULL;
}
int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
{
        int ret;

        pagefault_disable();
        ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
        pagefault_enable();

        return ret;
}

int futex_get_value_locked(u32 *dest, u32 __user *from)
{
        int ret;

        pagefault_disable();
        ret = __get_user(*dest, from);
        pagefault_enable();

        return ret ? -EFAULT : 0;
}
/**
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret: owner's current futex lock status
 * @exiting:	Pointer to the exiting task
 *
 * Caller must hold a refcount on @exiting.
 */
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
{
        if (ret != -EBUSY) {
                WARN_ON_ONCE(exiting);
                return;
        }

        if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
                return;

        mutex_lock(&exiting->futex_exit_mutex);
        /*
         * No point in doing state checking here. If the waiter got here
         * while the task was in exec()->exec_futex_release() then it can
         * have any FUTEX_STATE_* value when the waiter has acquired the
         * mutex. OK, if running, EXITING or DEAD if it reached exit()
         * already. Highly unlikely and not a problem. Just one more round
         * through the futex maze.
         */
        mutex_unlock(&exiting->futex_exit_mutex);

        put_task_struct(exiting);
}
/**
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
void __futex_unqueue(struct futex_q *q)
{
        struct futex_hash_bucket *hb;

        if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
                return;
        lockdep_assert_held(q->lock_ptr);

        hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
        plist_del(&q->list, &hb->chain);
        futex_hb_waiters_dec(hb);
}
/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
        __acquires(&hb->lock)
{
        struct futex_hash_bucket *hb;

        hb = futex_hash(&q->key);

        /*
         * Increment the counter before taking the lock so that
         * a potential waker won't miss a to-be-slept task that is
         * waiting for the spinlock. This is safe as all futex_q_lock()
         * users end up calling futex_queue(). Similarly, for housekeeping,
         * decrement the counter at futex_q_unlock() when some error has
         * occurred and we don't end up adding the task to the list.
         */
        futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */

        q->lock_ptr = &hb->lock;

        spin_lock(&hb->lock);
        return hb;
}

void futex_q_unlock(struct futex_hash_bucket *hb)
        __releases(&hb->lock)
{
        spin_unlock(&hb->lock);
        futex_hb_waiters_dec(hb);
}
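
/*
 * Ordering sketch (illustrative): the waiter-count increment must be
 * visible before the waiter can block, so a waker that observes a zero
 * count may safely skip taking hb->lock:
 *
 *	waiter				waker
 *	------				-----
 *	futex_hb_waiters_inc(hb)	(smp_mb)
 *	spin_lock(&hb->lock)		if (!futex_hb_waiters_pending(hb))
 *	queue + sleep				return; // nothing to wake
 *
 * Without the barrier implied at (A), the waker could miss a waiter that
 * is already committed to sleeping.
 */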
void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
{
        int prio;

        /*
         * The priority used to register this element is
         * - either the real thread-priority for the real-time threads
         * (i.e. threads with a priority lower than MAX_RT_PRIO)
         * - or MAX_RT_PRIO for non-RT threads.
         * Thus, all RT-threads are woken first in priority order, and
         * the others are woken last, in FIFO order.
         */
        prio = min(current->normal_prio, MAX_RT_PRIO);

        plist_node_init(&q->list, prio);
        plist_add(&q->list, &hb->chain);
        q->task = current;
}
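
/*
 * Worked example (illustrative numbers): a SCHED_FIFO task with rtprio 90
 * has normal_prio 9, so it queues at plist priority 9; any SCHED_OTHER
 * task has normal_prio >= MAX_RT_PRIO (100) and queues at exactly 100.
 * plist keeps equal priorities in insertion order, which is what gives
 * non-RT waiters their FIFO wakeup.
 */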
/**
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 *
 * Return:
 *  - 1 - if the futex_q was still queued (and we unqueued it);
 *  - 0 - if the futex_q was already removed by the waking thread
 */
int futex_unqueue(struct futex_q *q)
{
        spinlock_t *lock_ptr;
        int ret = 0;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        /*
         * q->lock_ptr can change between this read and the following spin_lock.
         * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
         * optimizing lock_ptr out of the logic below.
         */
        lock_ptr = READ_ONCE(q->lock_ptr);
        if (lock_ptr != NULL) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock. This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock(). It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock(). It cannot,
                 * however, change back to the original value. Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                __futex_unqueue(q);

                BUG_ON(q->pi_state);

                spin_unlock(lock_ptr);
                ret = 1;
        }

        return ret;
}
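
/*
 * Timeline sketch (illustrative) of the race the re-check above closes:
 *
 *	waiter				requeue (futex_requeue())
 *	------				-------------------------
 *	lock_ptr = q->lock_ptr (hb1)
 *					q->lock_ptr = &hb2->lock
 *	spin_lock(&hb1->lock)   // wrong lock
 *	lock_ptr != q->lock_ptr // detected, unlock and retry
 *
 * The "cannot change back" argument holds because requeueing takes both
 * hash bucket locks, so q cannot move back to hb1 while we hold hb1->lock.
 */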
/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
 */
void futex_unqueue_pi(struct futex_q *q)
{
        __futex_unqueue(q);

        BUG_ON(!q->pi_state);
        put_pi_state(q->pi_state);
        q->pi_state = NULL;
}

/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING	true
#define HANDLE_DEATH_LIST	false
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
                              bool pi, bool pending_op)
{
        u32 uval, nval, mval;
        int err;

        /* Futex address must be 32bit aligned */
        if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
                return -1;

retry:
        if (get_user(uval, uaddr))
                return -1;

        /*
         * Special case for regular (non PI) futexes. The unlock path in
         * user space has two race scenarios:
         *
         * 1. The unlock path releases the user space futex value and
         *    before it can execute the futex() syscall to wake up
         *    waiters it is killed.
         *
         * 2. A woken up waiter is killed before it can acquire the
         *    futex in user space.
         *
         * In both cases the TID validation below prevents a wakeup of
         * potential waiters which can cause these waiters to block
         * forever.
         *
         * In both cases the following conditions are met:
         *
         *	1) task->robust_list->list_op_pending != NULL
         *	   @pending_op == true
         *	2) User space futex value == 0
         *	3) Regular futex: @pi == false
         *
         * If these conditions are met, it is safe to attempt waking up a
         * potential waiter without touching the user space futex value and
         * trying to set the OWNER_DIED bit. The user space futex value is
         * uncontended and the rest of the user space mutex state is
         * consistent, so a woken waiter will just take over the
         * uncontended futex. Setting the OWNER_DIED bit would create
         * inconsistent state and malfunction of the user space owner died
         * handling.
         */
        if (pending_op && !pi && !uval) {
                futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
                return 0;
        }

        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
                return 0;

        /*
         * Ok, this dying thread is truly holding a futex
         * of interest. Set the OWNER_DIED bit atomically
         * via cmpxchg, and if the value had FUTEX_WAITERS
         * set, wake up a waiter (if any). (We have to do a
         * futex_wake() even if OWNER_DIED is already set -
         * to handle the rare but possible case of recursive
         * thread-death.) The rest of the cleanup is done in
         * userspace.
         */
        mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;

        /*
         * We are not holding a lock here, but we want to have
         * the pagefault_disable/enable() protection because
         * we want to handle the fault gracefully. If the
         * access fails we try to fault in the futex with R/W
         * verification via get_user_pages. get_user() above
         * does not guarantee R/W access. If that fails we
         * give up and leave the futex locked.
         */
        if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
                switch (err) {
                case -EFAULT:
                        if (fault_in_user_writeable(uaddr))
                                return -1;
                        goto retry;

                case -EAGAIN:
                        cond_resched();
                        goto retry;

                default:
                        WARN_ON_ONCE(1);
                        return err;
                }
        }

        if (nval != uval)
                goto retry;

        /*
         * Wake robust non-PI futexes here. The wakeup of
         * PI futexes happens in exit_pi_state():
         */
        if (!pi && (uval & FUTEX_WAITERS))
                futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);

        return 0;
}
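
/*
 * Userspace counterpart sketch (illustrative, e.g. what glibc's robust
 * mutexes do): the lock word holds the owner TID, so the dying-owner
 * transitions handled above look like:
 *
 *	lock:	  0 -> TID		(cmpxchg in user space)
 *	contend:  TID -> TID|FUTEX_WAITERS, then FUTEX_WAIT
 *	death:	  TID|FUTEX_WAITERS -> FUTEX_OWNER_DIED|FUTEX_WAITERS
 *		  (the cmpxchg above), then one waiter is woken
 *	recover:  the woken waiter sees FUTEX_OWNER_DIED, takes the lock
 *		  and reports EOWNERDEAD to its caller
 */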
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int fetch_robust_entry(struct robust_list __user **entry,
                                     struct robust_list __user * __user *head,
                                     unsigned int *pi)
{
        unsigned long uentry;

        if (get_user(uentry, (unsigned long __user *)head))
                return -EFAULT;

        *entry = (void __user *)(uentry & ~1UL);
        *pi = uentry & 1;

        return 0;
}
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void exit_robust_list(struct task_struct *curr)
{
        struct robust_list_head __user *head = curr->robust_list;
        struct robust_list __user *entry, *next_entry, *pending;
        unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
        unsigned int next_pi;
        unsigned long futex_offset;
        int rc;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
         */
        if (fetch_robust_entry(&entry, &head->list.next, &pi))
                return;
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
                return;

        next_entry = NULL;	/* avoid warning with gcc */
        while (entry != &head->list) {
                /*
                 * Fetch the next entry in the list before calling
                 * handle_futex_death:
                 */
                rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending) {
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                               curr, pi, HANDLE_DEATH_LIST))
                                return;
                }
                if (rc)
                        return;
                entry = next_entry;
                pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }

        if (pending) {
                handle_futex_death((void __user *)pending + futex_offset,
                                   curr, pip, HANDLE_DEATH_PENDING);
        }
}
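
/*
 * Registration sketch (illustrative): the list walked above lives entirely
 * in user space and is announced once per thread. With a hypothetical
 * 'struct my_mutex' that embeds a robust_list node next to its lock word:
 *
 *	static struct robust_list_head head = {
 *		.list = { &head.list },		// empty circular list
 *		.futex_offset = offsetof(struct my_mutex, lock_word) -
 *				offsetof(struct my_mutex, list),
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * 'my_mutex' and 'lock_word' are invented names; the kernel only ever sees
 * the head pointer and the relative futex_offset.
 */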
#ifdef CONFIG_COMPAT
static void __user *futex_uaddr(struct robust_list __user *entry,
                                compat_long_t futex_offset)
{
        compat_uptr_t base = ptr_to_compat(entry);
        void __user *uaddr = compat_ptr(base + futex_offset);

        return uaddr;
}

/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
                          compat_uptr_t __user *head, unsigned int *pi)
{
        if (get_user(*uentry, head))
                return -EFAULT;

        *entry = compat_ptr((*uentry) & ~1);
        *pi = (unsigned int)(*uentry) & 1;

        return 0;
}
/*
 * Walk curr->compat_robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
static void compat_exit_robust_list(struct task_struct *curr)
{
        struct compat_robust_list_head __user *head = curr->compat_robust_list;
        struct robust_list __user *entry, *next_entry, *pending;
        unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
        unsigned int next_pi;
        compat_uptr_t uentry, next_uentry, upending;
        compat_long_t futex_offset;
        int rc;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
         */
        if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
                return;
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (compat_fetch_robust_entry(&upending, &pending,
                                      &head->list_op_pending, &pip))
                return;

        next_entry = NULL;	/* avoid warning with gcc */
        while (entry != (struct robust_list __user *) &head->list) {
                /*
                 * Fetch the next entry in the list before calling
                 * handle_futex_death:
                 */
                rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
                        (compat_uptr_t __user *)&entry->next, &next_pi);
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending) {
                        void __user *uaddr = futex_uaddr(entry, futex_offset);

                        if (handle_futex_death(uaddr, curr, pi,
                                               HANDLE_DEATH_LIST))
                                return;
                }
                if (rc)
                        return;
                uentry = next_uentry;
                entry = next_entry;
                pi = next_pi;
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }
        if (pending) {
                void __user *uaddr = futex_uaddr(pending, futex_offset);

                handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
        }
}
#endif
#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
static void exit_pi_state_list(struct task_struct *curr)
{
        struct list_head *next, *head = &curr->pi_state_list;
        struct futex_pi_state *pi_state;
        struct futex_hash_bucket *hb;
        union futex_key key = FUTEX_KEY_INIT;

        if (!futex_cmpxchg_enabled)
                return;
        /*
         * We are a ZOMBIE and nobody can enqueue itself on
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
        raw_spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {
                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = futex_hash(&key);

                /*
                 * We can race against put_pi_state() removing itself from the
                 * list (a waiter going away). put_pi_state() will first
                 * decrement the reference count and then modify the list, so
                 * it's possible to see the list entry but fail this reference
                 * acquire.
                 *
                 * In that case; drop the locks to let put_pi_state() make
                 * progress and retry the loop.
                 */
                if (!refcount_inc_not_zero(&pi_state->refcount)) {
                        raw_spin_unlock_irq(&curr->pi_lock);
                        cpu_relax();
                        raw_spin_lock_irq(&curr->pi_lock);
                        continue;
                }
                raw_spin_unlock_irq(&curr->pi_lock);

                spin_lock(&hb->lock);
                raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
                raw_spin_lock(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
                 */
                if (head->next != next) {
                        /* retain curr->pi_lock for the loop invariant */
                        raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                        spin_unlock(&hb->lock);
                        put_pi_state(pi_state);
                        continue;
                }

                WARN_ON(pi_state->owner != curr);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;

                raw_spin_unlock(&curr->pi_lock);
                raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                spin_unlock(&hb->lock);

                rt_mutex_futex_unlock(&pi_state->pi_mutex);
                put_pi_state(pi_state);

                raw_spin_lock_irq(&curr->pi_lock);
        }
        raw_spin_unlock_irq(&curr->pi_lock);
}
#else
static inline void exit_pi_state_list(struct task_struct *curr) { }
#endif
static void futex_cleanup(struct task_struct *tsk)
{
        if (unlikely(tsk->robust_list)) {
                exit_robust_list(tsk);
                tsk->robust_list = NULL;
        }

#ifdef CONFIG_COMPAT
        if (unlikely(tsk->compat_robust_list)) {
                compat_exit_robust_list(tsk);
                tsk->compat_robust_list = NULL;
        }
#endif

        if (unlikely(!list_empty(&tsk->pi_state_list)))
                exit_pi_state_list(tsk);
}
/**
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk:	task to set the state on
 *
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 *
 * This is called from the recursive fault handling path in do_exit().
 *
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
 */
void futex_exit_recursive(struct task_struct *tsk)
{
        /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
        if (tsk->futex_state == FUTEX_STATE_EXITING)
                mutex_unlock(&tsk->futex_exit_mutex);
        tsk->futex_state = FUTEX_STATE_DEAD;
}
static void futex_cleanup_begin(struct task_struct *tsk)
{
        /*
         * Prevent various race issues against a concurrent incoming waiter
         * including live locks by forcing the waiter to block on
         * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
         * attach_to_pi_owner().
         */
        mutex_lock(&tsk->futex_exit_mutex);

        /*
         * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
         *
         * This ensures that all subsequent checks of tsk->futex_state in
         * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
         * tsk->pi_lock held.
         *
         * It guarantees also that a pi_state which was queued right before
         * the state change under tsk->pi_lock by a concurrent waiter must
         * be observed in exit_pi_state_list().
         */
        raw_spin_lock_irq(&tsk->pi_lock);
        tsk->futex_state = FUTEX_STATE_EXITING;
        raw_spin_unlock_irq(&tsk->pi_lock);
}
static void futex_cleanup_end(struct task_struct *tsk, int state)
{
        /*
         * Lockless store. The only side effect is that an observer might
         * take another loop until it becomes visible.
         */
        tsk->futex_state = state;
        /*
         * Drop the exit protection. This unblocks waiters which observed
         * FUTEX_STATE_EXITING to reevaluate the state.
         */
        mutex_unlock(&tsk->futex_exit_mutex);
}
void futex_exec_release(struct task_struct *tsk)
{
        /*
         * The state handling is done for consistency, but in the case of
         * exec() there is no way to prevent further damage as the PID stays
         * the same. But for the unlikely and arguably buggy case that a
         * futex is held on exec(), this provides at least as much state
         * consistency protection as is possible.
         */
        futex_cleanup_begin(tsk);
        futex_cleanup(tsk);
        /*
         * Reset the state to FUTEX_STATE_OK. The task is alive and about to
         * exec a new binary.
         */
        futex_cleanup_end(tsk, FUTEX_STATE_OK);
}

void futex_exit_release(struct task_struct *tsk)
{
        futex_cleanup_begin(tsk);
        futex_cleanup(tsk);
        futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
}
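
/*
 * State machine sketch (illustrative) of the exit/exec paths above:
 *
 *	FUTEX_STATE_OK
 *	     | futex_cleanup_begin()	(futex_exit_mutex held)
 *	     v
 *	FUTEX_STATE_EXITING  -- robust list + pi_state cleanup runs here
 *	     | futex_cleanup_end()
 *	     v
 *	FUTEX_STATE_OK (exec) or FUTEX_STATE_DEAD (exit)
 *
 * A waiter in attach_to_pi_owner() that sees FUTEX_STATE_EXITING blocks
 * on futex_exit_mutex via wait_for_owner_exiting() and retries, so it
 * either attaches before the cleanup or observes OWNER_DIED after it.
 */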
static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
        u32 curval;

        /*
         * This will fail and we want it. Some arch implementations do
         * runtime detection of the futex_atomic_cmpxchg_inatomic()
         * functionality. We want to know that before we call in any
         * of the complex code paths. Also we want to prevent
         * registration of robust lists in that case. NULL is
         * guaranteed to fault and we get -EFAULT on functional
         * implementation, the non-functional ones will return
         * -ENOSYS.
         */
        if (futex_cmpxchg_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                futex_cmpxchg_enabled = 1;
#endif
}
static int __init futex_init(void)
{
        unsigned int futex_shift;
        unsigned long i;

#if CONFIG_BASE_SMALL
        futex_hashsize = 16;
#else
        futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

        futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
                                               futex_hashsize, 0,
                                               futex_hashsize < 256 ? HASH_SMALL : 0,
                                               &futex_shift, NULL,
                                               futex_hashsize, futex_hashsize);
        futex_hashsize = 1UL << futex_shift;

        futex_detect_cmpxchg();

        for (i = 0; i < futex_hashsize; i++) {
                atomic_set(&futex_queues[i].waiters, 0);
                plist_head_init(&futex_queues[i].chain);
                spin_lock_init(&futex_queues[i].lock);
        }

        return 0;
}
core_initcall(futex_init);
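
/*
 * Sizing example (illustrative): on a machine with 8 possible CPUs the
 * default is roundup_pow_of_two(256 * 8) = 2048 hash buckets; with
 * CONFIG_BASE_SMALL it is a fixed 16. alloc_large_system_hash() reports
 * the actual log2 size via futex_shift, which is folded back into
 * futex_hashsize so the power-of-two mask in futex_hash() stays correct.
 */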