1 // SPDX-License-Identifier: GPL-2.0-only
4 * Android IPC Subsystem
6 * Copyright (C) 2007-2008 Google, Inc.
12 * There are 3 main spinlocks which must be acquired in the
15 * 1) proc->outer_lock : protects binder_ref
16 * binder_proc_lock() and binder_proc_unlock() are
18 * 2) node->lock : protects most fields of binder_node.
19 * binder_node_lock() and binder_node_unlock() are
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
23 * and all todo lists associated with the binder_proc
24 * (proc->todo, thread->todo, proc->delivered_death and
25 * node->async_todo), as well as thread->transaction_stack
26 * binder_inner_proc_lock() and binder_inner_proc_unlock()
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
70 #include <uapi/linux/android/binder.h>
72 #include <linux/cacheflush.h>
74 #include "binder_internal.h"
75 #include "binder_trace.h"
77 static HLIST_HEAD(binder_deferred_list);
78 static DEFINE_MUTEX(binder_deferred_lock);
80 static HLIST_HEAD(binder_devices);
81 static HLIST_HEAD(binder_procs);
82 static DEFINE_MUTEX(binder_procs_lock);
84 static HLIST_HEAD(binder_dead_nodes);
85 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
87 static struct dentry *binder_debugfs_dir_entry_root;
88 static struct dentry *binder_debugfs_dir_entry_proc;
89 static atomic_t binder_last_id;
91 static int proc_show(struct seq_file *m, void *unused);
92 DEFINE_SHOW_ATTRIBUTE(proc);
94 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
97 BINDER_DEBUG_USER_ERROR = 1U << 0,
98 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
99 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
100 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
101 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
102 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
103 BINDER_DEBUG_READ_WRITE = 1U << 6,
104 BINDER_DEBUG_USER_REFS = 1U << 7,
105 BINDER_DEBUG_THREADS = 1U << 8,
106 BINDER_DEBUG_TRANSACTION = 1U << 9,
107 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
108 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
109 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
110 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
111 BINDER_DEBUG_SPINLOCKS = 1U << 14,
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118 module_param_named(devices, binder_devices_param, charp, 0444);
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
123 static int binder_set_stop_on_user_error(const char *val,
124 const struct kernel_param *kp)
128 ret = param_set_int(val, kp);
129 if (binder_stop_on_user_error < 2)
130 wake_up(&binder_user_error_wait);
133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134 param_get_int, &binder_stop_on_user_error, 0644);
136 #define binder_debug(mask, x...) \
138 if (binder_debug_mask & mask) \
139 pr_info_ratelimited(x); \
142 #define binder_user_error(x...) \
144 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145 pr_info_ratelimited(x); \
146 if (binder_stop_on_user_error) \
147 binder_stop_on_user_error = 2; \
150 #define to_flat_binder_object(hdr) \
151 container_of(hdr, struct flat_binder_object, hdr)
153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
155 #define to_binder_buffer_object(hdr) \
156 container_of(hdr, struct binder_buffer_object, hdr)
158 #define to_binder_fd_array_object(hdr) \
159 container_of(hdr, struct binder_fd_array_object, hdr)
161 static struct binder_stats binder_stats;
163 static inline void binder_stats_deleted(enum binder_stat_types type)
165 atomic_inc(&binder_stats.obj_deleted[type]);
168 static inline void binder_stats_created(enum binder_stat_types type)
170 atomic_inc(&binder_stats.obj_created[type]);
173 struct binder_transaction_log binder_transaction_log;
174 struct binder_transaction_log binder_transaction_log_failed;
176 static struct binder_transaction_log_entry *binder_transaction_log_add(
177 struct binder_transaction_log *log)
179 struct binder_transaction_log_entry *e;
180 unsigned int cur = atomic_inc_return(&log->cur);
182 if (cur >= ARRAY_SIZE(log->entry))
184 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185 WRITE_ONCE(e->debug_id_done, 0);
187 * write-barrier to synchronize access to e->debug_id_done.
188 * We make sure the initialized 0 value is seen before
189 * memset() other fields are zeroed by memset.
192 memset(e, 0, sizeof(*e));
196 enum binder_deferred_state {
197 BINDER_DEFERRED_FLUSH = 0x01,
198 BINDER_DEFERRED_RELEASE = 0x02,
202 BINDER_LOOPER_STATE_REGISTERED = 0x01,
203 BINDER_LOOPER_STATE_ENTERED = 0x02,
204 BINDER_LOOPER_STATE_EXITED = 0x04,
205 BINDER_LOOPER_STATE_INVALID = 0x08,
206 BINDER_LOOPER_STATE_WAITING = 0x10,
207 BINDER_LOOPER_STATE_POLL = 0x20,
211 * binder_proc_lock() - Acquire outer lock for given binder_proc
212 * @proc: struct binder_proc to acquire
214 * Acquires proc->outer_lock. Used to protect binder_ref
215 * structures associated with the given proc.
217 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
219 _binder_proc_lock(struct binder_proc *proc, int line)
220 __acquires(&proc->outer_lock)
222 binder_debug(BINDER_DEBUG_SPINLOCKS,
223 "%s: line=%d\n", __func__, line);
224 spin_lock(&proc->outer_lock);
228 * binder_proc_unlock() - Release spinlock for given binder_proc
229 * @proc: struct binder_proc to acquire
231 * Release lock acquired via binder_proc_lock()
233 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
235 _binder_proc_unlock(struct binder_proc *proc, int line)
236 __releases(&proc->outer_lock)
238 binder_debug(BINDER_DEBUG_SPINLOCKS,
239 "%s: line=%d\n", __func__, line);
240 spin_unlock(&proc->outer_lock);
244 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245 * @proc: struct binder_proc to acquire
247 * Acquires proc->inner_lock. Used to protect todo lists
249 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
251 _binder_inner_proc_lock(struct binder_proc *proc, int line)
252 __acquires(&proc->inner_lock)
254 binder_debug(BINDER_DEBUG_SPINLOCKS,
255 "%s: line=%d\n", __func__, line);
256 spin_lock(&proc->inner_lock);
260 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
261 * @proc: struct binder_proc to acquire
263 * Release lock acquired via binder_inner_proc_lock()
265 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
267 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
268 __releases(&proc->inner_lock)
270 binder_debug(BINDER_DEBUG_SPINLOCKS,
271 "%s: line=%d\n", __func__, line);
272 spin_unlock(&proc->inner_lock);
276 * binder_node_lock() - Acquire spinlock for given binder_node
277 * @node: struct binder_node to acquire
279 * Acquires node->lock. Used to protect binder_node fields
281 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
283 _binder_node_lock(struct binder_node *node, int line)
284 __acquires(&node->lock)
286 binder_debug(BINDER_DEBUG_SPINLOCKS,
287 "%s: line=%d\n", __func__, line);
288 spin_lock(&node->lock);
292 * binder_node_unlock() - Release spinlock for given binder_proc
293 * @node: struct binder_node to acquire
295 * Release lock acquired via binder_node_lock()
297 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
299 _binder_node_unlock(struct binder_node *node, int line)
300 __releases(&node->lock)
302 binder_debug(BINDER_DEBUG_SPINLOCKS,
303 "%s: line=%d\n", __func__, line);
304 spin_unlock(&node->lock);
308 * binder_node_inner_lock() - Acquire node and inner locks
309 * @node: struct binder_node to acquire
311 * Acquires node->lock. If node->proc also acquires
312 * proc->inner_lock. Used to protect binder_node fields
314 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
316 _binder_node_inner_lock(struct binder_node *node, int line)
317 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
319 binder_debug(BINDER_DEBUG_SPINLOCKS,
320 "%s: line=%d\n", __func__, line);
321 spin_lock(&node->lock);
323 binder_inner_proc_lock(node->proc);
325 /* annotation for sparse */
326 __acquire(&node->proc->inner_lock);
330 * binder_node_unlock() - Release node and inner locks
331 * @node: struct binder_node to acquire
333 * Release lock acquired via binder_node_lock()
335 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
337 _binder_node_inner_unlock(struct binder_node *node, int line)
338 __releases(&node->lock) __releases(&node->proc->inner_lock)
340 struct binder_proc *proc = node->proc;
342 binder_debug(BINDER_DEBUG_SPINLOCKS,
343 "%s: line=%d\n", __func__, line);
345 binder_inner_proc_unlock(proc);
347 /* annotation for sparse */
348 __release(&node->proc->inner_lock);
349 spin_unlock(&node->lock);
352 static bool binder_worklist_empty_ilocked(struct list_head *list)
354 return list_empty(list);
358 * binder_worklist_empty() - Check if no items on the work list
359 * @proc: binder_proc associated with list
360 * @list: list to check
362 * Return: true if there are no items on list, else false
364 static bool binder_worklist_empty(struct binder_proc *proc,
365 struct list_head *list)
369 binder_inner_proc_lock(proc);
370 ret = binder_worklist_empty_ilocked(list);
371 binder_inner_proc_unlock(proc);
376 * binder_enqueue_work_ilocked() - Add an item to the work list
377 * @work: struct binder_work to add to list
378 * @target_list: list to add work to
380 * Adds the work to the specified list. Asserts that work
381 * is not already on a list.
383 * Requires the proc->inner_lock to be held.
386 binder_enqueue_work_ilocked(struct binder_work *work,
387 struct list_head *target_list)
389 BUG_ON(target_list == NULL);
390 BUG_ON(work->entry.next && !list_empty(&work->entry));
391 list_add_tail(&work->entry, target_list);
395 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396 * @thread: thread to queue work to
397 * @work: struct binder_work to add to list
399 * Adds the work to the todo list of the thread. Doesn't set the process_todo
400 * flag, which means that (if it wasn't already set) the thread will go to
401 * sleep without handling this work when it calls read.
403 * Requires the proc->inner_lock to be held.
406 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407 struct binder_work *work)
409 WARN_ON(!list_empty(&thread->waiting_thread_node));
410 binder_enqueue_work_ilocked(work, &thread->todo);
414 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415 * @thread: thread to queue work to
416 * @work: struct binder_work to add to list
418 * Adds the work to the todo list of the thread, and enables processing
421 * Requires the proc->inner_lock to be held.
424 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425 struct binder_work *work)
427 WARN_ON(!list_empty(&thread->waiting_thread_node));
428 binder_enqueue_work_ilocked(work, &thread->todo);
429 thread->process_todo = true;
433 * binder_enqueue_thread_work() - Add an item to the thread work list
434 * @thread: thread to queue work to
435 * @work: struct binder_work to add to list
437 * Adds the work to the todo list of the thread, and enables processing
441 binder_enqueue_thread_work(struct binder_thread *thread,
442 struct binder_work *work)
444 binder_inner_proc_lock(thread->proc);
445 binder_enqueue_thread_work_ilocked(thread, work);
446 binder_inner_proc_unlock(thread->proc);
450 binder_dequeue_work_ilocked(struct binder_work *work)
452 list_del_init(&work->entry);
456 * binder_dequeue_work() - Removes an item from the work list
457 * @proc: binder_proc associated with list
458 * @work: struct binder_work to remove from list
460 * Removes the specified work item from whatever list it is on.
461 * Can safely be called if work is not on any list.
464 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
466 binder_inner_proc_lock(proc);
467 binder_dequeue_work_ilocked(work);
468 binder_inner_proc_unlock(proc);
471 static struct binder_work *binder_dequeue_work_head_ilocked(
472 struct list_head *list)
474 struct binder_work *w;
476 w = list_first_entry_or_null(list, struct binder_work, entry);
478 list_del_init(&w->entry);
483 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484 static void binder_free_thread(struct binder_thread *thread);
485 static void binder_free_proc(struct binder_proc *proc);
486 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
488 static bool binder_has_work_ilocked(struct binder_thread *thread,
491 return thread->process_todo ||
492 thread->looper_need_return ||
494 !binder_worklist_empty_ilocked(&thread->proc->todo));
497 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
501 binder_inner_proc_lock(thread->proc);
502 has_work = binder_has_work_ilocked(thread, do_proc_work);
503 binder_inner_proc_unlock(thread->proc);
508 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
510 return !thread->transaction_stack &&
511 binder_worklist_empty_ilocked(&thread->todo) &&
512 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513 BINDER_LOOPER_STATE_REGISTERED));
516 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
520 struct binder_thread *thread;
522 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523 thread = rb_entry(n, struct binder_thread, rb_node);
524 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525 binder_available_for_proc_work_ilocked(thread)) {
527 wake_up_interruptible_sync(&thread->wait);
529 wake_up_interruptible(&thread->wait);
535 * binder_select_thread_ilocked() - selects a thread for doing proc work.
536 * @proc: process to select a thread from
538 * Note that calling this function moves the thread off the waiting_threads
539 * list, so it can only be woken up by the caller of this function, or a
540 * signal. Therefore, callers *should* always wake up the thread this function
543 * Return: If there's a thread currently waiting for process work,
544 * returns that thread. Otherwise returns NULL.
546 static struct binder_thread *
547 binder_select_thread_ilocked(struct binder_proc *proc)
549 struct binder_thread *thread;
551 assert_spin_locked(&proc->inner_lock);
552 thread = list_first_entry_or_null(&proc->waiting_threads,
553 struct binder_thread,
554 waiting_thread_node);
557 list_del_init(&thread->waiting_thread_node);
563 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564 * @proc: process to wake up a thread in
565 * @thread: specific thread to wake-up (may be NULL)
566 * @sync: whether to do a synchronous wake-up
568 * This function wakes up a thread in the @proc process.
569 * The caller may provide a specific thread to wake-up in
570 * the @thread parameter. If @thread is NULL, this function
571 * will wake up threads that have called poll().
573 * Note that for this function to work as expected, callers
574 * should first call binder_select_thread() to find a thread
575 * to handle the work (if they don't have a thread already),
576 * and pass the result into the @thread parameter.
578 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579 struct binder_thread *thread,
582 assert_spin_locked(&proc->inner_lock);
586 wake_up_interruptible_sync(&thread->wait);
588 wake_up_interruptible(&thread->wait);
592 /* Didn't find a thread waiting for proc work; this can happen
594 * 1. All threads are busy handling transactions
595 * In that case, one of those threads should call back into
596 * the kernel driver soon and pick up this work.
597 * 2. Threads are using the (e)poll interface, in which case
598 * they may be blocked on the waitqueue without having been
599 * added to waiting_threads. For this case, we just iterate
600 * over all threads not handling transaction work, and
601 * wake them all up. We wake all because we don't know whether
602 * a thread that called into (e)poll is handling non-binder
605 binder_wakeup_poll_threads_ilocked(proc, sync);
608 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
610 struct binder_thread *thread = binder_select_thread_ilocked(proc);
612 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
615 static void binder_set_nice(long nice)
619 if (can_nice(current, nice)) {
620 set_user_nice(current, nice);
623 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625 "%d: nice value %ld not allowed use %ld instead\n",
626 current->pid, nice, min_nice);
627 set_user_nice(current, min_nice);
628 if (min_nice <= MAX_NICE)
630 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
633 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634 binder_uintptr_t ptr)
636 struct rb_node *n = proc->nodes.rb_node;
637 struct binder_node *node;
639 assert_spin_locked(&proc->inner_lock);
642 node = rb_entry(n, struct binder_node, rb_node);
646 else if (ptr > node->ptr)
650 * take an implicit weak reference
651 * to ensure node stays alive until
652 * call to binder_put_node()
654 binder_inc_node_tmpref_ilocked(node);
661 static struct binder_node *binder_get_node(struct binder_proc *proc,
662 binder_uintptr_t ptr)
664 struct binder_node *node;
666 binder_inner_proc_lock(proc);
667 node = binder_get_node_ilocked(proc, ptr);
668 binder_inner_proc_unlock(proc);
672 static struct binder_node *binder_init_node_ilocked(
673 struct binder_proc *proc,
674 struct binder_node *new_node,
675 struct flat_binder_object *fp)
677 struct rb_node **p = &proc->nodes.rb_node;
678 struct rb_node *parent = NULL;
679 struct binder_node *node;
680 binder_uintptr_t ptr = fp ? fp->binder : 0;
681 binder_uintptr_t cookie = fp ? fp->cookie : 0;
682 __u32 flags = fp ? fp->flags : 0;
684 assert_spin_locked(&proc->inner_lock);
689 node = rb_entry(parent, struct binder_node, rb_node);
693 else if (ptr > node->ptr)
697 * A matching node is already in
698 * the rb tree. Abandon the init
701 binder_inc_node_tmpref_ilocked(node);
706 binder_stats_created(BINDER_STAT_NODE);
708 rb_link_node(&node->rb_node, parent, p);
709 rb_insert_color(&node->rb_node, &proc->nodes);
710 node->debug_id = atomic_inc_return(&binder_last_id);
713 node->cookie = cookie;
714 node->work.type = BINDER_WORK_NODE;
715 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718 spin_lock_init(&node->lock);
719 INIT_LIST_HEAD(&node->work.entry);
720 INIT_LIST_HEAD(&node->async_todo);
721 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722 "%d:%d node %d u%016llx c%016llx created\n",
723 proc->pid, current->pid, node->debug_id,
724 (u64)node->ptr, (u64)node->cookie);
729 static struct binder_node *binder_new_node(struct binder_proc *proc,
730 struct flat_binder_object *fp)
732 struct binder_node *node;
733 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
737 binder_inner_proc_lock(proc);
738 node = binder_init_node_ilocked(proc, new_node, fp);
739 binder_inner_proc_unlock(proc);
740 if (node != new_node)
742 * The node was already added by another thread
749 static void binder_free_node(struct binder_node *node)
752 binder_stats_deleted(BINDER_STAT_NODE);
755 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
757 struct list_head *target_list)
759 struct binder_proc *proc = node->proc;
761 assert_spin_locked(&node->lock);
763 assert_spin_locked(&proc->inner_lock);
766 if (target_list == NULL &&
767 node->internal_strong_refs == 0 &&
769 node == node->proc->context->binder_context_mgr_node &&
770 node->has_strong_ref)) {
771 pr_err("invalid inc strong node for %d\n",
775 node->internal_strong_refs++;
777 node->local_strong_refs++;
778 if (!node->has_strong_ref && target_list) {
779 struct binder_thread *thread = container_of(target_list,
780 struct binder_thread, todo);
781 binder_dequeue_work_ilocked(&node->work);
782 BUG_ON(&thread->todo != target_list);
783 binder_enqueue_deferred_thread_work_ilocked(thread,
788 node->local_weak_refs++;
789 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790 if (target_list == NULL) {
791 pr_err("invalid inc weak node for %d\n",
798 binder_enqueue_work_ilocked(&node->work, target_list);
804 static int binder_inc_node(struct binder_node *node, int strong, int internal,
805 struct list_head *target_list)
809 binder_node_inner_lock(node);
810 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811 binder_node_inner_unlock(node);
816 static bool binder_dec_node_nilocked(struct binder_node *node,
817 int strong, int internal)
819 struct binder_proc *proc = node->proc;
821 assert_spin_locked(&node->lock);
823 assert_spin_locked(&proc->inner_lock);
826 node->internal_strong_refs--;
828 node->local_strong_refs--;
829 if (node->local_strong_refs || node->internal_strong_refs)
833 node->local_weak_refs--;
834 if (node->local_weak_refs || node->tmp_refs ||
835 !hlist_empty(&node->refs))
839 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840 if (list_empty(&node->work.entry)) {
841 binder_enqueue_work_ilocked(&node->work, &proc->todo);
842 binder_wakeup_proc_ilocked(proc);
845 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846 !node->local_weak_refs && !node->tmp_refs) {
848 binder_dequeue_work_ilocked(&node->work);
849 rb_erase(&node->rb_node, &proc->nodes);
850 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851 "refless node %d deleted\n",
854 BUG_ON(!list_empty(&node->work.entry));
855 spin_lock(&binder_dead_nodes_lock);
857 * tmp_refs could have changed so
860 if (node->tmp_refs) {
861 spin_unlock(&binder_dead_nodes_lock);
864 hlist_del(&node->dead_node);
865 spin_unlock(&binder_dead_nodes_lock);
866 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867 "dead node %d deleted\n",
876 static void binder_dec_node(struct binder_node *node, int strong, int internal)
880 binder_node_inner_lock(node);
881 free_node = binder_dec_node_nilocked(node, strong, internal);
882 binder_node_inner_unlock(node);
884 binder_free_node(node);
887 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
890 * No call to binder_inc_node() is needed since we
891 * don't need to inform userspace of any changes to
898 * binder_inc_node_tmpref() - take a temporary reference on node
899 * @node: node to reference
901 * Take reference on node to prevent the node from being freed
902 * while referenced only by a local variable. The inner lock is
903 * needed to serialize with the node work on the queue (which
904 * isn't needed after the node is dead). If the node is dead
905 * (node->proc is NULL), use binder_dead_nodes_lock to protect
906 * node->tmp_refs against dead-node-only cases where the node
907 * lock cannot be acquired (eg traversing the dead node list to
910 static void binder_inc_node_tmpref(struct binder_node *node)
912 binder_node_lock(node);
914 binder_inner_proc_lock(node->proc);
916 spin_lock(&binder_dead_nodes_lock);
917 binder_inc_node_tmpref_ilocked(node);
919 binder_inner_proc_unlock(node->proc);
921 spin_unlock(&binder_dead_nodes_lock);
922 binder_node_unlock(node);
926 * binder_dec_node_tmpref() - remove a temporary reference on node
927 * @node: node to reference
929 * Release temporary reference on node taken via binder_inc_node_tmpref()
931 static void binder_dec_node_tmpref(struct binder_node *node)
935 binder_node_inner_lock(node);
937 spin_lock(&binder_dead_nodes_lock);
939 __acquire(&binder_dead_nodes_lock);
941 BUG_ON(node->tmp_refs < 0);
943 spin_unlock(&binder_dead_nodes_lock);
945 __release(&binder_dead_nodes_lock);
947 * Call binder_dec_node() to check if all refcounts are 0
948 * and cleanup is needed. Calling with strong=0 and internal=1
949 * causes no actual reference to be released in binder_dec_node().
950 * If that changes, a change is needed here too.
952 free_node = binder_dec_node_nilocked(node, 0, 1);
953 binder_node_inner_unlock(node);
955 binder_free_node(node);
958 static void binder_put_node(struct binder_node *node)
960 binder_dec_node_tmpref(node);
963 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964 u32 desc, bool need_strong_ref)
966 struct rb_node *n = proc->refs_by_desc.rb_node;
967 struct binder_ref *ref;
970 ref = rb_entry(n, struct binder_ref, rb_node_desc);
972 if (desc < ref->data.desc) {
974 } else if (desc > ref->data.desc) {
976 } else if (need_strong_ref && !ref->data.strong) {
977 binder_user_error("tried to use weak ref as strong ref\n");
987 * binder_get_ref_for_node_olocked() - get the ref associated with given node
988 * @proc: binder_proc that owns the ref
989 * @node: binder_node of target
990 * @new_ref: newly allocated binder_ref to be initialized or %NULL
992 * Look up the ref for the given node and return it if it exists
994 * If it doesn't exist and the caller provides a newly allocated
995 * ref, initialize the fields of the newly allocated ref and insert
996 * into the given proc rb_trees and node refs list.
998 * Return: the ref for node. It is possible that another thread
999 * allocated/initialized the ref first in which case the
1000 * returned ref would be different than the passed-in
1001 * new_ref. new_ref must be kfree'd by the caller in
1004 static struct binder_ref *binder_get_ref_for_node_olocked(
1005 struct binder_proc *proc,
1006 struct binder_node *node,
1007 struct binder_ref *new_ref)
1009 struct binder_context *context = proc->context;
1010 struct rb_node **p = &proc->refs_by_node.rb_node;
1011 struct rb_node *parent = NULL;
1012 struct binder_ref *ref;
1017 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1019 if (node < ref->node)
1021 else if (node > ref->node)
1022 p = &(*p)->rb_right;
1029 binder_stats_created(BINDER_STAT_REF);
1030 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031 new_ref->proc = proc;
1032 new_ref->node = node;
1033 rb_link_node(&new_ref->rb_node_node, parent, p);
1034 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1036 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039 if (ref->data.desc > new_ref->data.desc)
1041 new_ref->data.desc = ref->data.desc + 1;
1044 p = &proc->refs_by_desc.rb_node;
1047 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1049 if (new_ref->data.desc < ref->data.desc)
1051 else if (new_ref->data.desc > ref->data.desc)
1052 p = &(*p)->rb_right;
1056 rb_link_node(&new_ref->rb_node_desc, parent, p);
1057 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1059 binder_node_lock(node);
1060 hlist_add_head(&new_ref->node_entry, &node->refs);
1062 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063 "%d new ref %d desc %d for node %d\n",
1064 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1066 binder_node_unlock(node);
1070 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1072 bool delete_node = false;
1074 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075 "%d delete ref %d desc %d for node %d\n",
1076 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077 ref->node->debug_id);
1079 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1082 binder_node_inner_lock(ref->node);
1083 if (ref->data.strong)
1084 binder_dec_node_nilocked(ref->node, 1, 1);
1086 hlist_del(&ref->node_entry);
1087 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088 binder_node_inner_unlock(ref->node);
1090 * Clear ref->node unless we want the caller to free the node
1094 * The caller uses ref->node to determine
1095 * whether the node needs to be freed. Clear
1096 * it since the node is still alive.
1102 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103 "%d delete ref %d desc %d has death notification\n",
1104 ref->proc->pid, ref->data.debug_id,
1106 binder_dequeue_work(ref->proc, &ref->death->work);
1107 binder_stats_deleted(BINDER_STAT_DEATH);
1109 binder_stats_deleted(BINDER_STAT_REF);
1113 * binder_inc_ref_olocked() - increment the ref for given handle
1114 * @ref: ref to be incremented
1115 * @strong: if true, strong increment, else weak
1116 * @target_list: list to queue node work on
1118 * Increment the ref. @ref->proc->outer_lock must be held on entry
1120 * Return: 0, if successful, else errno
1122 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123 struct list_head *target_list)
1128 if (ref->data.strong == 0) {
1129 ret = binder_inc_node(ref->node, 1, 1, target_list);
1135 if (ref->data.weak == 0) {
1136 ret = binder_inc_node(ref->node, 0, 1, target_list);
1146 * binder_dec_ref() - dec the ref for given handle
1147 * @ref: ref to be decremented
1148 * @strong: if true, strong decrement, else weak
1150 * Decrement the ref.
1152 * Return: true if ref is cleaned up and ready to be freed
1154 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1157 if (ref->data.strong == 0) {
1158 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159 ref->proc->pid, ref->data.debug_id,
1160 ref->data.desc, ref->data.strong,
1165 if (ref->data.strong == 0)
1166 binder_dec_node(ref->node, strong, 1);
1168 if (ref->data.weak == 0) {
1169 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170 ref->proc->pid, ref->data.debug_id,
1171 ref->data.desc, ref->data.strong,
1177 if (ref->data.strong == 0 && ref->data.weak == 0) {
1178 binder_cleanup_ref_olocked(ref);
1185 * binder_get_node_from_ref() - get the node from the given proc/desc
1186 * @proc: proc containing the ref
1187 * @desc: the handle associated with the ref
1188 * @need_strong_ref: if true, only return node if ref is strong
1189 * @rdata: the id/refcount data for the ref
1191 * Given a proc and ref handle, return the associated binder_node
1193 * Return: a binder_node or NULL if not found or not strong when strong required
1195 static struct binder_node *binder_get_node_from_ref(
1196 struct binder_proc *proc,
1197 u32 desc, bool need_strong_ref,
1198 struct binder_ref_data *rdata)
1200 struct binder_node *node;
1201 struct binder_ref *ref;
1203 binder_proc_lock(proc);
1204 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1209 * Take an implicit reference on the node to ensure
1210 * it stays alive until the call to binder_put_node()
1212 binder_inc_node_tmpref(node);
1215 binder_proc_unlock(proc);
1220 binder_proc_unlock(proc);
1225 * binder_free_ref() - free the binder_ref
1228 * Free the binder_ref. Free the binder_node indicated by ref->node
1229 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1231 static void binder_free_ref(struct binder_ref *ref)
1234 binder_free_node(ref->node);
1240 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241 * @proc: proc containing the ref
1242 * @desc: the handle associated with the ref
1243 * @increment: true=inc reference, false=dec reference
1244 * @strong: true=strong reference, false=weak reference
1245 * @rdata: the id/refcount data for the ref
1247 * Given a proc and ref handle, increment or decrement the ref
1248 * according to "increment" arg.
1250 * Return: 0 if successful, else errno
1252 static int binder_update_ref_for_handle(struct binder_proc *proc,
1253 uint32_t desc, bool increment, bool strong,
1254 struct binder_ref_data *rdata)
1257 struct binder_ref *ref;
1258 bool delete_ref = false;
1260 binder_proc_lock(proc);
1261 ref = binder_get_ref_olocked(proc, desc, strong);
1267 ret = binder_inc_ref_olocked(ref, strong, NULL);
1269 delete_ref = binder_dec_ref_olocked(ref, strong);
1273 binder_proc_unlock(proc);
1276 binder_free_ref(ref);
1280 binder_proc_unlock(proc);
1285 * binder_dec_ref_for_handle() - dec the ref for given handle
1286 * @proc: proc containing the ref
1287 * @desc: the handle associated with the ref
1288 * @strong: true=strong reference, false=weak reference
1289 * @rdata: the id/refcount data for the ref
1291 * Just calls binder_update_ref_for_handle() to decrement the ref.
1293 * Return: 0 if successful, else errno
1295 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1298 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1303 * binder_inc_ref_for_node() - increment the ref for given proc/node
1304 * @proc: proc containing the ref
1305 * @node: target node
1306 * @strong: true=strong reference, false=weak reference
1307 * @target_list: worklist to use if node is incremented
1308 * @rdata: the id/refcount data for the ref
1310 * Given a proc and node, increment the ref. Create the ref if it
1311 * doesn't already exist
1313 * Return: 0 if successful, else errno
1315 static int binder_inc_ref_for_node(struct binder_proc *proc,
1316 struct binder_node *node,
1318 struct list_head *target_list,
1319 struct binder_ref_data *rdata)
1321 struct binder_ref *ref;
1322 struct binder_ref *new_ref = NULL;
1325 binder_proc_lock(proc);
1326 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1328 binder_proc_unlock(proc);
1329 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1332 binder_proc_lock(proc);
1333 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1335 ret = binder_inc_ref_olocked(ref, strong, target_list);
1337 binder_proc_unlock(proc);
1338 if (new_ref && ref != new_ref)
1340 * Another thread created the ref first so
1341 * free the one we allocated
1347 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348 struct binder_transaction *t)
1350 BUG_ON(!target_thread);
1351 assert_spin_locked(&target_thread->proc->inner_lock);
1352 BUG_ON(target_thread->transaction_stack != t);
1353 BUG_ON(target_thread->transaction_stack->from != target_thread);
1354 target_thread->transaction_stack =
1355 target_thread->transaction_stack->from_parent;
1360 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361 * @thread: thread to decrement
1363 * A thread needs to be kept alive while being used to create or
1364 * handle a transaction. binder_get_txn_from() is used to safely
1365 * extract t->from from a binder_transaction and keep the thread
1366 * indicated by t->from from being freed. When done with that
1367 * binder_thread, this function is called to decrement the
1368 * tmp_ref and free if appropriate (thread has been released
1369 * and no transaction being processed by the driver)
1371 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1374 * atomic is used to protect the counter value while
1375 * it cannot reach zero or thread->is_dead is false
1377 binder_inner_proc_lock(thread->proc);
1378 atomic_dec(&thread->tmp_ref);
1379 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380 binder_inner_proc_unlock(thread->proc);
1381 binder_free_thread(thread);
1384 binder_inner_proc_unlock(thread->proc);
1388 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389 * @proc: proc to decrement
1391 * A binder_proc needs to be kept alive while being used to create or
1392 * handle a transaction. proc->tmp_ref is incremented when
1393 * creating a new transaction or the binder_proc is currently in-use
1394 * by threads that are being released. When done with the binder_proc,
1395 * this function is called to decrement the counter and free the
1396 * proc if appropriate (proc has been released, all threads have
1397 * been released and not currenly in-use to process a transaction).
1399 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1401 binder_inner_proc_lock(proc);
1403 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1405 binder_inner_proc_unlock(proc);
1406 binder_free_proc(proc);
1409 binder_inner_proc_unlock(proc);
1413 * binder_get_txn_from() - safely extract the "from" thread in transaction
1414 * @t: binder transaction for t->from
1416 * Atomically return the "from" thread and increment the tmp_ref
1417 * count for the thread to ensure it stays alive until
1418 * binder_thread_dec_tmpref() is called.
1420 * Return: the value of t->from
1422 static struct binder_thread *binder_get_txn_from(
1423 struct binder_transaction *t)
1425 struct binder_thread *from;
1427 spin_lock(&t->lock);
1430 atomic_inc(&from->tmp_ref);
1431 spin_unlock(&t->lock);
1436 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437 * @t: binder transaction for t->from
1439 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440 * to guarantee that the thread cannot be released while operating on it.
1441 * The caller must call binder_inner_proc_unlock() to release the inner lock
1442 * as well as call binder_dec_thread_txn() to release the reference.
1444 * Return: the value of t->from
1446 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447 struct binder_transaction *t)
1448 __acquires(&t->from->proc->inner_lock)
1450 struct binder_thread *from;
1452 from = binder_get_txn_from(t);
1454 __acquire(&from->proc->inner_lock);
1457 binder_inner_proc_lock(from->proc);
1459 BUG_ON(from != t->from);
1462 binder_inner_proc_unlock(from->proc);
1463 __acquire(&from->proc->inner_lock);
1464 binder_thread_dec_tmpref(from);
1469 * binder_free_txn_fixups() - free unprocessed fd fixups
1470 * @t: binder transaction for t->from
1472 * If the transaction is being torn down prior to being
1473 * processed by the target process, free all of the
1474 * fd fixups and fput the file structs. It is safe to
1475 * call this function after the fixups have been
1476 * processed -- in that case, the list will be empty.
1478 static void binder_free_txn_fixups(struct binder_transaction *t)
1480 struct binder_txn_fd_fixup *fixup, *tmp;
1482 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1484 list_del(&fixup->fixup_entry);
1489 static void binder_txn_latency_free(struct binder_transaction *t)
1491 int from_proc, from_thread, to_proc, to_thread;
1493 spin_lock(&t->lock);
1494 from_proc = t->from ? t->from->proc->pid : 0;
1495 from_thread = t->from ? t->from->pid : 0;
1496 to_proc = t->to_proc ? t->to_proc->pid : 0;
1497 to_thread = t->to_thread ? t->to_thread->pid : 0;
1498 spin_unlock(&t->lock);
1500 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1503 static void binder_free_transaction(struct binder_transaction *t)
1505 struct binder_proc *target_proc = t->to_proc;
1508 binder_inner_proc_lock(target_proc);
1509 target_proc->outstanding_txns--;
1510 if (target_proc->outstanding_txns < 0)
1511 pr_warn("%s: Unexpected outstanding_txns %d\n",
1512 __func__, target_proc->outstanding_txns);
1513 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514 wake_up_interruptible_all(&target_proc->freeze_wait);
1516 t->buffer->transaction = NULL;
1517 binder_inner_proc_unlock(target_proc);
1519 if (trace_binder_txn_latency_free_enabled())
1520 binder_txn_latency_free(t);
1522 * If the transaction has no target_proc, then
1523 * t->buffer->transaction has already been cleared.
1525 binder_free_txn_fixups(t);
1527 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1530 static void binder_send_failed_reply(struct binder_transaction *t,
1531 uint32_t error_code)
1533 struct binder_thread *target_thread;
1534 struct binder_transaction *next;
1536 BUG_ON(t->flags & TF_ONE_WAY);
1538 target_thread = binder_get_txn_from_and_acq_inner(t);
1539 if (target_thread) {
1540 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541 "send failed reply for transaction %d to %d:%d\n",
1543 target_thread->proc->pid,
1544 target_thread->pid);
1546 binder_pop_transaction_ilocked(target_thread, t);
1547 if (target_thread->reply_error.cmd == BR_OK) {
1548 target_thread->reply_error.cmd = error_code;
1549 binder_enqueue_thread_work_ilocked(
1551 &target_thread->reply_error.work);
1552 wake_up_interruptible(&target_thread->wait);
1555 * Cannot get here for normal operation, but
1556 * we can if multiple synchronous transactions
1557 * are sent without blocking for responses.
1558 * Just ignore the 2nd error in this case.
1560 pr_warn("Unexpected reply error: %u\n",
1561 target_thread->reply_error.cmd);
1563 binder_inner_proc_unlock(target_thread->proc);
1564 binder_thread_dec_tmpref(target_thread);
1565 binder_free_transaction(t);
1568 __release(&target_thread->proc->inner_lock);
1569 next = t->from_parent;
1571 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572 "send failed reply for transaction %d, target dead\n",
1575 binder_free_transaction(t);
1577 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578 "reply failed, no target thread at root\n");
1582 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583 "reply failed, no target thread -- retry %d\n",
1589 * binder_cleanup_transaction() - cleans up undelivered transaction
1590 * @t: transaction that needs to be cleaned up
1591 * @reason: reason the transaction wasn't delivered
1592 * @error_code: error to return to caller (if synchronous call)
1594 static void binder_cleanup_transaction(struct binder_transaction *t,
1596 uint32_t error_code)
1598 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599 binder_send_failed_reply(t, error_code);
1601 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602 "undelivered transaction %d, %s\n",
1603 t->debug_id, reason);
1604 binder_free_transaction(t);
1609 * binder_get_object() - gets object and checks for valid metadata
1610 * @proc: binder_proc owning the buffer
1611 * @u: sender's user pointer to base of buffer
1612 * @buffer: binder_buffer that we're parsing.
1613 * @offset: offset in the @buffer at which to validate an object.
1614 * @object: struct binder_object to read into
1616 * Copy the binder object at the given offset into @object. If @u is
1617 * provided then the copy is from the sender's buffer. If not, then
1618 * it is copied from the target's @buffer.
1620 * Return: If there's a valid metadata object at @offset, the
1621 * size of that object. Otherwise, it returns zero. The object
1622 * is read into the struct binder_object pointed to by @object.
1624 static size_t binder_get_object(struct binder_proc *proc,
1625 const void __user *u,
1626 struct binder_buffer *buffer,
1627 unsigned long offset,
1628 struct binder_object *object)
1631 struct binder_object_header *hdr;
1632 size_t object_size = 0;
1634 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1635 if (offset > buffer->data_size || read_size < sizeof(*hdr))
1638 if (copy_from_user(object, u + offset, read_size))
1641 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1646 /* Ok, now see if we read a complete object. */
1648 switch (hdr->type) {
1649 case BINDER_TYPE_BINDER:
1650 case BINDER_TYPE_WEAK_BINDER:
1651 case BINDER_TYPE_HANDLE:
1652 case BINDER_TYPE_WEAK_HANDLE:
1653 object_size = sizeof(struct flat_binder_object);
1655 case BINDER_TYPE_FD:
1656 object_size = sizeof(struct binder_fd_object);
1658 case BINDER_TYPE_PTR:
1659 object_size = sizeof(struct binder_buffer_object);
1661 case BINDER_TYPE_FDA:
1662 object_size = sizeof(struct binder_fd_array_object);
1667 if (offset <= buffer->data_size - object_size &&
1668 buffer->data_size >= object_size)
1675 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1676 * @proc: binder_proc owning the buffer
1677 * @b: binder_buffer containing the object
1678 * @object: struct binder_object to read into
1679 * @index: index in offset array at which the binder_buffer_object is
1681 * @start_offset: points to the start of the offset array
1682 * @object_offsetp: offset of @object read from @b
1683 * @num_valid: the number of valid offsets in the offset array
1685 * Return: If @index is within the valid range of the offset array
1686 * described by @start and @num_valid, and if there's a valid
1687 * binder_buffer_object at the offset found in index @index
1688 * of the offset array, that object is returned. Otherwise,
1689 * %NULL is returned.
1690 * Note that the offset found in index @index itself is not
1691 * verified; this function assumes that @num_valid elements
1692 * from @start were previously verified to have valid offsets.
1693 * If @object_offsetp is non-NULL, then the offset within
1694 * @b is written to it.
1696 static struct binder_buffer_object *binder_validate_ptr(
1697 struct binder_proc *proc,
1698 struct binder_buffer *b,
1699 struct binder_object *object,
1700 binder_size_t index,
1701 binder_size_t start_offset,
1702 binder_size_t *object_offsetp,
1703 binder_size_t num_valid)
1706 binder_size_t object_offset;
1707 unsigned long buffer_offset;
1709 if (index >= num_valid)
1712 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1713 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1715 sizeof(object_offset)))
1717 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1718 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1721 *object_offsetp = object_offset;
1723 return &object->bbo;
1727 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1728 * @proc: binder_proc owning the buffer
1729 * @b: transaction buffer
1730 * @objects_start_offset: offset to start of objects buffer
1731 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1732 * @fixup_offset: start offset in @buffer to fix up
1733 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1734 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1736 * Return: %true if a fixup in buffer @buffer at offset @offset is
1739 * For safety reasons, we only allow fixups inside a buffer to happen
1740 * at increasing offsets; additionally, we only allow fixup on the last
1741 * buffer object that was verified, or one of its parents.
1743 * Example of what is allowed:
1746 * B (parent = A, offset = 0)
1747 * C (parent = A, offset = 16)
1748 * D (parent = C, offset = 0)
1749 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1751 * Examples of what is not allowed:
1753 * Decreasing offsets within the same parent:
1755 * C (parent = A, offset = 16)
1756 * B (parent = A, offset = 0) // decreasing offset within A
1758 * Referring to a parent that wasn't the last object or any of its parents:
1760 * B (parent = A, offset = 0)
1761 * C (parent = A, offset = 0)
1762 * C (parent = A, offset = 16)
1763 * D (parent = B, offset = 0) // B is not A or any of A's parents
1765 static bool binder_validate_fixup(struct binder_proc *proc,
1766 struct binder_buffer *b,
1767 binder_size_t objects_start_offset,
1768 binder_size_t buffer_obj_offset,
1769 binder_size_t fixup_offset,
1770 binder_size_t last_obj_offset,
1771 binder_size_t last_min_offset)
1773 if (!last_obj_offset) {
1774 /* Nothing to fix up in */
1778 while (last_obj_offset != buffer_obj_offset) {
1779 unsigned long buffer_offset;
1780 struct binder_object last_object;
1781 struct binder_buffer_object *last_bbo;
1782 size_t object_size = binder_get_object(proc, NULL, b,
1785 if (object_size != sizeof(*last_bbo))
1788 last_bbo = &last_object.bbo;
1790 * Safe to retrieve the parent of last_obj, since it
1791 * was already previously verified by the driver.
1793 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1795 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1796 buffer_offset = objects_start_offset +
1797 sizeof(binder_size_t) * last_bbo->parent;
1798 if (binder_alloc_copy_from_buffer(&proc->alloc,
1801 sizeof(last_obj_offset)))
1804 return (fixup_offset >= last_min_offset);
1808 * struct binder_task_work_cb - for deferred close
1810 * @twork: callback_head for task work
1813 * Structure to pass task work to be handled after
1814 * returning from binder_ioctl() via task_work_add().
1816 struct binder_task_work_cb {
1817 struct callback_head twork;
1822 * binder_do_fd_close() - close list of file descriptors
1823 * @twork: callback head for task work
1825 * It is not safe to call ksys_close() during the binder_ioctl()
1826 * function if there is a chance that binder's own file descriptor
1827 * might be closed. This is to meet the requirements for using
1828 * fdget() (see comments for __fget_light()). Therefore use
1829 * task_work_add() to schedule the close operation once we have
1830 * returned from binder_ioctl(). This function is a callback
1831 * for that mechanism and does the actual ksys_close() on the
1832 * given file descriptor.
1834 static void binder_do_fd_close(struct callback_head *twork)
1836 struct binder_task_work_cb *twcb = container_of(twork,
1837 struct binder_task_work_cb, twork);
1844 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1845 * @fd: file-descriptor to close
1847 * See comments in binder_do_fd_close(). This function is used to schedule
1848 * a file-descriptor to be closed after returning from binder_ioctl().
1850 static void binder_deferred_fd_close(int fd)
1852 struct binder_task_work_cb *twcb;
1854 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1857 init_task_work(&twcb->twork, binder_do_fd_close);
1858 twcb->file = close_fd_get_file(fd);
1860 // pin it until binder_do_fd_close(); see comments there
1861 get_file(twcb->file);
1862 filp_close(twcb->file, current->files);
1863 task_work_add(current, &twcb->twork, TWA_RESUME);
1869 static void binder_transaction_buffer_release(struct binder_proc *proc,
1870 struct binder_thread *thread,
1871 struct binder_buffer *buffer,
1872 binder_size_t failed_at,
1875 int debug_id = buffer->debug_id;
1876 binder_size_t off_start_offset, buffer_offset, off_end_offset;
1878 binder_debug(BINDER_DEBUG_TRANSACTION,
1879 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1880 proc->pid, buffer->debug_id,
1881 buffer->data_size, buffer->offsets_size,
1882 (unsigned long long)failed_at);
1884 if (buffer->target_node)
1885 binder_dec_node(buffer->target_node, 1, 0);
1887 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1888 off_end_offset = is_failure && failed_at ? failed_at :
1889 off_start_offset + buffer->offsets_size;
1890 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1891 buffer_offset += sizeof(binder_size_t)) {
1892 struct binder_object_header *hdr;
1893 size_t object_size = 0;
1894 struct binder_object object;
1895 binder_size_t object_offset;
1897 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1898 buffer, buffer_offset,
1899 sizeof(object_offset)))
1900 object_size = binder_get_object(proc, NULL, buffer,
1901 object_offset, &object);
1902 if (object_size == 0) {
1903 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1904 debug_id, (u64)object_offset, buffer->data_size);
1908 switch (hdr->type) {
1909 case BINDER_TYPE_BINDER:
1910 case BINDER_TYPE_WEAK_BINDER: {
1911 struct flat_binder_object *fp;
1912 struct binder_node *node;
1914 fp = to_flat_binder_object(hdr);
1915 node = binder_get_node(proc, fp->binder);
1917 pr_err("transaction release %d bad node %016llx\n",
1918 debug_id, (u64)fp->binder);
1921 binder_debug(BINDER_DEBUG_TRANSACTION,
1922 " node %d u%016llx\n",
1923 node->debug_id, (u64)node->ptr);
1924 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1926 binder_put_node(node);
1928 case BINDER_TYPE_HANDLE:
1929 case BINDER_TYPE_WEAK_HANDLE: {
1930 struct flat_binder_object *fp;
1931 struct binder_ref_data rdata;
1934 fp = to_flat_binder_object(hdr);
1935 ret = binder_dec_ref_for_handle(proc, fp->handle,
1936 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1939 pr_err("transaction release %d bad handle %d, ret = %d\n",
1940 debug_id, fp->handle, ret);
1943 binder_debug(BINDER_DEBUG_TRANSACTION,
1944 " ref %d desc %d\n",
1945 rdata.debug_id, rdata.desc);
1948 case BINDER_TYPE_FD: {
1950 * No need to close the file here since user-space
1951 * closes it for successfully delivered
1952 * transactions. For transactions that weren't
1953 * delivered, the new fd was never allocated so
1954 * there is no need to close and the fput on the
1955 * file is done when the transaction is torn
1959 case BINDER_TYPE_PTR:
1961 * Nothing to do here, this will get cleaned up when the
1962 * transaction buffer gets freed
1965 case BINDER_TYPE_FDA: {
1966 struct binder_fd_array_object *fda;
1967 struct binder_buffer_object *parent;
1968 struct binder_object ptr_object;
1969 binder_size_t fda_offset;
1971 binder_size_t fd_buf_size;
1972 binder_size_t num_valid;
1976 * The fd fixups have not been applied so no
1977 * fds need to be closed.
1982 num_valid = (buffer_offset - off_start_offset) /
1983 sizeof(binder_size_t);
1984 fda = to_binder_fd_array_object(hdr);
1985 parent = binder_validate_ptr(proc, buffer, &ptr_object,
1991 pr_err("transaction release %d bad parent offset\n",
1995 fd_buf_size = sizeof(u32) * fda->num_fds;
1996 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1997 pr_err("transaction release %d invalid number of fds (%lld)\n",
1998 debug_id, (u64)fda->num_fds);
2001 if (fd_buf_size > parent->length ||
2002 fda->parent_offset > parent->length - fd_buf_size) {
2003 /* No space for all file descriptors here. */
2004 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2005 debug_id, (u64)fda->num_fds);
2009 * the source data for binder_buffer_object is visible
2010 * to user-space and the @buffer element is the user
2011 * pointer to the buffer_object containing the fd_array.
2012 * Convert the address to an offset relative to
2013 * the base of the transaction buffer.
2016 (parent->buffer - (uintptr_t)buffer->user_data) +
2018 for (fd_index = 0; fd_index < fda->num_fds;
2022 binder_size_t offset = fda_offset +
2023 fd_index * sizeof(fd);
2025 err = binder_alloc_copy_from_buffer(
2026 &proc->alloc, &fd, buffer,
2027 offset, sizeof(fd));
2030 binder_deferred_fd_close(fd);
2032 * Need to make sure the thread goes
2033 * back to userspace to complete the
2037 thread->looper_need_return = true;
2042 pr_err("transaction release %d bad object type %x\n",
2043 debug_id, hdr->type);
2049 static int binder_translate_binder(struct flat_binder_object *fp,
2050 struct binder_transaction *t,
2051 struct binder_thread *thread)
2053 struct binder_node *node;
2054 struct binder_proc *proc = thread->proc;
2055 struct binder_proc *target_proc = t->to_proc;
2056 struct binder_ref_data rdata;
2059 node = binder_get_node(proc, fp->binder);
2061 node = binder_new_node(proc, fp);
2065 if (fp->cookie != node->cookie) {
2066 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2067 proc->pid, thread->pid, (u64)fp->binder,
2068 node->debug_id, (u64)fp->cookie,
2073 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2078 ret = binder_inc_ref_for_node(target_proc, node,
2079 fp->hdr.type == BINDER_TYPE_BINDER,
2080 &thread->todo, &rdata);
2084 if (fp->hdr.type == BINDER_TYPE_BINDER)
2085 fp->hdr.type = BINDER_TYPE_HANDLE;
2087 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2089 fp->handle = rdata.desc;
2092 trace_binder_transaction_node_to_ref(t, node, &rdata);
2093 binder_debug(BINDER_DEBUG_TRANSACTION,
2094 " node %d u%016llx -> ref %d desc %d\n",
2095 node->debug_id, (u64)node->ptr,
2096 rdata.debug_id, rdata.desc);
2098 binder_put_node(node);
2102 static int binder_translate_handle(struct flat_binder_object *fp,
2103 struct binder_transaction *t,
2104 struct binder_thread *thread)
2106 struct binder_proc *proc = thread->proc;
2107 struct binder_proc *target_proc = t->to_proc;
2108 struct binder_node *node;
2109 struct binder_ref_data src_rdata;
2112 node = binder_get_node_from_ref(proc, fp->handle,
2113 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2115 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2116 proc->pid, thread->pid, fp->handle);
2119 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2124 binder_node_lock(node);
2125 if (node->proc == target_proc) {
2126 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2127 fp->hdr.type = BINDER_TYPE_BINDER;
2129 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2130 fp->binder = node->ptr;
2131 fp->cookie = node->cookie;
2133 binder_inner_proc_lock(node->proc);
2135 __acquire(&node->proc->inner_lock);
2136 binder_inc_node_nilocked(node,
2137 fp->hdr.type == BINDER_TYPE_BINDER,
2140 binder_inner_proc_unlock(node->proc);
2142 __release(&node->proc->inner_lock);
2143 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2144 binder_debug(BINDER_DEBUG_TRANSACTION,
2145 " ref %d desc %d -> node %d u%016llx\n",
2146 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2148 binder_node_unlock(node);
2150 struct binder_ref_data dest_rdata;
2152 binder_node_unlock(node);
2153 ret = binder_inc_ref_for_node(target_proc, node,
2154 fp->hdr.type == BINDER_TYPE_HANDLE,
2160 fp->handle = dest_rdata.desc;
2162 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2164 binder_debug(BINDER_DEBUG_TRANSACTION,
2165 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2166 src_rdata.debug_id, src_rdata.desc,
2167 dest_rdata.debug_id, dest_rdata.desc,
2171 binder_put_node(node);
2175 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2176 struct binder_transaction *t,
2177 struct binder_thread *thread,
2178 struct binder_transaction *in_reply_to)
2180 struct binder_proc *proc = thread->proc;
2181 struct binder_proc *target_proc = t->to_proc;
2182 struct binder_txn_fd_fixup *fixup;
2185 bool target_allows_fd;
2188 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2190 target_allows_fd = t->buffer->target_node->accept_fds;
2191 if (!target_allows_fd) {
2192 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2193 proc->pid, thread->pid,
2194 in_reply_to ? "reply" : "transaction",
2197 goto err_fd_not_accepted;
2202 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2203 proc->pid, thread->pid, fd);
2207 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2214 * Add fixup record for this transaction. The allocation
2215 * of the fd in the target needs to be done from a
2218 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2224 fixup->offset = fd_offset;
2225 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2226 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2234 err_fd_not_accepted:
2239 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2240 * @offset offset in target buffer to fixup
2241 * @skip_size bytes to skip in copy (fixup will be written later)
2242 * @fixup_data data to write at fixup offset
2245 * This is used for the pointer fixup list (pf) which is created and consumed
2246 * during binder_transaction() and is only accessed locally. No
2247 * locking is necessary.
2249 * The list is ordered by @offset.
2251 struct binder_ptr_fixup {
2252 binder_size_t offset;
2254 binder_uintptr_t fixup_data;
2255 struct list_head node;
2259 * struct binder_sg_copy - scatter-gather data to be copied
2260 * @offset offset in target buffer
2261 * @sender_uaddr user address in source buffer
2262 * @length bytes to copy
2265 * This is used for the sg copy list (sgc) which is created and consumed
2266 * during binder_transaction() and is only accessed locally. No
2267 * locking is necessary.
2269 * The list is ordered by @offset.
2271 struct binder_sg_copy {
2272 binder_size_t offset;
2273 const void __user *sender_uaddr;
2275 struct list_head node;
2279 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2280 * @alloc: binder_alloc associated with @buffer
2281 * @buffer: binder buffer in target process
2282 * @sgc_head: list_head of scatter-gather copy list
2283 * @pf_head: list_head of pointer fixup list
2285 * Processes all elements of @sgc_head, applying fixups from @pf_head
2286 * and copying the scatter-gather data from the source process' user
2287 * buffer to the target's buffer. It is expected that the list creation
2288 * and processing all occurs during binder_transaction() so these lists
2289 * are only accessed in local context.
2291 * Return: 0=success, else -errno
2293 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2294 struct binder_buffer *buffer,
2295 struct list_head *sgc_head,
2296 struct list_head *pf_head)
2299 struct binder_sg_copy *sgc, *tmpsgc;
2300 struct binder_ptr_fixup *pf =
2301 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2304 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2305 size_t bytes_copied = 0;
2307 while (bytes_copied < sgc->length) {
2309 size_t bytes_left = sgc->length - bytes_copied;
2310 size_t offset = sgc->offset + bytes_copied;
2313 * We copy up to the fixup (pointed to by pf)
2315 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2317 if (!ret && copy_size)
2318 ret = binder_alloc_copy_user_to_buffer(
2321 sgc->sender_uaddr + bytes_copied,
2323 bytes_copied += copy_size;
2324 if (copy_size != bytes_left) {
2326 /* we stopped at a fixup offset */
2327 if (pf->skip_size) {
2329 * we are just skipping. This is for
2330 * BINDER_TYPE_FDA where the translated
2331 * fds will be fixed up when we get
2332 * to target context.
2334 bytes_copied += pf->skip_size;
2336 /* apply the fixup indicated by pf */
2338 ret = binder_alloc_copy_to_buffer(
2342 sizeof(pf->fixup_data));
2343 bytes_copied += sizeof(pf->fixup_data);
2345 list_del(&pf->node);
2347 pf = list_first_entry_or_null(pf_head,
2348 struct binder_ptr_fixup, node);
2351 list_del(&sgc->node);
2354 BUG_ON(!list_empty(pf_head));
2355 BUG_ON(!list_empty(sgc_head));
2357 return ret > 0 ? -EINVAL : ret;
2361 * binder_cleanup_deferred_txn_lists() - free specified lists
2362 * @sgc_head: list_head of scatter-gather copy list
2363 * @pf_head: list_head of pointer fixup list
2365 * Called to clean up @sgc_head and @pf_head if there is an
2368 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2369 struct list_head *pf_head)
2371 struct binder_sg_copy *sgc, *tmpsgc;
2372 struct binder_ptr_fixup *pf, *tmppf;
2374 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2375 list_del(&sgc->node);
2378 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2379 list_del(&pf->node);
2385 * binder_defer_copy() - queue a scatter-gather buffer for copy
2386 * @sgc_head: list_head of scatter-gather copy list
2387 * @offset: binder buffer offset in target process
2388 * @sender_uaddr: user address in source process
2389 * @length: bytes to copy
2391 * Specify a scatter-gather block to be copied. The actual copy must
2392 * be deferred until all the needed fixups are identified and queued.
2393 * Then the copy and fixups are done together so un-translated values
2394 * from the source are never visible in the target buffer.
2396 * We are guaranteed that repeated calls to this function will have
2397 * monotonically increasing @offset values so the list will naturally
2400 * Return: 0=success, else -errno
2402 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2403 const void __user *sender_uaddr, size_t length)
2405 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2410 bc->offset = offset;
2411 bc->sender_uaddr = sender_uaddr;
2412 bc->length = length;
2413 INIT_LIST_HEAD(&bc->node);
2416 * We are guaranteed that the deferred copies are in-order
2417 * so just add to the tail.
2419 list_add_tail(&bc->node, sgc_head);
2425 * binder_add_fixup() - queue a fixup to be applied to sg copy
2426 * @pf_head: list_head of binder ptr fixup list
2427 * @offset: binder buffer offset in target process
2428 * @fixup: bytes to be copied for fixup
2429 * @skip_size: bytes to skip when copying (fixup will be applied later)
2431 * Add the specified fixup to a list ordered by @offset. When copying
2432 * the scatter-gather buffers, the fixup will be copied instead of
2433 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2434 * will be applied later (in target process context), so we just skip
2435 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2438 * This function is called *mostly* in @offset order, but there are
2439 * exceptions. Since out-of-order inserts are relatively uncommon,
2440 * we insert the new element by searching backward from the tail of
2443 * Return: 0=success, else -errno
2445 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2446 binder_uintptr_t fixup, size_t skip_size)
2448 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2449 struct binder_ptr_fixup *tmppf;
2454 pf->offset = offset;
2455 pf->fixup_data = fixup;
2456 pf->skip_size = skip_size;
2457 INIT_LIST_HEAD(&pf->node);
2459 /* Fixups are *mostly* added in-order, but there are some
2460 * exceptions. Look backwards through list for insertion point.
2462 list_for_each_entry_reverse(tmppf, pf_head, node) {
2463 if (tmppf->offset < pf->offset) {
2464 list_add(&pf->node, &tmppf->node);
2469 * if we get here, then the new offset is the lowest so
2470 * insert at the head
2472 list_add(&pf->node, pf_head);
2476 static int binder_translate_fd_array(struct list_head *pf_head,
2477 struct binder_fd_array_object *fda,
2478 const void __user *sender_ubuffer,
2479 struct binder_buffer_object *parent,
2480 struct binder_buffer_object *sender_uparent,
2481 struct binder_transaction *t,
2482 struct binder_thread *thread,
2483 struct binder_transaction *in_reply_to)
2485 binder_size_t fdi, fd_buf_size;
2486 binder_size_t fda_offset;
2487 const void __user *sender_ufda_base;
2488 struct binder_proc *proc = thread->proc;
2491 fd_buf_size = sizeof(u32) * fda->num_fds;
2492 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2493 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2494 proc->pid, thread->pid, (u64)fda->num_fds);
2497 if (fd_buf_size > parent->length ||
2498 fda->parent_offset > parent->length - fd_buf_size) {
2499 /* No space for all file descriptors here. */
2500 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2501 proc->pid, thread->pid, (u64)fda->num_fds);
2505 * the source data for binder_buffer_object is visible
2506 * to user-space and the @buffer element is the user
2507 * pointer to the buffer_object containing the fd_array.
2508 * Convert the address to an offset relative to
2509 * the base of the transaction buffer.
2511 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2513 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2516 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2517 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2518 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2519 proc->pid, thread->pid);
2522 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2526 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2528 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2529 binder_size_t sender_uoffset = fdi * sizeof(fd);
2531 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2533 ret = binder_translate_fd(fd, offset, t, thread,
2536 return ret > 0 ? -EINVAL : ret;
2541 static int binder_fixup_parent(struct list_head *pf_head,
2542 struct binder_transaction *t,
2543 struct binder_thread *thread,
2544 struct binder_buffer_object *bp,
2545 binder_size_t off_start_offset,
2546 binder_size_t num_valid,
2547 binder_size_t last_fixup_obj_off,
2548 binder_size_t last_fixup_min_off)
2550 struct binder_buffer_object *parent;
2551 struct binder_buffer *b = t->buffer;
2552 struct binder_proc *proc = thread->proc;
2553 struct binder_proc *target_proc = t->to_proc;
2554 struct binder_object object;
2555 binder_size_t buffer_offset;
2556 binder_size_t parent_offset;
2558 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2561 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2562 off_start_offset, &parent_offset,
2565 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2566 proc->pid, thread->pid);
2570 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2571 parent_offset, bp->parent_offset,
2573 last_fixup_min_off)) {
2574 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2575 proc->pid, thread->pid);
2579 if (parent->length < sizeof(binder_uintptr_t) ||
2580 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2581 /* No space for a pointer here! */
2582 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2583 proc->pid, thread->pid);
2586 buffer_offset = bp->parent_offset +
2587 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2588 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2592 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2593 * @t: transaction to send
2594 * @proc: process to send the transaction to
2595 * @thread: thread in @proc to send the transaction to (may be NULL)
2597 * This function queues a transaction to the specified process. It will try
2598 * to find a thread in the target process to handle the transaction and
2599 * wake it up. If no thread is found, the work is queued to the proc
2602 * If the @thread parameter is not NULL, the transaction is always queued
2603 * to the waitlist of that specific thread.
2605 * Return: 0 if the transaction was successfully queued
2606 * BR_DEAD_REPLY if the target process or thread is dead
2607 * BR_FROZEN_REPLY if the target process or thread is frozen
2609 static int binder_proc_transaction(struct binder_transaction *t,
2610 struct binder_proc *proc,
2611 struct binder_thread *thread)
2613 struct binder_node *node = t->buffer->target_node;
2614 bool oneway = !!(t->flags & TF_ONE_WAY);
2615 bool pending_async = false;
2618 binder_node_lock(node);
2621 if (node->has_async_transaction)
2622 pending_async = true;
2624 node->has_async_transaction = true;
2627 binder_inner_proc_lock(proc);
2628 if (proc->is_frozen) {
2629 proc->sync_recv |= !oneway;
2630 proc->async_recv |= oneway;
2633 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2634 (thread && thread->is_dead)) {
2635 binder_inner_proc_unlock(proc);
2636 binder_node_unlock(node);
2637 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2640 if (!thread && !pending_async)
2641 thread = binder_select_thread_ilocked(proc);
2644 binder_enqueue_thread_work_ilocked(thread, &t->work);
2645 else if (!pending_async)
2646 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2648 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2651 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2653 proc->outstanding_txns++;
2654 binder_inner_proc_unlock(proc);
2655 binder_node_unlock(node);
2661 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2662 * @node: struct binder_node for which to get refs
2663 * @proc: returns @node->proc if valid
2664 * @error: if no @proc then returns BR_DEAD_REPLY
2666 * User-space normally keeps the node alive when creating a transaction
2667 * since it has a reference to the target. The local strong ref keeps it
2668 * alive if the sending process dies before the target process processes
2669 * the transaction. If the source process is malicious or has a reference
2670 * counting bug, relying on the local strong ref can fail.
2672 * Since user-space can cause the local strong ref to go away, we also take
2673 * a tmpref on the node to ensure it survives while we are constructing
2674 * the transaction. We also need a tmpref on the proc while we are
2675 * constructing the transaction, so we take that here as well.
2677 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2678 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2679 * target proc has died, @error is set to BR_DEAD_REPLY
2681 static struct binder_node *binder_get_node_refs_for_txn(
2682 struct binder_node *node,
2683 struct binder_proc **procp,
2686 struct binder_node *target_node = NULL;
2688 binder_node_inner_lock(node);
2691 binder_inc_node_nilocked(node, 1, 0, NULL);
2692 binder_inc_node_tmpref_ilocked(node);
2693 node->proc->tmp_ref++;
2694 *procp = node->proc;
2696 *error = BR_DEAD_REPLY;
2697 binder_node_inner_unlock(node);
2702 static void binder_transaction(struct binder_proc *proc,
2703 struct binder_thread *thread,
2704 struct binder_transaction_data *tr, int reply,
2705 binder_size_t extra_buffers_size)
2708 struct binder_transaction *t;
2709 struct binder_work *w;
2710 struct binder_work *tcomplete;
2711 binder_size_t buffer_offset = 0;
2712 binder_size_t off_start_offset, off_end_offset;
2713 binder_size_t off_min;
2714 binder_size_t sg_buf_offset, sg_buf_end_offset;
2715 binder_size_t user_offset = 0;
2716 struct binder_proc *target_proc = NULL;
2717 struct binder_thread *target_thread = NULL;
2718 struct binder_node *target_node = NULL;
2719 struct binder_transaction *in_reply_to = NULL;
2720 struct binder_transaction_log_entry *e;
2721 uint32_t return_error = 0;
2722 uint32_t return_error_param = 0;
2723 uint32_t return_error_line = 0;
2724 binder_size_t last_fixup_obj_off = 0;
2725 binder_size_t last_fixup_min_off = 0;
2726 struct binder_context *context = proc->context;
2727 int t_debug_id = atomic_inc_return(&binder_last_id);
2728 char *secctx = NULL;
2730 struct list_head sgc_head;
2731 struct list_head pf_head;
2732 const void __user *user_buffer = (const void __user *)
2733 (uintptr_t)tr->data.ptr.buffer;
2734 INIT_LIST_HEAD(&sgc_head);
2735 INIT_LIST_HEAD(&pf_head);
2737 e = binder_transaction_log_add(&binder_transaction_log);
2738 e->debug_id = t_debug_id;
2739 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2740 e->from_proc = proc->pid;
2741 e->from_thread = thread->pid;
2742 e->target_handle = tr->target.handle;
2743 e->data_size = tr->data_size;
2744 e->offsets_size = tr->offsets_size;
2745 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2748 binder_inner_proc_lock(proc);
2749 in_reply_to = thread->transaction_stack;
2750 if (in_reply_to == NULL) {
2751 binder_inner_proc_unlock(proc);
2752 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2753 proc->pid, thread->pid);
2754 return_error = BR_FAILED_REPLY;
2755 return_error_param = -EPROTO;
2756 return_error_line = __LINE__;
2757 goto err_empty_call_stack;
2759 if (in_reply_to->to_thread != thread) {
2760 spin_lock(&in_reply_to->lock);
2761 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2762 proc->pid, thread->pid, in_reply_to->debug_id,
2763 in_reply_to->to_proc ?
2764 in_reply_to->to_proc->pid : 0,
2765 in_reply_to->to_thread ?
2766 in_reply_to->to_thread->pid : 0);
2767 spin_unlock(&in_reply_to->lock);
2768 binder_inner_proc_unlock(proc);
2769 return_error = BR_FAILED_REPLY;
2770 return_error_param = -EPROTO;
2771 return_error_line = __LINE__;
2773 goto err_bad_call_stack;
2775 thread->transaction_stack = in_reply_to->to_parent;
2776 binder_inner_proc_unlock(proc);
2777 binder_set_nice(in_reply_to->saved_priority);
2778 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2779 if (target_thread == NULL) {
2780 /* annotation for sparse */
2781 __release(&target_thread->proc->inner_lock);
2782 return_error = BR_DEAD_REPLY;
2783 return_error_line = __LINE__;
2784 goto err_dead_binder;
2786 if (target_thread->transaction_stack != in_reply_to) {
2787 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2788 proc->pid, thread->pid,
2789 target_thread->transaction_stack ?
2790 target_thread->transaction_stack->debug_id : 0,
2791 in_reply_to->debug_id);
2792 binder_inner_proc_unlock(target_thread->proc);
2793 return_error = BR_FAILED_REPLY;
2794 return_error_param = -EPROTO;
2795 return_error_line = __LINE__;
2797 target_thread = NULL;
2798 goto err_dead_binder;
2800 target_proc = target_thread->proc;
2801 target_proc->tmp_ref++;
2802 binder_inner_proc_unlock(target_thread->proc);
2804 if (tr->target.handle) {
2805 struct binder_ref *ref;
2808 * There must already be a strong ref
2809 * on this node. If so, do a strong
2810 * increment on the node to ensure it
2811 * stays alive until the transaction is
2814 binder_proc_lock(proc);
2815 ref = binder_get_ref_olocked(proc, tr->target.handle,
2818 target_node = binder_get_node_refs_for_txn(
2819 ref->node, &target_proc,
2822 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2823 proc->pid, thread->pid, tr->target.handle);
2824 return_error = BR_FAILED_REPLY;
2826 binder_proc_unlock(proc);
2828 mutex_lock(&context->context_mgr_node_lock);
2829 target_node = context->binder_context_mgr_node;
2831 target_node = binder_get_node_refs_for_txn(
2832 target_node, &target_proc,
2835 return_error = BR_DEAD_REPLY;
2836 mutex_unlock(&context->context_mgr_node_lock);
2837 if (target_node && target_proc->pid == proc->pid) {
2838 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2839 proc->pid, thread->pid);
2840 return_error = BR_FAILED_REPLY;
2841 return_error_param = -EINVAL;
2842 return_error_line = __LINE__;
2843 goto err_invalid_target_handle;
2848 * return_error is set above
2850 return_error_param = -EINVAL;
2851 return_error_line = __LINE__;
2852 goto err_dead_binder;
2854 e->to_node = target_node->debug_id;
2855 if (WARN_ON(proc == target_proc)) {
2856 return_error = BR_FAILED_REPLY;
2857 return_error_param = -EINVAL;
2858 return_error_line = __LINE__;
2859 goto err_invalid_target_handle;
2861 if (security_binder_transaction(proc->cred,
2862 target_proc->cred) < 0) {
2863 return_error = BR_FAILED_REPLY;
2864 return_error_param = -EPERM;
2865 return_error_line = __LINE__;
2866 goto err_invalid_target_handle;
2868 binder_inner_proc_lock(proc);
2870 w = list_first_entry_or_null(&thread->todo,
2871 struct binder_work, entry);
2872 if (!(tr->flags & TF_ONE_WAY) && w &&
2873 w->type == BINDER_WORK_TRANSACTION) {
2875 * Do not allow new outgoing transaction from a
2876 * thread that has a transaction at the head of
2877 * its todo list. Only need to check the head
2878 * because binder_select_thread_ilocked picks a
2879 * thread from proc->waiting_threads to enqueue
2880 * the transaction, and nothing is queued to the
2881 * todo list while the thread is on waiting_threads.
2883 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2884 proc->pid, thread->pid);
2885 binder_inner_proc_unlock(proc);
2886 return_error = BR_FAILED_REPLY;
2887 return_error_param = -EPROTO;
2888 return_error_line = __LINE__;
2889 goto err_bad_todo_list;
2892 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2893 struct binder_transaction *tmp;
2895 tmp = thread->transaction_stack;
2896 if (tmp->to_thread != thread) {
2897 spin_lock(&tmp->lock);
2898 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2899 proc->pid, thread->pid, tmp->debug_id,
2900 tmp->to_proc ? tmp->to_proc->pid : 0,
2902 tmp->to_thread->pid : 0);
2903 spin_unlock(&tmp->lock);
2904 binder_inner_proc_unlock(proc);
2905 return_error = BR_FAILED_REPLY;
2906 return_error_param = -EPROTO;
2907 return_error_line = __LINE__;
2908 goto err_bad_call_stack;
2911 struct binder_thread *from;
2913 spin_lock(&tmp->lock);
2915 if (from && from->proc == target_proc) {
2916 atomic_inc(&from->tmp_ref);
2917 target_thread = from;
2918 spin_unlock(&tmp->lock);
2921 spin_unlock(&tmp->lock);
2922 tmp = tmp->from_parent;
2925 binder_inner_proc_unlock(proc);
2928 e->to_thread = target_thread->pid;
2929 e->to_proc = target_proc->pid;
2931 /* TODO: reuse incoming transaction for reply */
2932 t = kzalloc(sizeof(*t), GFP_KERNEL);
2934 return_error = BR_FAILED_REPLY;
2935 return_error_param = -ENOMEM;
2936 return_error_line = __LINE__;
2937 goto err_alloc_t_failed;
2939 INIT_LIST_HEAD(&t->fd_fixups);
2940 binder_stats_created(BINDER_STAT_TRANSACTION);
2941 spin_lock_init(&t->lock);
2943 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2944 if (tcomplete == NULL) {
2945 return_error = BR_FAILED_REPLY;
2946 return_error_param = -ENOMEM;
2947 return_error_line = __LINE__;
2948 goto err_alloc_tcomplete_failed;
2950 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2952 t->debug_id = t_debug_id;
2955 binder_debug(BINDER_DEBUG_TRANSACTION,
2956 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2957 proc->pid, thread->pid, t->debug_id,
2958 target_proc->pid, target_thread->pid,
2959 (u64)tr->data.ptr.buffer,
2960 (u64)tr->data.ptr.offsets,
2961 (u64)tr->data_size, (u64)tr->offsets_size,
2962 (u64)extra_buffers_size);
2964 binder_debug(BINDER_DEBUG_TRANSACTION,
2965 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2966 proc->pid, thread->pid, t->debug_id,
2967 target_proc->pid, target_node->debug_id,
2968 (u64)tr->data.ptr.buffer,
2969 (u64)tr->data.ptr.offsets,
2970 (u64)tr->data_size, (u64)tr->offsets_size,
2971 (u64)extra_buffers_size);
2973 if (!reply && !(tr->flags & TF_ONE_WAY))
2977 t->sender_euid = task_euid(proc->tsk);
2978 t->to_proc = target_proc;
2979 t->to_thread = target_thread;
2981 t->flags = tr->flags;
2982 t->priority = task_nice(current);
2984 if (target_node && target_node->txn_security_ctx) {
2988 security_cred_getsecid(proc->cred, &secid);
2989 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2991 return_error = BR_FAILED_REPLY;
2992 return_error_param = ret;
2993 return_error_line = __LINE__;
2994 goto err_get_secctx_failed;
2996 added_size = ALIGN(secctx_sz, sizeof(u64));
2997 extra_buffers_size += added_size;
2998 if (extra_buffers_size < added_size) {
2999 /* integer overflow of extra_buffers_size */
3000 return_error = BR_FAILED_REPLY;
3001 return_error_param = -EINVAL;
3002 return_error_line = __LINE__;
3003 goto err_bad_extra_size;
3007 trace_binder_transaction(reply, t, target_node);
3009 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3010 tr->offsets_size, extra_buffers_size,
3011 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3012 if (IS_ERR(t->buffer)) {
3014 * -ESRCH indicates VMA cleared. The target is dying.
3016 return_error_param = PTR_ERR(t->buffer);
3017 return_error = return_error_param == -ESRCH ?
3018 BR_DEAD_REPLY : BR_FAILED_REPLY;
3019 return_error_line = __LINE__;
3021 goto err_binder_alloc_buf_failed;
3025 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3026 ALIGN(tr->offsets_size, sizeof(void *)) +
3027 ALIGN(extra_buffers_size, sizeof(void *)) -
3028 ALIGN(secctx_sz, sizeof(u64));
3030 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3031 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3032 t->buffer, buf_offset,
3035 t->security_ctx = 0;
3038 security_release_secctx(secctx, secctx_sz);
3041 t->buffer->debug_id = t->debug_id;
3042 t->buffer->transaction = t;
3043 t->buffer->target_node = target_node;
3044 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3045 trace_binder_transaction_alloc_buf(t->buffer);
3047 if (binder_alloc_copy_user_to_buffer(
3048 &target_proc->alloc,
3050 ALIGN(tr->data_size, sizeof(void *)),
3051 (const void __user *)
3052 (uintptr_t)tr->data.ptr.offsets,
3053 tr->offsets_size)) {
3054 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3055 proc->pid, thread->pid);
3056 return_error = BR_FAILED_REPLY;
3057 return_error_param = -EFAULT;
3058 return_error_line = __LINE__;
3059 goto err_copy_data_failed;
3061 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3062 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3063 proc->pid, thread->pid, (u64)tr->offsets_size);
3064 return_error = BR_FAILED_REPLY;
3065 return_error_param = -EINVAL;
3066 return_error_line = __LINE__;
3067 goto err_bad_offset;
3069 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3070 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3071 proc->pid, thread->pid,
3072 (u64)extra_buffers_size);
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = -EINVAL;
3075 return_error_line = __LINE__;
3076 goto err_bad_offset;
3078 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3079 buffer_offset = off_start_offset;
3080 off_end_offset = off_start_offset + tr->offsets_size;
3081 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3082 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3083 ALIGN(secctx_sz, sizeof(u64));
3085 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3086 buffer_offset += sizeof(binder_size_t)) {
3087 struct binder_object_header *hdr;
3089 struct binder_object object;
3090 binder_size_t object_offset;
3091 binder_size_t copy_size;
3093 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3097 sizeof(object_offset))) {
3098 return_error = BR_FAILED_REPLY;
3099 return_error_param = -EINVAL;
3100 return_error_line = __LINE__;
3101 goto err_bad_offset;
3105 * Copy the source user buffer up to the next object
3106 * that will be processed.
3108 copy_size = object_offset - user_offset;
3109 if (copy_size && (user_offset > object_offset ||
3110 binder_alloc_copy_user_to_buffer(
3111 &target_proc->alloc,
3112 t->buffer, user_offset,
3113 user_buffer + user_offset,
3115 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3116 proc->pid, thread->pid);
3117 return_error = BR_FAILED_REPLY;
3118 return_error_param = -EFAULT;
3119 return_error_line = __LINE__;
3120 goto err_copy_data_failed;
3122 object_size = binder_get_object(target_proc, user_buffer,
3123 t->buffer, object_offset, &object);
3124 if (object_size == 0 || object_offset < off_min) {
3125 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3126 proc->pid, thread->pid,
3129 (u64)t->buffer->data_size);
3130 return_error = BR_FAILED_REPLY;
3131 return_error_param = -EINVAL;
3132 return_error_line = __LINE__;
3133 goto err_bad_offset;
3136 * Set offset to the next buffer fragment to be
3139 user_offset = object_offset + object_size;
3142 off_min = object_offset + object_size;
3143 switch (hdr->type) {
3144 case BINDER_TYPE_BINDER:
3145 case BINDER_TYPE_WEAK_BINDER: {
3146 struct flat_binder_object *fp;
3148 fp = to_flat_binder_object(hdr);
3149 ret = binder_translate_binder(fp, t, thread);
3152 binder_alloc_copy_to_buffer(&target_proc->alloc,
3156 return_error = BR_FAILED_REPLY;
3157 return_error_param = ret;
3158 return_error_line = __LINE__;
3159 goto err_translate_failed;
3162 case BINDER_TYPE_HANDLE:
3163 case BINDER_TYPE_WEAK_HANDLE: {
3164 struct flat_binder_object *fp;
3166 fp = to_flat_binder_object(hdr);
3167 ret = binder_translate_handle(fp, t, thread);
3169 binder_alloc_copy_to_buffer(&target_proc->alloc,
3173 return_error = BR_FAILED_REPLY;
3174 return_error_param = ret;
3175 return_error_line = __LINE__;
3176 goto err_translate_failed;
3180 case BINDER_TYPE_FD: {
3181 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3182 binder_size_t fd_offset = object_offset +
3183 (uintptr_t)&fp->fd - (uintptr_t)fp;
3184 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3185 thread, in_reply_to);
3189 binder_alloc_copy_to_buffer(&target_proc->alloc,
3193 return_error = BR_FAILED_REPLY;
3194 return_error_param = ret;
3195 return_error_line = __LINE__;
3196 goto err_translate_failed;
3199 case BINDER_TYPE_FDA: {
3200 struct binder_object ptr_object;
3201 binder_size_t parent_offset;
3202 struct binder_object user_object;
3203 size_t user_parent_size;
3204 struct binder_fd_array_object *fda =
3205 to_binder_fd_array_object(hdr);
3206 size_t num_valid = (buffer_offset - off_start_offset) /
3207 sizeof(binder_size_t);
3208 struct binder_buffer_object *parent =
3209 binder_validate_ptr(target_proc, t->buffer,
3210 &ptr_object, fda->parent,
3215 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3216 proc->pid, thread->pid);
3217 return_error = BR_FAILED_REPLY;
3218 return_error_param = -EINVAL;
3219 return_error_line = __LINE__;
3220 goto err_bad_parent;
3222 if (!binder_validate_fixup(target_proc, t->buffer,
3227 last_fixup_min_off)) {
3228 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3229 proc->pid, thread->pid);
3230 return_error = BR_FAILED_REPLY;
3231 return_error_param = -EINVAL;
3232 return_error_line = __LINE__;
3233 goto err_bad_parent;
3236 * We need to read the user version of the parent
3237 * object to get the original user offset
3240 binder_get_object(proc, user_buffer, t->buffer,
3241 parent_offset, &user_object);
3242 if (user_parent_size != sizeof(user_object.bbo)) {
3243 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3244 proc->pid, thread->pid,
3246 sizeof(user_object.bbo));
3247 return_error = BR_FAILED_REPLY;
3248 return_error_param = -EINVAL;
3249 return_error_line = __LINE__;
3250 goto err_bad_parent;
3252 ret = binder_translate_fd_array(&pf_head, fda,
3253 user_buffer, parent,
3254 &user_object.bbo, t,
3255 thread, in_reply_to);
3257 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3262 return_error = BR_FAILED_REPLY;
3263 return_error_param = ret > 0 ? -EINVAL : ret;
3264 return_error_line = __LINE__;
3265 goto err_translate_failed;
3267 last_fixup_obj_off = parent_offset;
3268 last_fixup_min_off =
3269 fda->parent_offset + sizeof(u32) * fda->num_fds;
3271 case BINDER_TYPE_PTR: {
3272 struct binder_buffer_object *bp =
3273 to_binder_buffer_object(hdr);
3274 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3277 if (bp->length > buf_left) {
3278 binder_user_error("%d:%d got transaction with too large buffer\n",
3279 proc->pid, thread->pid);
3280 return_error = BR_FAILED_REPLY;
3281 return_error_param = -EINVAL;
3282 return_error_line = __LINE__;
3283 goto err_bad_offset;
3285 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3286 (const void __user *)(uintptr_t)bp->buffer,
3289 return_error = BR_FAILED_REPLY;
3290 return_error_param = ret;
3291 return_error_line = __LINE__;
3292 goto err_translate_failed;
3294 /* Fixup buffer pointer to target proc address space */
3295 bp->buffer = (uintptr_t)
3296 t->buffer->user_data + sg_buf_offset;
3297 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3299 num_valid = (buffer_offset - off_start_offset) /
3300 sizeof(binder_size_t);
3301 ret = binder_fixup_parent(&pf_head, t,
3306 last_fixup_min_off);
3308 binder_alloc_copy_to_buffer(&target_proc->alloc,
3312 return_error = BR_FAILED_REPLY;
3313 return_error_param = ret;
3314 return_error_line = __LINE__;
3315 goto err_translate_failed;
3317 last_fixup_obj_off = object_offset;
3318 last_fixup_min_off = 0;
3321 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3322 proc->pid, thread->pid, hdr->type);
3323 return_error = BR_FAILED_REPLY;
3324 return_error_param = -EINVAL;
3325 return_error_line = __LINE__;
3326 goto err_bad_object_type;
3329 /* Done processing objects, copy the rest of the buffer */
3330 if (binder_alloc_copy_user_to_buffer(
3331 &target_proc->alloc,
3332 t->buffer, user_offset,
3333 user_buffer + user_offset,
3334 tr->data_size - user_offset)) {
3335 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3336 proc->pid, thread->pid);
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = -EFAULT;
3339 return_error_line = __LINE__;
3340 goto err_copy_data_failed;
3343 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3344 &sgc_head, &pf_head);
3346 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3347 proc->pid, thread->pid);
3348 return_error = BR_FAILED_REPLY;
3349 return_error_param = ret;
3350 return_error_line = __LINE__;
3351 goto err_copy_data_failed;
3353 if (t->buffer->oneway_spam_suspect)
3354 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3356 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3357 t->work.type = BINDER_WORK_TRANSACTION;
3360 binder_enqueue_thread_work(thread, tcomplete);
3361 binder_inner_proc_lock(target_proc);
3362 if (target_thread->is_dead) {
3363 return_error = BR_DEAD_REPLY;
3364 binder_inner_proc_unlock(target_proc);
3365 goto err_dead_proc_or_thread;
3367 BUG_ON(t->buffer->async_transaction != 0);
3368 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3369 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3370 target_proc->outstanding_txns++;
3371 binder_inner_proc_unlock(target_proc);
3372 wake_up_interruptible_sync(&target_thread->wait);
3373 binder_free_transaction(in_reply_to);
3374 } else if (!(t->flags & TF_ONE_WAY)) {
3375 BUG_ON(t->buffer->async_transaction != 0);
3376 binder_inner_proc_lock(proc);
3378 * Defer the TRANSACTION_COMPLETE, so we don't return to
3379 * userspace immediately; this allows the target process to
3380 * immediately start processing this transaction, reducing
3381 * latency. We will then return the TRANSACTION_COMPLETE when
3382 * the target replies (or there is an error).
3384 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3386 t->from_parent = thread->transaction_stack;
3387 thread->transaction_stack = t;
3388 binder_inner_proc_unlock(proc);
3389 return_error = binder_proc_transaction(t,
3390 target_proc, target_thread);
3392 binder_inner_proc_lock(proc);
3393 binder_pop_transaction_ilocked(thread, t);
3394 binder_inner_proc_unlock(proc);
3395 goto err_dead_proc_or_thread;
3398 BUG_ON(target_node == NULL);
3399 BUG_ON(t->buffer->async_transaction != 1);
3400 binder_enqueue_thread_work(thread, tcomplete);
3401 return_error = binder_proc_transaction(t, target_proc, NULL);
3403 goto err_dead_proc_or_thread;
3406 binder_thread_dec_tmpref(target_thread);
3407 binder_proc_dec_tmpref(target_proc);
3409 binder_dec_node_tmpref(target_node);
3411 * write barrier to synchronize with initialization
3415 WRITE_ONCE(e->debug_id_done, t_debug_id);
3418 err_dead_proc_or_thread:
3419 return_error_line = __LINE__;
3420 binder_dequeue_work(proc, tcomplete);
3421 err_translate_failed:
3422 err_bad_object_type:
3425 err_copy_data_failed:
3426 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3427 binder_free_txn_fixups(t);
3428 trace_binder_transaction_failed_buffer_release(t->buffer);
3429 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3430 buffer_offset, true);
3432 binder_dec_node_tmpref(target_node);
3434 t->buffer->transaction = NULL;
3435 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3436 err_binder_alloc_buf_failed:
3439 security_release_secctx(secctx, secctx_sz);
3440 err_get_secctx_failed:
3442 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3443 err_alloc_tcomplete_failed:
3444 if (trace_binder_txn_latency_free_enabled())
3445 binder_txn_latency_free(t);
3447 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3451 err_empty_call_stack:
3453 err_invalid_target_handle:
3455 binder_thread_dec_tmpref(target_thread);
3457 binder_proc_dec_tmpref(target_proc);
3459 binder_dec_node(target_node, 1, 0);
3460 binder_dec_node_tmpref(target_node);
3463 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3464 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3465 proc->pid, thread->pid, return_error, return_error_param,
3466 (u64)tr->data_size, (u64)tr->offsets_size,
3470 struct binder_transaction_log_entry *fe;
3472 e->return_error = return_error;
3473 e->return_error_param = return_error_param;
3474 e->return_error_line = return_error_line;
3475 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3478 * write barrier to synchronize with initialization
3482 WRITE_ONCE(e->debug_id_done, t_debug_id);
3483 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3486 BUG_ON(thread->return_error.cmd != BR_OK);
3488 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3489 binder_enqueue_thread_work(thread, &thread->return_error.work);
3490 binder_send_failed_reply(in_reply_to, return_error);
3492 thread->return_error.cmd = return_error;
3493 binder_enqueue_thread_work(thread, &thread->return_error.work);
3498 * binder_free_buf() - free the specified buffer
3499 * @proc: binder proc that owns buffer
3500 * @buffer: buffer to be freed
3501 * @is_failure: failed to send transaction
3503 * If buffer for an async transaction, enqueue the next async
3504 * transaction from the node.
3506 * Cleanup buffer and free it.
3509 binder_free_buf(struct binder_proc *proc,
3510 struct binder_thread *thread,
3511 struct binder_buffer *buffer, bool is_failure)
3513 binder_inner_proc_lock(proc);
3514 if (buffer->transaction) {
3515 buffer->transaction->buffer = NULL;
3516 buffer->transaction = NULL;
3518 binder_inner_proc_unlock(proc);
3519 if (buffer->async_transaction && buffer->target_node) {
3520 struct binder_node *buf_node;
3521 struct binder_work *w;
3523 buf_node = buffer->target_node;
3524 binder_node_inner_lock(buf_node);
3525 BUG_ON(!buf_node->has_async_transaction);
3526 BUG_ON(buf_node->proc != proc);
3527 w = binder_dequeue_work_head_ilocked(
3528 &buf_node->async_todo);
3530 buf_node->has_async_transaction = false;
3532 binder_enqueue_work_ilocked(
3534 binder_wakeup_proc_ilocked(proc);
3536 binder_node_inner_unlock(buf_node);
3538 trace_binder_transaction_buffer_release(buffer);
3539 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3540 binder_alloc_free_buf(&proc->alloc, buffer);
3543 static int binder_thread_write(struct binder_proc *proc,
3544 struct binder_thread *thread,
3545 binder_uintptr_t binder_buffer, size_t size,
3546 binder_size_t *consumed)
3549 struct binder_context *context = proc->context;
3550 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3551 void __user *ptr = buffer + *consumed;
3552 void __user *end = buffer + size;
3554 while (ptr < end && thread->return_error.cmd == BR_OK) {
3557 if (get_user(cmd, (uint32_t __user *)ptr))
3559 ptr += sizeof(uint32_t);
3560 trace_binder_command(cmd);
3561 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3562 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3563 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3564 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3572 const char *debug_string;
3573 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3574 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3575 struct binder_ref_data rdata;
3577 if (get_user(target, (uint32_t __user *)ptr))
3580 ptr += sizeof(uint32_t);
3582 if (increment && !target) {
3583 struct binder_node *ctx_mgr_node;
3585 mutex_lock(&context->context_mgr_node_lock);
3586 ctx_mgr_node = context->binder_context_mgr_node;
3588 if (ctx_mgr_node->proc == proc) {
3589 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3590 proc->pid, thread->pid);
3591 mutex_unlock(&context->context_mgr_node_lock);
3594 ret = binder_inc_ref_for_node(
3596 strong, NULL, &rdata);
3598 mutex_unlock(&context->context_mgr_node_lock);
3601 ret = binder_update_ref_for_handle(
3602 proc, target, increment, strong,
3604 if (!ret && rdata.desc != target) {
3605 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3606 proc->pid, thread->pid,
3607 target, rdata.desc);
3611 debug_string = "IncRefs";
3614 debug_string = "Acquire";
3617 debug_string = "Release";
3621 debug_string = "DecRefs";
3625 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3626 proc->pid, thread->pid, debug_string,
3627 strong, target, ret);
3630 binder_debug(BINDER_DEBUG_USER_REFS,
3631 "%d:%d %s ref %d desc %d s %d w %d\n",
3632 proc->pid, thread->pid, debug_string,
3633 rdata.debug_id, rdata.desc, rdata.strong,
3637 case BC_INCREFS_DONE:
3638 case BC_ACQUIRE_DONE: {
3639 binder_uintptr_t node_ptr;
3640 binder_uintptr_t cookie;
3641 struct binder_node *node;
3644 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3646 ptr += sizeof(binder_uintptr_t);
3647 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3649 ptr += sizeof(binder_uintptr_t);
3650 node = binder_get_node(proc, node_ptr);
3652 binder_user_error("%d:%d %s u%016llx no match\n",
3653 proc->pid, thread->pid,
3654 cmd == BC_INCREFS_DONE ?
3660 if (cookie != node->cookie) {
3661 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3662 proc->pid, thread->pid,
3663 cmd == BC_INCREFS_DONE ?
3664 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3665 (u64)node_ptr, node->debug_id,
3666 (u64)cookie, (u64)node->cookie);
3667 binder_put_node(node);
3670 binder_node_inner_lock(node);
3671 if (cmd == BC_ACQUIRE_DONE) {
3672 if (node->pending_strong_ref == 0) {
3673 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3674 proc->pid, thread->pid,
3676 binder_node_inner_unlock(node);
3677 binder_put_node(node);
3680 node->pending_strong_ref = 0;
3682 if (node->pending_weak_ref == 0) {
3683 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3684 proc->pid, thread->pid,
3686 binder_node_inner_unlock(node);
3687 binder_put_node(node);
3690 node->pending_weak_ref = 0;
3692 free_node = binder_dec_node_nilocked(node,
3693 cmd == BC_ACQUIRE_DONE, 0);
3695 binder_debug(BINDER_DEBUG_USER_REFS,
3696 "%d:%d %s node %d ls %d lw %d tr %d\n",
3697 proc->pid, thread->pid,
3698 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3699 node->debug_id, node->local_strong_refs,
3700 node->local_weak_refs, node->tmp_refs);
3701 binder_node_inner_unlock(node);
3702 binder_put_node(node);
3705 case BC_ATTEMPT_ACQUIRE:
3706 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3708 case BC_ACQUIRE_RESULT:
3709 pr_err("BC_ACQUIRE_RESULT not supported\n");
3712 case BC_FREE_BUFFER: {
3713 binder_uintptr_t data_ptr;
3714 struct binder_buffer *buffer;
3716 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3718 ptr += sizeof(binder_uintptr_t);
3720 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3722 if (IS_ERR_OR_NULL(buffer)) {
3723 if (PTR_ERR(buffer) == -EPERM) {
3725 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3726 proc->pid, thread->pid,
3730 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3731 proc->pid, thread->pid,
3736 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3737 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3738 proc->pid, thread->pid, (u64)data_ptr,
3740 buffer->transaction ? "active" : "finished");
3741 binder_free_buf(proc, thread, buffer, false);
3745 case BC_TRANSACTION_SG:
3747 struct binder_transaction_data_sg tr;
3749 if (copy_from_user(&tr, ptr, sizeof(tr)))
3752 binder_transaction(proc, thread, &tr.transaction_data,
3753 cmd == BC_REPLY_SG, tr.buffers_size);
3756 case BC_TRANSACTION:
3758 struct binder_transaction_data tr;
3760 if (copy_from_user(&tr, ptr, sizeof(tr)))
3763 binder_transaction(proc, thread, &tr,
3764 cmd == BC_REPLY, 0);
3768 case BC_REGISTER_LOOPER:
3769 binder_debug(BINDER_DEBUG_THREADS,
3770 "%d:%d BC_REGISTER_LOOPER\n",
3771 proc->pid, thread->pid);
3772 binder_inner_proc_lock(proc);
3773 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3774 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3775 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3776 proc->pid, thread->pid);
3777 } else if (proc->requested_threads == 0) {
3778 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3779 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3780 proc->pid, thread->pid);
3782 proc->requested_threads--;
3783 proc->requested_threads_started++;
3785 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3786 binder_inner_proc_unlock(proc);
3788 case BC_ENTER_LOOPER:
3789 binder_debug(BINDER_DEBUG_THREADS,
3790 "%d:%d BC_ENTER_LOOPER\n",
3791 proc->pid, thread->pid);
3792 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3793 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3794 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3795 proc->pid, thread->pid);
3797 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3799 case BC_EXIT_LOOPER:
3800 binder_debug(BINDER_DEBUG_THREADS,
3801 "%d:%d BC_EXIT_LOOPER\n",
3802 proc->pid, thread->pid);
3803 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3806 case BC_REQUEST_DEATH_NOTIFICATION:
3807 case BC_CLEAR_DEATH_NOTIFICATION: {
3809 binder_uintptr_t cookie;
3810 struct binder_ref *ref;
3811 struct binder_ref_death *death = NULL;
3813 if (get_user(target, (uint32_t __user *)ptr))
3815 ptr += sizeof(uint32_t);
3816 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3818 ptr += sizeof(binder_uintptr_t);
3819 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3821 * Allocate memory for death notification
3822 * before taking lock
3824 death = kzalloc(sizeof(*death), GFP_KERNEL);
3825 if (death == NULL) {
3826 WARN_ON(thread->return_error.cmd !=
3828 thread->return_error.cmd = BR_ERROR;
3829 binder_enqueue_thread_work(
3831 &thread->return_error.work);
3833 BINDER_DEBUG_FAILED_TRANSACTION,
3834 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3835 proc->pid, thread->pid);
3839 binder_proc_lock(proc);
3840 ref = binder_get_ref_olocked(proc, target, false);
3842 binder_user_error("%d:%d %s invalid ref %d\n",
3843 proc->pid, thread->pid,
3844 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3845 "BC_REQUEST_DEATH_NOTIFICATION" :
3846 "BC_CLEAR_DEATH_NOTIFICATION",
3848 binder_proc_unlock(proc);
3853 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3854 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3855 proc->pid, thread->pid,
3856 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3857 "BC_REQUEST_DEATH_NOTIFICATION" :
3858 "BC_CLEAR_DEATH_NOTIFICATION",
3859 (u64)cookie, ref->data.debug_id,
3860 ref->data.desc, ref->data.strong,
3861 ref->data.weak, ref->node->debug_id);
3863 binder_node_lock(ref->node);
3864 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3866 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3867 proc->pid, thread->pid);
3868 binder_node_unlock(ref->node);
3869 binder_proc_unlock(proc);
3873 binder_stats_created(BINDER_STAT_DEATH);
3874 INIT_LIST_HEAD(&death->work.entry);
3875 death->cookie = cookie;
3877 if (ref->node->proc == NULL) {
3878 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3880 binder_inner_proc_lock(proc);
3881 binder_enqueue_work_ilocked(
3882 &ref->death->work, &proc->todo);
3883 binder_wakeup_proc_ilocked(proc);
3884 binder_inner_proc_unlock(proc);
3887 if (ref->death == NULL) {
3888 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3889 proc->pid, thread->pid);
3890 binder_node_unlock(ref->node);
3891 binder_proc_unlock(proc);
3895 if (death->cookie != cookie) {
3896 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3897 proc->pid, thread->pid,
3900 binder_node_unlock(ref->node);
3901 binder_proc_unlock(proc);
3905 binder_inner_proc_lock(proc);
3906 if (list_empty(&death->work.entry)) {
3907 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3908 if (thread->looper &
3909 (BINDER_LOOPER_STATE_REGISTERED |
3910 BINDER_LOOPER_STATE_ENTERED))
3911 binder_enqueue_thread_work_ilocked(
3915 binder_enqueue_work_ilocked(
3918 binder_wakeup_proc_ilocked(
3922 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3923 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3925 binder_inner_proc_unlock(proc);
3927 binder_node_unlock(ref->node);
3928 binder_proc_unlock(proc);
3930 case BC_DEAD_BINDER_DONE: {
3931 struct binder_work *w;
3932 binder_uintptr_t cookie;
3933 struct binder_ref_death *death = NULL;
3935 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3938 ptr += sizeof(cookie);
3939 binder_inner_proc_lock(proc);
3940 list_for_each_entry(w, &proc->delivered_death,
3942 struct binder_ref_death *tmp_death =
3944 struct binder_ref_death,
3947 if (tmp_death->cookie == cookie) {
3952 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3953 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3954 proc->pid, thread->pid, (u64)cookie,
3956 if (death == NULL) {
3957 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3958 proc->pid, thread->pid, (u64)cookie);
3959 binder_inner_proc_unlock(proc);
3962 binder_dequeue_work_ilocked(&death->work);
3963 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3964 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3965 if (thread->looper &
3966 (BINDER_LOOPER_STATE_REGISTERED |
3967 BINDER_LOOPER_STATE_ENTERED))
3968 binder_enqueue_thread_work_ilocked(
3969 thread, &death->work);
3971 binder_enqueue_work_ilocked(
3974 binder_wakeup_proc_ilocked(proc);
3977 binder_inner_proc_unlock(proc);
3981 pr_err("%d:%d unknown command %d\n",
3982 proc->pid, thread->pid, cmd);
3985 *consumed = ptr - buffer;
3990 static void binder_stat_br(struct binder_proc *proc,
3991 struct binder_thread *thread, uint32_t cmd)
3993 trace_binder_return(cmd);
3994 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3995 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3996 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3997 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4001 static int binder_put_node_cmd(struct binder_proc *proc,
4002 struct binder_thread *thread,
4004 binder_uintptr_t node_ptr,
4005 binder_uintptr_t node_cookie,
4007 uint32_t cmd, const char *cmd_name)
4009 void __user *ptr = *ptrp;
4011 if (put_user(cmd, (uint32_t __user *)ptr))
4013 ptr += sizeof(uint32_t);
4015 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4017 ptr += sizeof(binder_uintptr_t);
4019 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4021 ptr += sizeof(binder_uintptr_t);
4023 binder_stat_br(proc, thread, cmd);
4024 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4025 proc->pid, thread->pid, cmd_name, node_debug_id,
4026 (u64)node_ptr, (u64)node_cookie);
4032 static int binder_wait_for_work(struct binder_thread *thread,
4036 struct binder_proc *proc = thread->proc;
4039 freezer_do_not_count();
4040 binder_inner_proc_lock(proc);
4042 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4043 if (binder_has_work_ilocked(thread, do_proc_work))
4046 list_add(&thread->waiting_thread_node,
4047 &proc->waiting_threads);
4048 binder_inner_proc_unlock(proc);
4050 binder_inner_proc_lock(proc);
4051 list_del_init(&thread->waiting_thread_node);
4052 if (signal_pending(current)) {
4057 finish_wait(&thread->wait, &wait);
4058 binder_inner_proc_unlock(proc);
4065 * binder_apply_fd_fixups() - finish fd translation
4066 * @proc: binder_proc associated @t->buffer
4067 * @t: binder transaction with list of fd fixups
4069 * Now that we are in the context of the transaction target
4070 * process, we can allocate and install fds. Process the
4071 * list of fds to translate and fixup the buffer with the
4074 * If we fail to allocate an fd, then free the resources by
4075 * fput'ing files that have not been processed and ksys_close'ing
4076 * any fds that have already been allocated.
4078 static int binder_apply_fd_fixups(struct binder_proc *proc,
4079 struct binder_transaction *t)
4081 struct binder_txn_fd_fixup *fixup, *tmp;
4084 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4085 int fd = get_unused_fd_flags(O_CLOEXEC);
4088 binder_debug(BINDER_DEBUG_TRANSACTION,
4089 "failed fd fixup txn %d fd %d\n",
4094 binder_debug(BINDER_DEBUG_TRANSACTION,
4095 "fd fixup txn %d fd %d\n",
4097 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4098 fd_install(fd, fixup->file);
4100 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4107 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4114 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4120 binder_deferred_fd_close(fd);
4122 list_del(&fixup->fixup_entry);
4129 static int binder_thread_read(struct binder_proc *proc,
4130 struct binder_thread *thread,
4131 binder_uintptr_t binder_buffer, size_t size,
4132 binder_size_t *consumed, int non_block)
4134 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4135 void __user *ptr = buffer + *consumed;
4136 void __user *end = buffer + size;
4139 int wait_for_proc_work;
4141 if (*consumed == 0) {
4142 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4144 ptr += sizeof(uint32_t);
4148 binder_inner_proc_lock(proc);
4149 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4150 binder_inner_proc_unlock(proc);
4152 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4154 trace_binder_wait_for_work(wait_for_proc_work,
4155 !!thread->transaction_stack,
4156 !binder_worklist_empty(proc, &thread->todo));
4157 if (wait_for_proc_work) {
4158 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4159 BINDER_LOOPER_STATE_ENTERED))) {
4160 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4161 proc->pid, thread->pid, thread->looper);
4162 wait_event_interruptible(binder_user_error_wait,
4163 binder_stop_on_user_error < 2);
4165 binder_set_nice(proc->default_priority);
4169 if (!binder_has_work(thread, wait_for_proc_work))
4172 ret = binder_wait_for_work(thread, wait_for_proc_work);
4175 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4182 struct binder_transaction_data_secctx tr;
4183 struct binder_transaction_data *trd = &tr.transaction_data;
4184 struct binder_work *w = NULL;
4185 struct list_head *list = NULL;
4186 struct binder_transaction *t = NULL;
4187 struct binder_thread *t_from;
4188 size_t trsize = sizeof(*trd);
4190 binder_inner_proc_lock(proc);
4191 if (!binder_worklist_empty_ilocked(&thread->todo))
4192 list = &thread->todo;
4193 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4197 binder_inner_proc_unlock(proc);
4200 if (ptr - buffer == 4 && !thread->looper_need_return)
4205 if (end - ptr < sizeof(tr) + 4) {
4206 binder_inner_proc_unlock(proc);
4209 w = binder_dequeue_work_head_ilocked(list);
4210 if (binder_worklist_empty_ilocked(&thread->todo))
4211 thread->process_todo = false;
4214 case BINDER_WORK_TRANSACTION: {
4215 binder_inner_proc_unlock(proc);
4216 t = container_of(w, struct binder_transaction, work);
4218 case BINDER_WORK_RETURN_ERROR: {
4219 struct binder_error *e = container_of(
4220 w, struct binder_error, work);
4222 WARN_ON(e->cmd == BR_OK);
4223 binder_inner_proc_unlock(proc);
4224 if (put_user(e->cmd, (uint32_t __user *)ptr))
4228 ptr += sizeof(uint32_t);
4230 binder_stat_br(proc, thread, cmd);
4232 case BINDER_WORK_TRANSACTION_COMPLETE:
4233 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4234 if (proc->oneway_spam_detection_enabled &&
4235 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4236 cmd = BR_ONEWAY_SPAM_SUSPECT;
4238 cmd = BR_TRANSACTION_COMPLETE;
4239 binder_inner_proc_unlock(proc);
4241 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4242 if (put_user(cmd, (uint32_t __user *)ptr))
4244 ptr += sizeof(uint32_t);
4246 binder_stat_br(proc, thread, cmd);
4247 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4248 "%d:%d BR_TRANSACTION_COMPLETE\n",
4249 proc->pid, thread->pid);
4251 case BINDER_WORK_NODE: {
4252 struct binder_node *node = container_of(w, struct binder_node, work);
4254 binder_uintptr_t node_ptr = node->ptr;
4255 binder_uintptr_t node_cookie = node->cookie;
4256 int node_debug_id = node->debug_id;
4259 void __user *orig_ptr = ptr;
4261 BUG_ON(proc != node->proc);
4262 strong = node->internal_strong_refs ||
4263 node->local_strong_refs;
4264 weak = !hlist_empty(&node->refs) ||
4265 node->local_weak_refs ||
4266 node->tmp_refs || strong;
4267 has_strong_ref = node->has_strong_ref;
4268 has_weak_ref = node->has_weak_ref;
4270 if (weak && !has_weak_ref) {
4271 node->has_weak_ref = 1;
4272 node->pending_weak_ref = 1;
4273 node->local_weak_refs++;
4275 if (strong && !has_strong_ref) {
4276 node->has_strong_ref = 1;
4277 node->pending_strong_ref = 1;
4278 node->local_strong_refs++;
4280 if (!strong && has_strong_ref)
4281 node->has_strong_ref = 0;
4282 if (!weak && has_weak_ref)
4283 node->has_weak_ref = 0;
4284 if (!weak && !strong) {
4285 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4286 "%d:%d node %d u%016llx c%016llx deleted\n",
4287 proc->pid, thread->pid,
4291 rb_erase(&node->rb_node, &proc->nodes);
4292 binder_inner_proc_unlock(proc);
4293 binder_node_lock(node);
4295 * Acquire the node lock before freeing the
4296 * node to serialize with other threads that
4297 * may have been holding the node lock while
4298 * decrementing this node (avoids race where
4299 * this thread frees while the other thread
4300 * is unlocking the node after the final
4303 binder_node_unlock(node);
4304 binder_free_node(node);
4306 binder_inner_proc_unlock(proc);
4308 if (weak && !has_weak_ref)
4309 ret = binder_put_node_cmd(
4310 proc, thread, &ptr, node_ptr,
4311 node_cookie, node_debug_id,
4312 BR_INCREFS, "BR_INCREFS");
4313 if (!ret && strong && !has_strong_ref)
4314 ret = binder_put_node_cmd(
4315 proc, thread, &ptr, node_ptr,
4316 node_cookie, node_debug_id,
4317 BR_ACQUIRE, "BR_ACQUIRE");
4318 if (!ret && !strong && has_strong_ref)
4319 ret = binder_put_node_cmd(
4320 proc, thread, &ptr, node_ptr,
4321 node_cookie, node_debug_id,
4322 BR_RELEASE, "BR_RELEASE");
4323 if (!ret && !weak && has_weak_ref)
4324 ret = binder_put_node_cmd(
4325 proc, thread, &ptr, node_ptr,
4326 node_cookie, node_debug_id,
4327 BR_DECREFS, "BR_DECREFS");
4328 if (orig_ptr == ptr)
4329 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4330 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4331 proc->pid, thread->pid,
4338 case BINDER_WORK_DEAD_BINDER:
4339 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4340 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4341 struct binder_ref_death *death;
4343 binder_uintptr_t cookie;
4345 death = container_of(w, struct binder_ref_death, work);
4346 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4347 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4349 cmd = BR_DEAD_BINDER;
4350 cookie = death->cookie;
4352 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4353 "%d:%d %s %016llx\n",
4354 proc->pid, thread->pid,
4355 cmd == BR_DEAD_BINDER ?
4357 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4359 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4360 binder_inner_proc_unlock(proc);
4362 binder_stats_deleted(BINDER_STAT_DEATH);
4364 binder_enqueue_work_ilocked(
4365 w, &proc->delivered_death);
4366 binder_inner_proc_unlock(proc);
4368 if (put_user(cmd, (uint32_t __user *)ptr))
4370 ptr += sizeof(uint32_t);
4371 if (put_user(cookie,
4372 (binder_uintptr_t __user *)ptr))
4374 ptr += sizeof(binder_uintptr_t);
4375 binder_stat_br(proc, thread, cmd);
4376 if (cmd == BR_DEAD_BINDER)
4377 goto done; /* DEAD_BINDER notifications can cause transactions */
4380 binder_inner_proc_unlock(proc);
4381 pr_err("%d:%d: bad work type %d\n",
4382 proc->pid, thread->pid, w->type);
4389 BUG_ON(t->buffer == NULL);
4390 if (t->buffer->target_node) {
4391 struct binder_node *target_node = t->buffer->target_node;
4393 trd->target.ptr = target_node->ptr;
4394 trd->cookie = target_node->cookie;
4395 t->saved_priority = task_nice(current);
4396 if (t->priority < target_node->min_priority &&
4397 !(t->flags & TF_ONE_WAY))
4398 binder_set_nice(t->priority);
4399 else if (!(t->flags & TF_ONE_WAY) ||
4400 t->saved_priority > target_node->min_priority)
4401 binder_set_nice(target_node->min_priority);
4402 cmd = BR_TRANSACTION;
4404 trd->target.ptr = 0;
4408 trd->code = t->code;
4409 trd->flags = t->flags;
4410 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4412 t_from = binder_get_txn_from(t);
4414 struct task_struct *sender = t_from->proc->tsk;
4417 task_tgid_nr_ns(sender,
4418 task_active_pid_ns(current));
4420 trd->sender_pid = 0;
4423 ret = binder_apply_fd_fixups(proc, t);
4425 struct binder_buffer *buffer = t->buffer;
4426 bool oneway = !!(t->flags & TF_ONE_WAY);
4427 int tid = t->debug_id;
4430 binder_thread_dec_tmpref(t_from);
4431 buffer->transaction = NULL;
4432 binder_cleanup_transaction(t, "fd fixups failed",
4434 binder_free_buf(proc, thread, buffer, true);
4435 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4436 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4437 proc->pid, thread->pid,
4439 (cmd == BR_REPLY ? "reply " : ""),
4440 tid, BR_FAILED_REPLY, ret, __LINE__);
4441 if (cmd == BR_REPLY) {
4442 cmd = BR_FAILED_REPLY;
4443 if (put_user(cmd, (uint32_t __user *)ptr))
4445 ptr += sizeof(uint32_t);
4446 binder_stat_br(proc, thread, cmd);
4451 trd->data_size = t->buffer->data_size;
4452 trd->offsets_size = t->buffer->offsets_size;
4453 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4454 trd->data.ptr.offsets = trd->data.ptr.buffer +
4455 ALIGN(t->buffer->data_size,
4458 tr.secctx = t->security_ctx;
4459 if (t->security_ctx) {
4460 cmd = BR_TRANSACTION_SEC_CTX;
4461 trsize = sizeof(tr);
4463 if (put_user(cmd, (uint32_t __user *)ptr)) {
4465 binder_thread_dec_tmpref(t_from);
4467 binder_cleanup_transaction(t, "put_user failed",
4472 ptr += sizeof(uint32_t);
4473 if (copy_to_user(ptr, &tr, trsize)) {
4475 binder_thread_dec_tmpref(t_from);
4477 binder_cleanup_transaction(t, "copy_to_user failed",
4484 trace_binder_transaction_received(t);
4485 binder_stat_br(proc, thread, cmd);
4486 binder_debug(BINDER_DEBUG_TRANSACTION,
4487 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4488 proc->pid, thread->pid,
4489 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4490 (cmd == BR_TRANSACTION_SEC_CTX) ?
4491 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4492 t->debug_id, t_from ? t_from->proc->pid : 0,
4493 t_from ? t_from->pid : 0, cmd,
4494 t->buffer->data_size, t->buffer->offsets_size,
4495 (u64)trd->data.ptr.buffer,
4496 (u64)trd->data.ptr.offsets);
4499 binder_thread_dec_tmpref(t_from);
4500 t->buffer->allow_user_free = 1;
4501 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4502 binder_inner_proc_lock(thread->proc);
4503 t->to_parent = thread->transaction_stack;
4504 t->to_thread = thread;
4505 thread->transaction_stack = t;
4506 binder_inner_proc_unlock(thread->proc);
4508 binder_free_transaction(t);
4515 *consumed = ptr - buffer;
4516 binder_inner_proc_lock(proc);
4517 if (proc->requested_threads == 0 &&
4518 list_empty(&thread->proc->waiting_threads) &&
4519 proc->requested_threads_started < proc->max_threads &&
4520 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4521 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4522 /*spawn a new thread if we leave this out */) {
4523 proc->requested_threads++;
4524 binder_inner_proc_unlock(proc);
4525 binder_debug(BINDER_DEBUG_THREADS,
4526 "%d:%d BR_SPAWN_LOOPER\n",
4527 proc->pid, thread->pid);
4528 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4530 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4532 binder_inner_proc_unlock(proc);
4536 static void binder_release_work(struct binder_proc *proc,
4537 struct list_head *list)
4539 struct binder_work *w;
4540 enum binder_work_type wtype;
4543 binder_inner_proc_lock(proc);
4544 w = binder_dequeue_work_head_ilocked(list);
4545 wtype = w ? w->type : 0;
4546 binder_inner_proc_unlock(proc);
4551 case BINDER_WORK_TRANSACTION: {
4552 struct binder_transaction *t;
4554 t = container_of(w, struct binder_transaction, work);
4556 binder_cleanup_transaction(t, "process died.",
4559 case BINDER_WORK_RETURN_ERROR: {
4560 struct binder_error *e = container_of(
4561 w, struct binder_error, work);
4563 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4564 "undelivered TRANSACTION_ERROR: %u\n",
4567 case BINDER_WORK_TRANSACTION_COMPLETE: {
4568 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4569 "undelivered TRANSACTION_COMPLETE\n");
4571 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4573 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4574 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4575 struct binder_ref_death *death;
4577 death = container_of(w, struct binder_ref_death, work);
4578 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4579 "undelivered death notification, %016llx\n",
4580 (u64)death->cookie);
4582 binder_stats_deleted(BINDER_STAT_DEATH);
4584 case BINDER_WORK_NODE:
4587 pr_err("unexpected work type, %d, not freed\n",
4595 static struct binder_thread *binder_get_thread_ilocked(
4596 struct binder_proc *proc, struct binder_thread *new_thread)
4598 struct binder_thread *thread = NULL;
4599 struct rb_node *parent = NULL;
4600 struct rb_node **p = &proc->threads.rb_node;
4604 thread = rb_entry(parent, struct binder_thread, rb_node);
4606 if (current->pid < thread->pid)
4608 else if (current->pid > thread->pid)
4609 p = &(*p)->rb_right;
4615 thread = new_thread;
4616 binder_stats_created(BINDER_STAT_THREAD);
4617 thread->proc = proc;
4618 thread->pid = current->pid;
4619 atomic_set(&thread->tmp_ref, 0);
4620 init_waitqueue_head(&thread->wait);
4621 INIT_LIST_HEAD(&thread->todo);
4622 rb_link_node(&thread->rb_node, parent, p);
4623 rb_insert_color(&thread->rb_node, &proc->threads);
4624 thread->looper_need_return = true;
4625 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4626 thread->return_error.cmd = BR_OK;
4627 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4628 thread->reply_error.cmd = BR_OK;
4629 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4633 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4635 struct binder_thread *thread;
4636 struct binder_thread *new_thread;
4638 binder_inner_proc_lock(proc);
4639 thread = binder_get_thread_ilocked(proc, NULL);
4640 binder_inner_proc_unlock(proc);
4642 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4643 if (new_thread == NULL)
4645 binder_inner_proc_lock(proc);
4646 thread = binder_get_thread_ilocked(proc, new_thread);
4647 binder_inner_proc_unlock(proc);
4648 if (thread != new_thread)
4654 static void binder_free_proc(struct binder_proc *proc)
4656 struct binder_device *device;
4658 BUG_ON(!list_empty(&proc->todo));
4659 BUG_ON(!list_empty(&proc->delivered_death));
4660 if (proc->outstanding_txns)
4661 pr_warn("%s: Unexpected outstanding_txns %d\n",
4662 __func__, proc->outstanding_txns);
4663 device = container_of(proc->context, struct binder_device, context);
4664 if (refcount_dec_and_test(&device->ref)) {
4665 kfree(proc->context->name);
4668 binder_alloc_deferred_release(&proc->alloc);
4669 put_task_struct(proc->tsk);
4670 put_cred(proc->cred);
4671 binder_stats_deleted(BINDER_STAT_PROC);
4675 static void binder_free_thread(struct binder_thread *thread)
4677 BUG_ON(!list_empty(&thread->todo));
4678 binder_stats_deleted(BINDER_STAT_THREAD);
4679 binder_proc_dec_tmpref(thread->proc);
4683 static int binder_thread_release(struct binder_proc *proc,
4684 struct binder_thread *thread)
4686 struct binder_transaction *t;
4687 struct binder_transaction *send_reply = NULL;
4688 int active_transactions = 0;
4689 struct binder_transaction *last_t = NULL;
4691 binder_inner_proc_lock(thread->proc);
4693 * take a ref on the proc so it survives
4694 * after we remove this thread from proc->threads.
4695 * The corresponding dec is when we actually
4696 * free the thread in binder_free_thread()
4700 * take a ref on this thread to ensure it
4701 * survives while we are releasing it
4703 atomic_inc(&thread->tmp_ref);
4704 rb_erase(&thread->rb_node, &proc->threads);
4705 t = thread->transaction_stack;
4707 spin_lock(&t->lock);
4708 if (t->to_thread == thread)
4711 __acquire(&t->lock);
4713 thread->is_dead = true;
4717 active_transactions++;
4718 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4719 "release %d:%d transaction %d %s, still active\n",
4720 proc->pid, thread->pid,
4722 (t->to_thread == thread) ? "in" : "out");
4724 if (t->to_thread == thread) {
4725 thread->proc->outstanding_txns--;
4727 t->to_thread = NULL;
4729 t->buffer->transaction = NULL;
4733 } else if (t->from == thread) {
4738 spin_unlock(&last_t->lock);
4740 spin_lock(&t->lock);
4742 __acquire(&t->lock);
4744 /* annotation for sparse, lock not acquired in last iteration above */
4745 __release(&t->lock);
4748 * If this thread used poll, make sure we remove the waitqueue from any
4749 * poll data structures holding it.
4751 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4752 wake_up_pollfree(&thread->wait);
4754 binder_inner_proc_unlock(thread->proc);
4757 * This is needed to avoid races between wake_up_pollfree() above and
4758 * someone else removing the last entry from the queue for other reasons
4759 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4760 * descriptor being closed). Such other users hold an RCU read lock, so
4761 * we can be sure they're done after we call synchronize_rcu().
4763 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4767 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4768 binder_release_work(proc, &thread->todo);
4769 binder_thread_dec_tmpref(thread);
4770 return active_transactions;
4773 static __poll_t binder_poll(struct file *filp,
4774 struct poll_table_struct *wait)
4776 struct binder_proc *proc = filp->private_data;
4777 struct binder_thread *thread = NULL;
4778 bool wait_for_proc_work;
4780 thread = binder_get_thread(proc);
4784 binder_inner_proc_lock(thread->proc);
4785 thread->looper |= BINDER_LOOPER_STATE_POLL;
4786 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4788 binder_inner_proc_unlock(thread->proc);
4790 poll_wait(filp, &thread->wait, wait);
4792 if (binder_has_work(thread, wait_for_proc_work))
4798 static int binder_ioctl_write_read(struct file *filp,
4799 unsigned int cmd, unsigned long arg,
4800 struct binder_thread *thread)
4803 struct binder_proc *proc = filp->private_data;
4804 unsigned int size = _IOC_SIZE(cmd);
4805 void __user *ubuf = (void __user *)arg;
4806 struct binder_write_read bwr;
4808 if (size != sizeof(struct binder_write_read)) {
4812 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4816 binder_debug(BINDER_DEBUG_READ_WRITE,
4817 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4818 proc->pid, thread->pid,
4819 (u64)bwr.write_size, (u64)bwr.write_buffer,
4820 (u64)bwr.read_size, (u64)bwr.read_buffer);
4822 if (bwr.write_size > 0) {
4823 ret = binder_thread_write(proc, thread,
4826 &bwr.write_consumed);
4827 trace_binder_write_done(ret);
4829 bwr.read_consumed = 0;
4830 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4835 if (bwr.read_size > 0) {
4836 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4839 filp->f_flags & O_NONBLOCK);
4840 trace_binder_read_done(ret);
4841 binder_inner_proc_lock(proc);
4842 if (!binder_worklist_empty_ilocked(&proc->todo))
4843 binder_wakeup_proc_ilocked(proc);
4844 binder_inner_proc_unlock(proc);
4846 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4851 binder_debug(BINDER_DEBUG_READ_WRITE,
4852 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4853 proc->pid, thread->pid,
4854 (u64)bwr.write_consumed, (u64)bwr.write_size,
4855 (u64)bwr.read_consumed, (u64)bwr.read_size);
4856 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4864 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4865 struct flat_binder_object *fbo)
4868 struct binder_proc *proc = filp->private_data;
4869 struct binder_context *context = proc->context;
4870 struct binder_node *new_node;
4871 kuid_t curr_euid = current_euid();
4873 mutex_lock(&context->context_mgr_node_lock);
4874 if (context->binder_context_mgr_node) {
4875 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4879 ret = security_binder_set_context_mgr(proc->cred);
4882 if (uid_valid(context->binder_context_mgr_uid)) {
4883 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4884 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4885 from_kuid(&init_user_ns, curr_euid),
4886 from_kuid(&init_user_ns,
4887 context->binder_context_mgr_uid));
4892 context->binder_context_mgr_uid = curr_euid;
4894 new_node = binder_new_node(proc, fbo);
4899 binder_node_lock(new_node);
4900 new_node->local_weak_refs++;
4901 new_node->local_strong_refs++;
4902 new_node->has_strong_ref = 1;
4903 new_node->has_weak_ref = 1;
4904 context->binder_context_mgr_node = new_node;
4905 binder_node_unlock(new_node);
4906 binder_put_node(new_node);
4908 mutex_unlock(&context->context_mgr_node_lock);
4912 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4913 struct binder_node_info_for_ref *info)
4915 struct binder_node *node;
4916 struct binder_context *context = proc->context;
4917 __u32 handle = info->handle;
4919 if (info->strong_count || info->weak_count || info->reserved1 ||
4920 info->reserved2 || info->reserved3) {
4921 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4926 /* This ioctl may only be used by the context manager */
4927 mutex_lock(&context->context_mgr_node_lock);
4928 if (!context->binder_context_mgr_node ||
4929 context->binder_context_mgr_node->proc != proc) {
4930 mutex_unlock(&context->context_mgr_node_lock);
4933 mutex_unlock(&context->context_mgr_node_lock);
4935 node = binder_get_node_from_ref(proc, handle, true, NULL);
4939 info->strong_count = node->local_strong_refs +
4940 node->internal_strong_refs;
4941 info->weak_count = node->local_weak_refs;
4943 binder_put_node(node);
4948 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4949 struct binder_node_debug_info *info)
4952 binder_uintptr_t ptr = info->ptr;
4954 memset(info, 0, sizeof(*info));
4956 binder_inner_proc_lock(proc);
4957 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4958 struct binder_node *node = rb_entry(n, struct binder_node,
4960 if (node->ptr > ptr) {
4961 info->ptr = node->ptr;
4962 info->cookie = node->cookie;
4963 info->has_strong_ref = node->has_strong_ref;
4964 info->has_weak_ref = node->has_weak_ref;
4968 binder_inner_proc_unlock(proc);
4973 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4976 struct binder_thread *thread;
4978 if (proc->outstanding_txns > 0)
4981 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4982 thread = rb_entry(n, struct binder_thread, rb_node);
4983 if (thread->transaction_stack)
4989 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4990 struct binder_proc *target_proc)
4994 if (!info->enable) {
4995 binder_inner_proc_lock(target_proc);
4996 target_proc->sync_recv = false;
4997 target_proc->async_recv = false;
4998 target_proc->is_frozen = false;
4999 binder_inner_proc_unlock(target_proc);
5004 * Freezing the target. Prevent new transactions by
5005 * setting frozen state. If timeout specified, wait
5006 * for transactions to drain.
5008 binder_inner_proc_lock(target_proc);
5009 target_proc->sync_recv = false;
5010 target_proc->async_recv = false;
5011 target_proc->is_frozen = true;
5012 binder_inner_proc_unlock(target_proc);
5014 if (info->timeout_ms > 0)
5015 ret = wait_event_interruptible_timeout(
5016 target_proc->freeze_wait,
5017 (!target_proc->outstanding_txns),
5018 msecs_to_jiffies(info->timeout_ms));
5020 /* Check pending transactions that wait for reply */
5022 binder_inner_proc_lock(target_proc);
5023 if (binder_txns_pending_ilocked(target_proc))
5025 binder_inner_proc_unlock(target_proc);
5029 binder_inner_proc_lock(target_proc);
5030 target_proc->is_frozen = false;
5031 binder_inner_proc_unlock(target_proc);
5037 static int binder_ioctl_get_freezer_info(
5038 struct binder_frozen_status_info *info)
5040 struct binder_proc *target_proc;
5044 info->sync_recv = 0;
5045 info->async_recv = 0;
5047 mutex_lock(&binder_procs_lock);
5048 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5049 if (target_proc->pid == info->pid) {
5051 binder_inner_proc_lock(target_proc);
5052 txns_pending = binder_txns_pending_ilocked(target_proc);
5053 info->sync_recv |= target_proc->sync_recv |
5054 (txns_pending << 1);
5055 info->async_recv |= target_proc->async_recv;
5056 binder_inner_proc_unlock(target_proc);
5059 mutex_unlock(&binder_procs_lock);
5067 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5070 struct binder_proc *proc = filp->private_data;
5071 struct binder_thread *thread;
5072 unsigned int size = _IOC_SIZE(cmd);
5073 void __user *ubuf = (void __user *)arg;
5075 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5076 proc->pid, current->pid, cmd, arg);*/
5078 binder_selftest_alloc(&proc->alloc);
5080 trace_binder_ioctl(cmd, arg);
5082 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5086 thread = binder_get_thread(proc);
5087 if (thread == NULL) {
5093 case BINDER_WRITE_READ:
5094 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5098 case BINDER_SET_MAX_THREADS: {
5101 if (copy_from_user(&max_threads, ubuf,
5102 sizeof(max_threads))) {
5106 binder_inner_proc_lock(proc);
5107 proc->max_threads = max_threads;
5108 binder_inner_proc_unlock(proc);
5111 case BINDER_SET_CONTEXT_MGR_EXT: {
5112 struct flat_binder_object fbo;
5114 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5118 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5123 case BINDER_SET_CONTEXT_MGR:
5124 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5128 case BINDER_THREAD_EXIT:
5129 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5130 proc->pid, thread->pid);
5131 binder_thread_release(proc, thread);
5134 case BINDER_VERSION: {
5135 struct binder_version __user *ver = ubuf;
5137 if (size != sizeof(struct binder_version)) {
5141 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5142 &ver->protocol_version)) {
5148 case BINDER_GET_NODE_INFO_FOR_REF: {
5149 struct binder_node_info_for_ref info;
5151 if (copy_from_user(&info, ubuf, sizeof(info))) {
5156 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5160 if (copy_to_user(ubuf, &info, sizeof(info))) {
5167 case BINDER_GET_NODE_DEBUG_INFO: {
5168 struct binder_node_debug_info info;
5170 if (copy_from_user(&info, ubuf, sizeof(info))) {
5175 ret = binder_ioctl_get_node_debug_info(proc, &info);
5179 if (copy_to_user(ubuf, &info, sizeof(info))) {
5185 case BINDER_FREEZE: {
5186 struct binder_freeze_info info;
5187 struct binder_proc **target_procs = NULL, *target_proc;
5188 int target_procs_count = 0, i = 0;
5192 if (copy_from_user(&info, ubuf, sizeof(info))) {
5197 mutex_lock(&binder_procs_lock);
5198 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5199 if (target_proc->pid == info.pid)
5200 target_procs_count++;
5203 if (target_procs_count == 0) {
5204 mutex_unlock(&binder_procs_lock);
5209 target_procs = kcalloc(target_procs_count,
5210 sizeof(struct binder_proc *),
5213 if (!target_procs) {
5214 mutex_unlock(&binder_procs_lock);
5219 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5220 if (target_proc->pid != info.pid)
5223 binder_inner_proc_lock(target_proc);
5224 target_proc->tmp_ref++;
5225 binder_inner_proc_unlock(target_proc);
5227 target_procs[i++] = target_proc;
5229 mutex_unlock(&binder_procs_lock);
5231 for (i = 0; i < target_procs_count; i++) {
5233 ret = binder_ioctl_freeze(&info,
5236 binder_proc_dec_tmpref(target_procs[i]);
5239 kfree(target_procs);
5245 case BINDER_GET_FROZEN_INFO: {
5246 struct binder_frozen_status_info info;
5248 if (copy_from_user(&info, ubuf, sizeof(info))) {
5253 ret = binder_ioctl_get_freezer_info(&info);
5257 if (copy_to_user(ubuf, &info, sizeof(info))) {
5263 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5266 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5270 binder_inner_proc_lock(proc);
5271 proc->oneway_spam_detection_enabled = (bool)enable;
5272 binder_inner_proc_unlock(proc);
5282 thread->looper_need_return = false;
5283 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5284 if (ret && ret != -EINTR)
5285 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5287 trace_binder_ioctl_done(ret);
5291 static void binder_vma_open(struct vm_area_struct *vma)
5293 struct binder_proc *proc = vma->vm_private_data;
5295 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5296 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5297 proc->pid, vma->vm_start, vma->vm_end,
5298 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5299 (unsigned long)pgprot_val(vma->vm_page_prot));
5302 static void binder_vma_close(struct vm_area_struct *vma)
5304 struct binder_proc *proc = vma->vm_private_data;
5306 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5307 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5308 proc->pid, vma->vm_start, vma->vm_end,
5309 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5310 (unsigned long)pgprot_val(vma->vm_page_prot));
5311 binder_alloc_vma_close(&proc->alloc);
5314 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5316 return VM_FAULT_SIGBUS;
5319 static const struct vm_operations_struct binder_vm_ops = {
5320 .open = binder_vma_open,
5321 .close = binder_vma_close,
5322 .fault = binder_vm_fault,
5325 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5327 struct binder_proc *proc = filp->private_data;
5329 if (proc->tsk != current->group_leader)
5332 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5333 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5334 __func__, proc->pid, vma->vm_start, vma->vm_end,
5335 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5336 (unsigned long)pgprot_val(vma->vm_page_prot));
5338 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5339 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5340 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5343 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5344 vma->vm_flags &= ~VM_MAYWRITE;
5346 vma->vm_ops = &binder_vm_ops;
5347 vma->vm_private_data = proc;
5349 return binder_alloc_mmap_handler(&proc->alloc, vma);
5352 static int binder_open(struct inode *nodp, struct file *filp)
5354 struct binder_proc *proc, *itr;
5355 struct binder_device *binder_dev;
5356 struct binderfs_info *info;
5357 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5358 bool existing_pid = false;
5360 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5361 current->group_leader->pid, current->pid);
5363 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5366 spin_lock_init(&proc->inner_lock);
5367 spin_lock_init(&proc->outer_lock);
5368 get_task_struct(current->group_leader);
5369 proc->tsk = current->group_leader;
5370 proc->cred = get_cred(filp->f_cred);
5371 INIT_LIST_HEAD(&proc->todo);
5372 init_waitqueue_head(&proc->freeze_wait);
5373 proc->default_priority = task_nice(current);
5374 /* binderfs stashes devices in i_private */
5375 if (is_binderfs_device(nodp)) {
5376 binder_dev = nodp->i_private;
5377 info = nodp->i_sb->s_fs_info;
5378 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5380 binder_dev = container_of(filp->private_data,
5381 struct binder_device, miscdev);
5383 refcount_inc(&binder_dev->ref);
5384 proc->context = &binder_dev->context;
5385 binder_alloc_init(&proc->alloc);
5387 binder_stats_created(BINDER_STAT_PROC);
5388 proc->pid = current->group_leader->pid;
5389 INIT_LIST_HEAD(&proc->delivered_death);
5390 INIT_LIST_HEAD(&proc->waiting_threads);
5391 filp->private_data = proc;
5393 mutex_lock(&binder_procs_lock);
5394 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5395 if (itr->pid == proc->pid) {
5396 existing_pid = true;
5400 hlist_add_head(&proc->proc_node, &binder_procs);
5401 mutex_unlock(&binder_procs_lock);
5403 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5406 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5408 * proc debug entries are shared between contexts.
5409 * Only create for the first PID to avoid debugfs log spamming
5410 * The printing code will anyway print all contexts for a given
5411 * PID so this is not a problem.
5413 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5414 binder_debugfs_dir_entry_proc,
5415 (void *)(unsigned long)proc->pid,
5419 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5421 struct dentry *binderfs_entry;
5423 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5425 * Similar to debugfs, the process specific log file is shared
5426 * between contexts. Only create for the first PID.
5427 * This is ok since same as debugfs, the log file will contain
5428 * information on all contexts of a given PID.
5430 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5431 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5432 if (!IS_ERR(binderfs_entry)) {
5433 proc->binderfs_entry = binderfs_entry;
5437 error = PTR_ERR(binderfs_entry);
5438 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5446 static int binder_flush(struct file *filp, fl_owner_t id)
5448 struct binder_proc *proc = filp->private_data;
5450 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5455 static void binder_deferred_flush(struct binder_proc *proc)
5460 binder_inner_proc_lock(proc);
5461 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5462 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5464 thread->looper_need_return = true;
5465 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5466 wake_up_interruptible(&thread->wait);
5470 binder_inner_proc_unlock(proc);
5472 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5473 "binder_flush: %d woke %d threads\n", proc->pid,
5477 static int binder_release(struct inode *nodp, struct file *filp)
5479 struct binder_proc *proc = filp->private_data;
5481 debugfs_remove(proc->debugfs_entry);
5483 if (proc->binderfs_entry) {
5484 binderfs_remove_file(proc->binderfs_entry);
5485 proc->binderfs_entry = NULL;
5488 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5493 static int binder_node_release(struct binder_node *node, int refs)
5495 struct binder_ref *ref;
5497 struct binder_proc *proc = node->proc;
5499 binder_release_work(proc, &node->async_todo);
5501 binder_node_lock(node);
5502 binder_inner_proc_lock(proc);
5503 binder_dequeue_work_ilocked(&node->work);
5505 * The caller must have taken a temporary ref on the node,
5507 BUG_ON(!node->tmp_refs);
5508 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5509 binder_inner_proc_unlock(proc);
5510 binder_node_unlock(node);
5511 binder_free_node(node);
5517 node->local_strong_refs = 0;
5518 node->local_weak_refs = 0;
5519 binder_inner_proc_unlock(proc);
5521 spin_lock(&binder_dead_nodes_lock);
5522 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5523 spin_unlock(&binder_dead_nodes_lock);
5525 hlist_for_each_entry(ref, &node->refs, node_entry) {
5528 * Need the node lock to synchronize
5529 * with new notification requests and the
5530 * inner lock to synchronize with queued
5531 * death notifications.
5533 binder_inner_proc_lock(ref->proc);
5535 binder_inner_proc_unlock(ref->proc);
5541 BUG_ON(!list_empty(&ref->death->work.entry));
5542 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5543 binder_enqueue_work_ilocked(&ref->death->work,
5545 binder_wakeup_proc_ilocked(ref->proc);
5546 binder_inner_proc_unlock(ref->proc);
5549 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5550 "node %d now dead, refs %d, death %d\n",
5551 node->debug_id, refs, death);
5552 binder_node_unlock(node);
5553 binder_put_node(node);
5558 static void binder_deferred_release(struct binder_proc *proc)
5560 struct binder_context *context = proc->context;
5562 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5564 mutex_lock(&binder_procs_lock);
5565 hlist_del(&proc->proc_node);
5566 mutex_unlock(&binder_procs_lock);
5568 mutex_lock(&context->context_mgr_node_lock);
5569 if (context->binder_context_mgr_node &&
5570 context->binder_context_mgr_node->proc == proc) {
5571 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5572 "%s: %d context_mgr_node gone\n",
5573 __func__, proc->pid);
5574 context->binder_context_mgr_node = NULL;
5576 mutex_unlock(&context->context_mgr_node_lock);
5577 binder_inner_proc_lock(proc);
5579 * Make sure proc stays alive after we
5580 * remove all the threads
5584 proc->is_dead = true;
5585 proc->is_frozen = false;
5586 proc->sync_recv = false;
5587 proc->async_recv = false;
5589 active_transactions = 0;
5590 while ((n = rb_first(&proc->threads))) {
5591 struct binder_thread *thread;
5593 thread = rb_entry(n, struct binder_thread, rb_node);
5594 binder_inner_proc_unlock(proc);
5596 active_transactions += binder_thread_release(proc, thread);
5597 binder_inner_proc_lock(proc);
5602 while ((n = rb_first(&proc->nodes))) {
5603 struct binder_node *node;
5605 node = rb_entry(n, struct binder_node, rb_node);
5608 * take a temporary ref on the node before
5609 * calling binder_node_release() which will either
5610 * kfree() the node or call binder_put_node()
5612 binder_inc_node_tmpref_ilocked(node);
5613 rb_erase(&node->rb_node, &proc->nodes);
5614 binder_inner_proc_unlock(proc);
5615 incoming_refs = binder_node_release(node, incoming_refs);
5616 binder_inner_proc_lock(proc);
5618 binder_inner_proc_unlock(proc);
5621 binder_proc_lock(proc);
5622 while ((n = rb_first(&proc->refs_by_desc))) {
5623 struct binder_ref *ref;
5625 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5627 binder_cleanup_ref_olocked(ref);
5628 binder_proc_unlock(proc);
5629 binder_free_ref(ref);
5630 binder_proc_lock(proc);
5632 binder_proc_unlock(proc);
5634 binder_release_work(proc, &proc->todo);
5635 binder_release_work(proc, &proc->delivered_death);
5637 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5638 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5639 __func__, proc->pid, threads, nodes, incoming_refs,
5640 outgoing_refs, active_transactions);
5642 binder_proc_dec_tmpref(proc);
5645 static void binder_deferred_func(struct work_struct *work)
5647 struct binder_proc *proc;
5652 mutex_lock(&binder_deferred_lock);
5653 if (!hlist_empty(&binder_deferred_list)) {
5654 proc = hlist_entry(binder_deferred_list.first,
5655 struct binder_proc, deferred_work_node);
5656 hlist_del_init(&proc->deferred_work_node);
5657 defer = proc->deferred_work;
5658 proc->deferred_work = 0;
5663 mutex_unlock(&binder_deferred_lock);
5665 if (defer & BINDER_DEFERRED_FLUSH)
5666 binder_deferred_flush(proc);
5668 if (defer & BINDER_DEFERRED_RELEASE)
5669 binder_deferred_release(proc); /* frees proc */
5672 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5675 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5677 mutex_lock(&binder_deferred_lock);
5678 proc->deferred_work |= defer;
5679 if (hlist_unhashed(&proc->deferred_work_node)) {
5680 hlist_add_head(&proc->deferred_work_node,
5681 &binder_deferred_list);
5682 schedule_work(&binder_deferred_work);
5684 mutex_unlock(&binder_deferred_lock);
5687 static void print_binder_transaction_ilocked(struct seq_file *m,
5688 struct binder_proc *proc,
5690 struct binder_transaction *t)
5692 struct binder_proc *to_proc;
5693 struct binder_buffer *buffer = t->buffer;
5695 spin_lock(&t->lock);
5696 to_proc = t->to_proc;
5698 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5699 prefix, t->debug_id, t,
5700 t->from ? t->from->proc->pid : 0,
5701 t->from ? t->from->pid : 0,
5702 to_proc ? to_proc->pid : 0,
5703 t->to_thread ? t->to_thread->pid : 0,
5704 t->code, t->flags, t->priority, t->need_reply);
5705 spin_unlock(&t->lock);
5707 if (proc != to_proc) {
5709 * Can only safely deref buffer if we are holding the
5710 * correct proc inner lock for this node
5716 if (buffer == NULL) {
5717 seq_puts(m, " buffer free\n");
5720 if (buffer->target_node)
5721 seq_printf(m, " node %d", buffer->target_node->debug_id);
5722 seq_printf(m, " size %zd:%zd data %pK\n",
5723 buffer->data_size, buffer->offsets_size,
5727 static void print_binder_work_ilocked(struct seq_file *m,
5728 struct binder_proc *proc,
5730 const char *transaction_prefix,
5731 struct binder_work *w)
5733 struct binder_node *node;
5734 struct binder_transaction *t;
5737 case BINDER_WORK_TRANSACTION:
5738 t = container_of(w, struct binder_transaction, work);
5739 print_binder_transaction_ilocked(
5740 m, proc, transaction_prefix, t);
5742 case BINDER_WORK_RETURN_ERROR: {
5743 struct binder_error *e = container_of(
5744 w, struct binder_error, work);
5746 seq_printf(m, "%stransaction error: %u\n",
5749 case BINDER_WORK_TRANSACTION_COMPLETE:
5750 seq_printf(m, "%stransaction complete\n", prefix);
5752 case BINDER_WORK_NODE:
5753 node = container_of(w, struct binder_node, work);
5754 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5755 prefix, node->debug_id,
5756 (u64)node->ptr, (u64)node->cookie);
5758 case BINDER_WORK_DEAD_BINDER:
5759 seq_printf(m, "%shas dead binder\n", prefix);
5761 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5762 seq_printf(m, "%shas cleared dead binder\n", prefix);
5764 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5765 seq_printf(m, "%shas cleared death notification\n", prefix);
5768 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5773 static void print_binder_thread_ilocked(struct seq_file *m,
5774 struct binder_thread *thread,
5777 struct binder_transaction *t;
5778 struct binder_work *w;
5779 size_t start_pos = m->count;
5782 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5783 thread->pid, thread->looper,
5784 thread->looper_need_return,
5785 atomic_read(&thread->tmp_ref));
5786 header_pos = m->count;
5787 t = thread->transaction_stack;
5789 if (t->from == thread) {
5790 print_binder_transaction_ilocked(m, thread->proc,
5791 " outgoing transaction", t);
5793 } else if (t->to_thread == thread) {
5794 print_binder_transaction_ilocked(m, thread->proc,
5795 " incoming transaction", t);
5798 print_binder_transaction_ilocked(m, thread->proc,
5799 " bad transaction", t);
5803 list_for_each_entry(w, &thread->todo, entry) {
5804 print_binder_work_ilocked(m, thread->proc, " ",
5805 " pending transaction", w);
5807 if (!print_always && m->count == header_pos)
5808 m->count = start_pos;
5811 static void print_binder_node_nilocked(struct seq_file *m,
5812 struct binder_node *node)
5814 struct binder_ref *ref;
5815 struct binder_work *w;
5819 hlist_for_each_entry(ref, &node->refs, node_entry)
5822 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5823 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5824 node->has_strong_ref, node->has_weak_ref,
5825 node->local_strong_refs, node->local_weak_refs,
5826 node->internal_strong_refs, count, node->tmp_refs);
5828 seq_puts(m, " proc");
5829 hlist_for_each_entry(ref, &node->refs, node_entry)
5830 seq_printf(m, " %d", ref->proc->pid);
5834 list_for_each_entry(w, &node->async_todo, entry)
5835 print_binder_work_ilocked(m, node->proc, " ",
5836 " pending async transaction", w);
5840 static void print_binder_ref_olocked(struct seq_file *m,
5841 struct binder_ref *ref)
5843 binder_node_lock(ref->node);
5844 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5845 ref->data.debug_id, ref->data.desc,
5846 ref->node->proc ? "" : "dead ",
5847 ref->node->debug_id, ref->data.strong,
5848 ref->data.weak, ref->death);
5849 binder_node_unlock(ref->node);
5852 static void print_binder_proc(struct seq_file *m,
5853 struct binder_proc *proc, int print_all)
5855 struct binder_work *w;
5857 size_t start_pos = m->count;
5859 struct binder_node *last_node = NULL;
5861 seq_printf(m, "proc %d\n", proc->pid);
5862 seq_printf(m, "context %s\n", proc->context->name);
5863 header_pos = m->count;
5865 binder_inner_proc_lock(proc);
5866 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5867 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5868 rb_node), print_all);
5870 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5871 struct binder_node *node = rb_entry(n, struct binder_node,
5873 if (!print_all && !node->has_async_transaction)
5877 * take a temporary reference on the node so it
5878 * survives and isn't removed from the tree
5879 * while we print it.
5881 binder_inc_node_tmpref_ilocked(node);
5882 /* Need to drop inner lock to take node lock */
5883 binder_inner_proc_unlock(proc);
5885 binder_put_node(last_node);
5886 binder_node_inner_lock(node);
5887 print_binder_node_nilocked(m, node);
5888 binder_node_inner_unlock(node);
5890 binder_inner_proc_lock(proc);
5892 binder_inner_proc_unlock(proc);
5894 binder_put_node(last_node);
5897 binder_proc_lock(proc);
5898 for (n = rb_first(&proc->refs_by_desc);
5901 print_binder_ref_olocked(m, rb_entry(n,
5904 binder_proc_unlock(proc);
5906 binder_alloc_print_allocated(m, &proc->alloc);
5907 binder_inner_proc_lock(proc);
5908 list_for_each_entry(w, &proc->todo, entry)
5909 print_binder_work_ilocked(m, proc, " ",
5910 " pending transaction", w);
5911 list_for_each_entry(w, &proc->delivered_death, entry) {
5912 seq_puts(m, " has delivered dead binder\n");
5915 binder_inner_proc_unlock(proc);
5916 if (!print_all && m->count == header_pos)
5917 m->count = start_pos;
5920 static const char * const binder_return_strings[] = {
5925 "BR_ACQUIRE_RESULT",
5927 "BR_TRANSACTION_COMPLETE",
5932 "BR_ATTEMPT_ACQUIRE",
5937 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5940 "BR_ONEWAY_SPAM_SUSPECT",
5943 static const char * const binder_command_strings[] = {
5946 "BC_ACQUIRE_RESULT",
5954 "BC_ATTEMPT_ACQUIRE",
5955 "BC_REGISTER_LOOPER",
5958 "BC_REQUEST_DEATH_NOTIFICATION",
5959 "BC_CLEAR_DEATH_NOTIFICATION",
5960 "BC_DEAD_BINDER_DONE",
5961 "BC_TRANSACTION_SG",
5965 static const char * const binder_objstat_strings[] = {
5972 "transaction_complete"
5975 static void print_binder_stats(struct seq_file *m, const char *prefix,
5976 struct binder_stats *stats)
5980 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5981 ARRAY_SIZE(binder_command_strings));
5982 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5983 int temp = atomic_read(&stats->bc[i]);
5986 seq_printf(m, "%s%s: %d\n", prefix,
5987 binder_command_strings[i], temp);
5990 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5991 ARRAY_SIZE(binder_return_strings));
5992 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5993 int temp = atomic_read(&stats->br[i]);
5996 seq_printf(m, "%s%s: %d\n", prefix,
5997 binder_return_strings[i], temp);
6000 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6001 ARRAY_SIZE(binder_objstat_strings));
6002 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6003 ARRAY_SIZE(stats->obj_deleted));
6004 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6005 int created = atomic_read(&stats->obj_created[i]);
6006 int deleted = atomic_read(&stats->obj_deleted[i]);
6008 if (created || deleted)
6009 seq_printf(m, "%s%s: active %d total %d\n",
6011 binder_objstat_strings[i],
6017 static void print_binder_proc_stats(struct seq_file *m,
6018 struct binder_proc *proc)
6020 struct binder_work *w;
6021 struct binder_thread *thread;
6023 int count, strong, weak, ready_threads;
6024 size_t free_async_space =
6025 binder_alloc_get_free_async_space(&proc->alloc);
6027 seq_printf(m, "proc %d\n", proc->pid);
6028 seq_printf(m, "context %s\n", proc->context->name);
6031 binder_inner_proc_lock(proc);
6032 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6035 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6038 seq_printf(m, " threads: %d\n", count);
6039 seq_printf(m, " requested threads: %d+%d/%d\n"
6040 " ready threads %d\n"
6041 " free async space %zd\n", proc->requested_threads,
6042 proc->requested_threads_started, proc->max_threads,
6046 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6048 binder_inner_proc_unlock(proc);
6049 seq_printf(m, " nodes: %d\n", count);
6053 binder_proc_lock(proc);
6054 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6055 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6058 strong += ref->data.strong;
6059 weak += ref->data.weak;
6061 binder_proc_unlock(proc);
6062 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6064 count = binder_alloc_get_allocated_count(&proc->alloc);
6065 seq_printf(m, " buffers: %d\n", count);
6067 binder_alloc_print_pages(m, &proc->alloc);
6070 binder_inner_proc_lock(proc);
6071 list_for_each_entry(w, &proc->todo, entry) {
6072 if (w->type == BINDER_WORK_TRANSACTION)
6075 binder_inner_proc_unlock(proc);
6076 seq_printf(m, " pending transactions: %d\n", count);
6078 print_binder_stats(m, " ", &proc->stats);
6082 int binder_state_show(struct seq_file *m, void *unused)
6084 struct binder_proc *proc;
6085 struct binder_node *node;
6086 struct binder_node *last_node = NULL;
6088 seq_puts(m, "binder state:\n");
6090 spin_lock(&binder_dead_nodes_lock);
6091 if (!hlist_empty(&binder_dead_nodes))
6092 seq_puts(m, "dead nodes:\n");
6093 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6095 * take a temporary reference on the node so it
6096 * survives and isn't removed from the list
6097 * while we print it.
6100 spin_unlock(&binder_dead_nodes_lock);
6102 binder_put_node(last_node);
6103 binder_node_lock(node);
6104 print_binder_node_nilocked(m, node);
6105 binder_node_unlock(node);
6107 spin_lock(&binder_dead_nodes_lock);
6109 spin_unlock(&binder_dead_nodes_lock);
6111 binder_put_node(last_node);
6113 mutex_lock(&binder_procs_lock);
6114 hlist_for_each_entry(proc, &binder_procs, proc_node)
6115 print_binder_proc(m, proc, 1);
6116 mutex_unlock(&binder_procs_lock);
6121 int binder_stats_show(struct seq_file *m, void *unused)
6123 struct binder_proc *proc;
6125 seq_puts(m, "binder stats:\n");
6127 print_binder_stats(m, "", &binder_stats);
6129 mutex_lock(&binder_procs_lock);
6130 hlist_for_each_entry(proc, &binder_procs, proc_node)
6131 print_binder_proc_stats(m, proc);
6132 mutex_unlock(&binder_procs_lock);
6137 int binder_transactions_show(struct seq_file *m, void *unused)
6139 struct binder_proc *proc;
6141 seq_puts(m, "binder transactions:\n");
6142 mutex_lock(&binder_procs_lock);
6143 hlist_for_each_entry(proc, &binder_procs, proc_node)
6144 print_binder_proc(m, proc, 0);
6145 mutex_unlock(&binder_procs_lock);
6150 static int proc_show(struct seq_file *m, void *unused)
6152 struct binder_proc *itr;
6153 int pid = (unsigned long)m->private;
6155 mutex_lock(&binder_procs_lock);
6156 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6157 if (itr->pid == pid) {
6158 seq_puts(m, "binder proc state:\n");
6159 print_binder_proc(m, itr, 1);
6162 mutex_unlock(&binder_procs_lock);
6167 static void print_binder_transaction_log_entry(struct seq_file *m,
6168 struct binder_transaction_log_entry *e)
6170 int debug_id = READ_ONCE(e->debug_id_done);
6172 * read barrier to guarantee debug_id_done read before
6173 * we print the log values
6177 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6178 e->debug_id, (e->call_type == 2) ? "reply" :
6179 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6180 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6181 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6182 e->return_error, e->return_error_param,
6183 e->return_error_line);
6185 * read-barrier to guarantee read of debug_id_done after
6186 * done printing the fields of the entry
6189 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6190 "\n" : " (incomplete)\n");
6193 int binder_transaction_log_show(struct seq_file *m, void *unused)
6195 struct binder_transaction_log *log = m->private;
6196 unsigned int log_cur = atomic_read(&log->cur);
6201 count = log_cur + 1;
6202 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6203 0 : count % ARRAY_SIZE(log->entry);
6204 if (count > ARRAY_SIZE(log->entry) || log->full)
6205 count = ARRAY_SIZE(log->entry);
6206 for (i = 0; i < count; i++) {
6207 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6209 print_binder_transaction_log_entry(m, &log->entry[index]);
6214 const struct file_operations binder_fops = {
6215 .owner = THIS_MODULE,
6216 .poll = binder_poll,
6217 .unlocked_ioctl = binder_ioctl,
6218 .compat_ioctl = compat_ptr_ioctl,
6219 .mmap = binder_mmap,
6220 .open = binder_open,
6221 .flush = binder_flush,
6222 .release = binder_release,
6225 static int __init init_binder_device(const char *name)
6228 struct binder_device *binder_device;
6230 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6234 binder_device->miscdev.fops = &binder_fops;
6235 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6236 binder_device->miscdev.name = name;
6238 refcount_set(&binder_device->ref, 1);
6239 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6240 binder_device->context.name = name;
6241 mutex_init(&binder_device->context.context_mgr_node_lock);
6243 ret = misc_register(&binder_device->miscdev);
6245 kfree(binder_device);
6249 hlist_add_head(&binder_device->hlist, &binder_devices);
6254 static int __init binder_init(void)
6257 char *device_name, *device_tmp;
6258 struct binder_device *device;
6259 struct hlist_node *tmp;
6260 char *device_names = NULL;
6262 ret = binder_alloc_shrinker_init();
6266 atomic_set(&binder_transaction_log.cur, ~0U);
6267 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6269 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6270 if (binder_debugfs_dir_entry_root)
6271 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6272 binder_debugfs_dir_entry_root);
6274 if (binder_debugfs_dir_entry_root) {
6275 debugfs_create_file("state",
6277 binder_debugfs_dir_entry_root,
6279 &binder_state_fops);
6280 debugfs_create_file("stats",
6282 binder_debugfs_dir_entry_root,
6284 &binder_stats_fops);
6285 debugfs_create_file("transactions",
6287 binder_debugfs_dir_entry_root,
6289 &binder_transactions_fops);
6290 debugfs_create_file("transaction_log",
6292 binder_debugfs_dir_entry_root,
6293 &binder_transaction_log,
6294 &binder_transaction_log_fops);
6295 debugfs_create_file("failed_transaction_log",
6297 binder_debugfs_dir_entry_root,
6298 &binder_transaction_log_failed,
6299 &binder_transaction_log_fops);
6302 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6303 strcmp(binder_devices_param, "") != 0) {
6305 * Copy the module_parameter string, because we don't want to
6306 * tokenize it in-place.
6308 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6309 if (!device_names) {
6311 goto err_alloc_device_names_failed;
6314 device_tmp = device_names;
6315 while ((device_name = strsep(&device_tmp, ","))) {
6316 ret = init_binder_device(device_name);
6318 goto err_init_binder_device_failed;
6322 ret = init_binderfs();
6324 goto err_init_binder_device_failed;
6328 err_init_binder_device_failed:
6329 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6330 misc_deregister(&device->miscdev);
6331 hlist_del(&device->hlist);
6335 kfree(device_names);
6337 err_alloc_device_names_failed:
6338 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6343 device_initcall(binder_init);
6345 #define CREATE_TRACE_POINTS
6346 #include "binder_trace.h"
6348 MODULE_LICENSE("GPL v2");