// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
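
/*
 * As a concrete illustration of the ordering rules above (a sketch,
 * not a call sequence lifted from this file), code that needed all
 * three locks for one proc would have to take them outermost-first:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * Taking, say, proc->inner_lock and then node->lock would invert the
 * order and risk deadlock, as would nesting procB's locks inside
 * procA's at the same level.
 */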
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}
#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
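
/*
 * Example of the ring-buffer behaviour above (entry counts are
 * illustrative): with ARRAY_SIZE(log->entry) == 32, the 33rd call
 * returns cur == 32, sets log->full, and recycles slot 32 % 32 == 0,
 * overwriting the oldest entry. Readers use e->debug_id_done
 * (cleared here, written again once the entry is complete) to detect
 * an entry that is still being filled in.
 */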
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
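
/*
 * Illustration only (not a helper used by the driver): the usual
 * pattern is to pick the locked or unlocked variant depending on
 * whether proc->inner_lock is already held, e.g.
 *
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 * from unlocked context, versus, with the inner lock already held:
 *
 *	binder_inner_proc_lock(thread->proc);
 *	...
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_inner_proc_unlock(thread->proc);
 *
 * The deferred variant is reserved for work (such as BINDER_WORK_NODE)
 * that should not by itself wake the thread's read loop.
 */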
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
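
/**
 * binder_wakeup_proc_ilocked() - wake up a thread in @proc for proc work
 * @proc:	process to wake a thread in
 *
 * Pairs binder_select_thread_ilocked() with binder_wakeup_thread_ilocked()
 * for the common asynchronous case. Requires proc->inner_lock to be held.
 */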
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
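
/*
 * Example (a sketch, not a helper used elsewhere in the driver) of the
 * lookup/use/put pattern the tmpref comments above describe: the
 * temporary reference taken inside binder_get_node() keeps the node
 * alive until the matching binder_put_node().
 */
static void __maybe_unused binder_example_node_lookup(struct binder_proc *proc,
						      binder_uintptr_t ptr)
{
	struct binder_node *node;

	node = binder_get_node(proc, ptr);	/* takes a tmp ref */
	if (!node)
		return;
	/* ... node may be used safely here ... */
	binder_put_node(node);			/* drops the tmp ref */
}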
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
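
/*
 * Worked example of the descriptor assignment loop above (values are
 * illustrative): if the proc already holds descs {0, 1, 2, 5}, a new
 * ref starts at desc 1 (or 0 for the context manager's node) and is
 * bumped past each taken desc in rb-tree order, ending at 3 -- the
 * lowest unused descriptor. Descriptor 0 is reserved for refs to the
 * context manager node.
 */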
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
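
/*
 * Illustration (a sketch; the actual BC_* handling lives in
 * binder_thread_write(), later in this file): userspace ref-count
 * commands map onto binder_update_ref_for_handle(), e.g.
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	// BC_ACQUIRE: take a strong ref on the handle
 *	ret = binder_update_ref_for_handle(proc, desc, true, true, &rdata);
 *	// BC_RELEASE: drop it again
 *	ret = binder_dec_ref_for_handle(proc, desc, true, &rdata);
 */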
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
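
/*
 * Example (sketch only) of the safe t->from access pattern these
 * helpers provide; dereferencing t->from directly would race with
 * the sending thread exiting:
 *
 *	struct binder_thread *from;
 *
 *	from = binder_get_txn_from_and_acq_inner(t);
 *	if (from) {
 *		... use from, protected by from->proc->inner_lock ...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */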
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction to clean up
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
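
/*
 * For orientation (a summary of the uapi layout, not new driver
 * logic): every object embedded in a transaction buffer starts with a
 * struct binder_object_header whose ->type selects the full shape,
 * e.g. a flattened node or handle:
 *
 *	struct flat_binder_object {
 *		struct binder_object_header	hdr;	// BINDER_TYPE_*
 *		__u32				flags;
 *		union {
 *			binder_uintptr_t	binder;	// local object
 *			__u32			handle;	// remote ref
 *		};
 *		binder_uintptr_t		cookie;
 *	};
 *
 * which is why binder_get_object() can size-check an object from its
 * header alone.
 */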
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid
 *		offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @b to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
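
/*
 * Usage sketch (mirrors the BINDER_TYPE_FDA cleanup below): any code
 * running in binder_ioctl() context that must close an installed fd
 * defers it instead of closing directly, e.g.
 *
 *	binder_deferred_fd_close(fd);
 *	if (thread)
 *		thread->looper_need_return = true;
 *
 * so the close runs from task_work after the ioctl returns, when it
 * can no longer invalidate fdget()-style lightweight fd references.
 */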
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
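
/*
 * Concrete effect of the translation above (illustrative values): a
 * sender's local object
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = <sender ptr>, .cookie = <sender cookie> }
 *
 * leaves for the target as
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = <desc in target>, .binder = 0, .cookie = 0 }
 *
 * with the target's ref (and thus the node) pinned via
 * binder_inc_ref_for_node() before the buffer is delivered.
 */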
2126 static int binder_translate_handle(struct flat_binder_object *fp,
2127 struct binder_transaction *t,
2128 struct binder_thread *thread)
2130 struct binder_proc *proc = thread->proc;
2131 struct binder_proc *target_proc = t->to_proc;
2132 struct binder_node *node;
2133 struct binder_ref_data src_rdata;
2134 int ret = 0;
2136 node = binder_get_node_from_ref(proc, fp->handle,
2137 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2138 if (!node) {
2139 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2140 proc->pid, thread->pid, fp->handle);
2143 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2148 binder_node_lock(node);
2149 if (node->proc == target_proc) {
2150 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2151 fp->hdr.type = BINDER_TYPE_BINDER;
2152 else
2153 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2154 fp->binder = node->ptr;
2155 fp->cookie = node->cookie;
2157 binder_inner_proc_lock(node->proc);
2159 __acquire(&node->proc->inner_lock);
2160 binder_inc_node_nilocked(node,
2161 fp->hdr.type == BINDER_TYPE_BINDER,
2162 0, NULL);
2164 binder_inner_proc_unlock(node->proc);
2166 __release(&node->proc->inner_lock);
2167 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2168 binder_debug(BINDER_DEBUG_TRANSACTION,
2169 " ref %d desc %d -> node %d u%016llx\n",
2170 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2171 (u64)node->ptr);
2172 binder_node_unlock(node);
2174 struct binder_ref_data dest_rdata;
2176 binder_node_unlock(node);
2177 ret = binder_inc_ref_for_node(target_proc, node,
2178 fp->hdr.type == BINDER_TYPE_HANDLE,
2179 NULL, &dest_rdata);
2180 if (ret)
2181 goto done;
2183 fp->binder = 0;
2184 fp->handle = dest_rdata.desc;
2185 fp->cookie = 0;
2186 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2187 &dest_rdata);
2188 binder_debug(BINDER_DEBUG_TRANSACTION,
2189 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2190 src_rdata.debug_id, src_rdata.desc,
2191 dest_rdata.debug_id, dest_rdata.desc,
2192 node->debug_id);
2195 binder_put_node(node);
2199 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2200 struct binder_transaction *t,
2201 struct binder_thread *thread,
2202 struct binder_transaction *in_reply_to)
2204 struct binder_proc *proc = thread->proc;
2205 struct binder_proc *target_proc = t->to_proc;
2206 struct binder_txn_fd_fixup *fixup;
2207 struct file *file;
2208 int ret = 0;
2209 bool target_allows_fd;
2211 if (in_reply_to)
2212 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2213 else
2214 target_allows_fd = t->buffer->target_node->accept_fds;
2215 if (!target_allows_fd) {
2216 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2217 proc->pid, thread->pid,
2218 in_reply_to ? "reply" : "transaction",
2219 fd);
2220 ret = -EPERM;
2221 goto err_fd_not_accepted;
2222 }
2224 file = fget(fd);
2225 if (!file) {
2226 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2227 proc->pid, thread->pid, fd);
2231 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2237 /*
2238 * Add fixup record for this transaction. The allocation
2239 * of the fd in the target needs to be done from a
2240 * target thread.
2241 */
2242 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2243 if (!fixup) {
2244 ret = -ENOMEM;
2245 goto err_alloc;
2246 }
2247 fixup->file = file;
2248 fixup->offset = fd_offset;
2249 fixup->target_fd = -1;
2250 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2251 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2253 return ret;
2255 err_alloc:
2256 err_security:
2257 fput(file);
2258 err_fget:
2259 err_fd_not_accepted:
2263 /**
2264 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2265 * @offset: offset in target buffer to fixup
2266 * @skip_size: bytes to skip in copy (fixup will be written later)
2267 * @fixup_data: data to write at fixup offset
2268 * @node: list node
2269 *
2270 * This is used for the pointer fixup list (pf) which is created and consumed
2271 * during binder_transaction() and is only accessed locally. No
2272 * locking is necessary.
2273 *
2274 * The list is ordered by @offset.
2275 */
2276 struct binder_ptr_fixup {
2277 binder_size_t offset;
2278 size_t skip_size;
2279 binder_uintptr_t fixup_data;
2280 struct list_head node;
2281 };
2283 /**
2284 * struct binder_sg_copy - scatter-gather data to be copied
2285 * @offset: offset in target buffer
2286 * @sender_uaddr: user address in source buffer
2287 * @length: bytes to copy
2288 * @node: list node
2289 *
2290 * This is used for the sg copy list (sgc) which is created and consumed
2291 * during binder_transaction() and is only accessed locally. No
2292 * locking is necessary.
2293 *
2294 * The list is ordered by @offset.
2295 */
2296 struct binder_sg_copy {
2297 binder_size_t offset;
2298 const void __user *sender_uaddr;
2299 size_t length;
2300 struct list_head node;
2301 };
2303 /**
2304 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2305 * @alloc: binder_alloc associated with @buffer
2306 * @buffer: binder buffer in target process
2307 * @sgc_head: list_head of scatter-gather copy list
2308 * @pf_head: list_head of pointer fixup list
2310 * Processes all elements of @sgc_head, applying fixups from @pf_head
2311 * and copying the scatter-gather data from the source process' user
2312 * buffer to the target's buffer. It is expected that the list creation
2313 * and processing all occurs during binder_transaction() so these lists
2314 * are only accessed in local context.
2316 * Return: 0=success, else -errno
2317 */
2318 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2319 struct binder_buffer *buffer,
2320 struct list_head *sgc_head,
2321 struct list_head *pf_head)
2324 struct binder_sg_copy *sgc, *tmpsgc;
2325 struct binder_ptr_fixup *tmppf;
2326 struct binder_ptr_fixup *pf =
2327 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2328 node);
2330 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2331 size_t bytes_copied = 0;
2333 while (bytes_copied < sgc->length) {
2335 size_t bytes_left = sgc->length - bytes_copied;
2336 size_t offset = sgc->offset + bytes_copied;
2338 /*
2339 * We copy up to the fixup (pointed to by pf)
2340 */
2341 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2342 : bytes_left;
2343 if (!ret && copy_size)
2344 ret = binder_alloc_copy_user_to_buffer(
2345 alloc, buffer,
2346 offset,
2347 sgc->sender_uaddr + bytes_copied,
2348 copy_size);
2349 bytes_copied += copy_size;
2350 if (copy_size != bytes_left) {
2351 BUG_ON(!pf);
2352 /* we stopped at a fixup offset */
2353 if (pf->skip_size) {
2354 /*
2355 * we are just skipping. This is for
2356 * BINDER_TYPE_FDA where the translated
2357 * fds will be fixed up when we get
2358 * to target context.
2359 */
2360 bytes_copied += pf->skip_size;
2362 /* apply the fixup indicated by pf */
2363 if (!ret)
2364 ret = binder_alloc_copy_to_buffer(
2365 alloc, buffer,
2366 pf->offset,
2367 &pf->fixup_data,
2368 sizeof(pf->fixup_data));
2369 bytes_copied += sizeof(pf->fixup_data);
2371 list_del(&pf->node);
2372 kfree(pf);
2373 pf = list_first_entry_or_null(pf_head,
2374 struct binder_ptr_fixup, node);
2377 list_del(&sgc->node);
2378 kfree(sgc);
2380 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2381 BUG_ON(pf->skip_size == 0);
2382 list_del(&pf->node);
2383 kfree(pf);
2385 BUG_ON(!list_empty(sgc_head));
2387 return ret > 0 ? -EINVAL : ret;
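/*
 * Worked example of the loop above (hypothetical offsets): one sgc
 * entry with offset 0x80 and length 0x40, plus one pf entry with
 * offset 0x90, skip_size 0 and a translated pointer in fixup_data:
 *
 *   pass 1: copy_size = pf->offset - offset = 0x10; sender bytes
 *           [0x00,0x10) are copied to target [0x80,0x90). We stopped
 *           at the fixup, so the 8-byte fixup_data is written at
 *           0x90 and bytes_copied advances past it (the source bytes
 *           there are never copied); the pf list is now empty.
 *   pass 2: copy_size = bytes_left = 0x28; sender bytes [0x18,0x40)
 *           are copied to target [0x98,0xc0) and the sgc entry is
 *           freed.
 *
 * A pf entry with skip_size > 0 (BINDER_TYPE_FDA) would instead be
 * skipped here and patched later by binder_apply_fd_fixups().
 */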
2390 /**
2391 * binder_cleanup_deferred_txn_lists() - free specified lists
2392 * @sgc_head: list_head of scatter-gather copy list
2393 * @pf_head: list_head of pointer fixup list
2395 * Called to clean up @sgc_head and @pf_head if there is an
2396 * error.
2397 */
2398 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2399 struct list_head *pf_head)
2401 struct binder_sg_copy *sgc, *tmpsgc;
2402 struct binder_ptr_fixup *pf, *tmppf;
2404 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2405 list_del(&sgc->node);
2406 kfree(sgc);
2408 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2409 list_del(&pf->node);
2410 kfree(pf);
2414 /**
2415 * binder_defer_copy() - queue a scatter-gather buffer for copy
2416 * @sgc_head: list_head of scatter-gather copy list
2417 * @offset: binder buffer offset in target process
2418 * @sender_uaddr: user address in source process
2419 * @length: bytes to copy
2421 * Specify a scatter-gather block to be copied. The actual copy must
2422 * be deferred until all the needed fixups are identified and queued.
2423 * Then the copy and fixups are done together so un-translated values
2424 * from the source are never visible in the target buffer.
2426 * We are guaranteed that repeated calls to this function will have
2427 * monotonically increasing @offset values so the list will naturally
2428 * be ordered.
2429 *
2430 * Return: 0=success, else -errno
2431 */
2432 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2433 const void __user *sender_uaddr, size_t length)
2435 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2440 bc->offset = offset;
2441 bc->sender_uaddr = sender_uaddr;
2442 bc->length = length;
2443 INIT_LIST_HEAD(&bc->node);
2445 /*
2446 * We are guaranteed that the deferred copies are in-order
2447 * so just add to the tail.
2448 */
2449 list_add_tail(&bc->node, sgc_head);
2454 /**
2455 * binder_add_fixup() - queue a fixup to be applied to sg copy
2456 * @pf_head: list_head of binder ptr fixup list
2457 * @offset: binder buffer offset in target process
2458 * @fixup: bytes to be copied for fixup
2459 * @skip_size: bytes to skip when copying (fixup will be applied later)
2461 * Add the specified fixup to a list ordered by @offset. When copying
2462 * the scatter-gather buffers, the fixup will be copied instead of
2463 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2464 * will be applied later (in target process context), so we just skip
2465 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2466 * fixup data itself (a pointer-sized value) into the target buffer.
2467 *
2468 * This function is called *mostly* in @offset order, but there are
2469 * exceptions. Since out-of-order inserts are relatively uncommon,
2470 * we insert the new element by searching backward from the tail of
2471 * the list.
2472 *
2473 * Return: 0=success, else -errno
2474 */
2475 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2476 binder_uintptr_t fixup, size_t skip_size)
2478 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2479 struct binder_ptr_fixup *tmppf;
2484 pf->offset = offset;
2485 pf->fixup_data = fixup;
2486 pf->skip_size = skip_size;
2487 INIT_LIST_HEAD(&pf->node);
2489 /* Fixups are *mostly* added in-order, but there are some
2490 * exceptions. Look backwards through list for insertion point.
2491 */
2492 list_for_each_entry_reverse(tmppf, pf_head, node) {
2493 if (tmppf->offset < pf->offset) {
2494 list_add(&pf->node, &tmppf->node);
2498 /*
2499 * if we get here, then the new offset is the lowest so
2500 * insert at the head
2501 */
2502 list_add(&pf->node, pf_head);
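/*
 * Illustrative sketch (hypothetical offsets, not driver code): how the
 * BINDER_TYPE_PTR handling in binder_transaction() pairs the two
 * helpers above to produce the example traced in
 * binder_do_deferred_txn_copies().
 */
#if 0
static int example_queue_sg_and_fixup(struct list_head *sgc_head,
				      struct list_head *pf_head,
				      const void __user *sender_uaddr,
				      binder_uintptr_t translated_ptr)
{
	int ret;

	/* defer the copy of a 0x40-byte fragment to target offset 0x80 */
	ret = binder_defer_copy(sgc_head, 0x80, sender_uaddr, 0x40);
	if (ret)
		return ret;
	/* a translated pointer must overwrite target offset 0x90 */
	return binder_add_fixup(pf_head, 0x90, translated_ptr, 0);
}
#endif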
2506 static int binder_translate_fd_array(struct list_head *pf_head,
2507 struct binder_fd_array_object *fda,
2508 const void __user *sender_ubuffer,
2509 struct binder_buffer_object *parent,
2510 struct binder_buffer_object *sender_uparent,
2511 struct binder_transaction *t,
2512 struct binder_thread *thread,
2513 struct binder_transaction *in_reply_to)
2515 binder_size_t fdi, fd_buf_size;
2516 binder_size_t fda_offset;
2517 const void __user *sender_ufda_base;
2518 struct binder_proc *proc = thread->proc;
2521 if (fda->num_fds == 0)
2524 fd_buf_size = sizeof(u32) * fda->num_fds;
2525 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2526 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2527 proc->pid, thread->pid, (u64)fda->num_fds);
2530 if (fd_buf_size > parent->length ||
2531 fda->parent_offset > parent->length - fd_buf_size) {
2532 /* No space for all file descriptors here. */
2533 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2534 proc->pid, thread->pid, (u64)fda->num_fds);
2537 /*
2538 * the source data for binder_buffer_object is visible
2539 * to user-space and the @buffer element is the user
2540 * pointer to the buffer_object containing the fd_array.
2541 * Convert the address to an offset relative to
2542 * the base of the transaction buffer.
2543 */
2544 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2545 fda->parent_offset;
2546 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2547 fda->parent_offset;
2549 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2550 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2551 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2552 proc->pid, thread->pid);
2555 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2559 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2561 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2562 binder_size_t sender_uoffset = fdi * sizeof(fd);
2564 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2565 if (!ret)
2566 ret = binder_translate_fd(fd, offset, t, thread,
2567 in_reply_to);
2568 if (ret)
2569 return ret > 0 ? -EINVAL : ret;
2574 static int binder_fixup_parent(struct list_head *pf_head,
2575 struct binder_transaction *t,
2576 struct binder_thread *thread,
2577 struct binder_buffer_object *bp,
2578 binder_size_t off_start_offset,
2579 binder_size_t num_valid,
2580 binder_size_t last_fixup_obj_off,
2581 binder_size_t last_fixup_min_off)
2583 struct binder_buffer_object *parent;
2584 struct binder_buffer *b = t->buffer;
2585 struct binder_proc *proc = thread->proc;
2586 struct binder_proc *target_proc = t->to_proc;
2587 struct binder_object object;
2588 binder_size_t buffer_offset;
2589 binder_size_t parent_offset;
2591 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2594 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2595 off_start_offset, &parent_offset,
2596 num_valid);
2597 if (!parent) {
2598 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2599 proc->pid, thread->pid);
2603 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2604 parent_offset, bp->parent_offset,
2605 last_fixup_obj_off,
2606 last_fixup_min_off)) {
2607 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2608 proc->pid, thread->pid);
2612 if (parent->length < sizeof(binder_uintptr_t) ||
2613 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2614 /* No space for a pointer here! */
2615 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2616 proc->pid, thread->pid);
2619 buffer_offset = bp->parent_offset +
2620 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2621 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
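/*
 * Numeric example for the fixup queued above (hypothetical values): if
 * the parent sg block was rebased to b->user_data + 0x40 and
 * bp->parent_offset is 0x8, then buffer_offset is 0x48 and the
 * deferred copy writes bp->buffer (the child's rebased address, set in
 * the BINDER_TYPE_PTR case of binder_transaction()) into the parent
 * block at target offset 0x48.
 */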
2624 /**
2625 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2626 * @t: transaction to send
2627 * @proc: process to send the transaction to
2628 * @thread: thread in @proc to send the transaction to (may be NULL)
2630 * This function queues a transaction to the specified process. It will try
2631 * to find a thread in the target process to handle the transaction and
2632 * wake it up. If no thread is found, the work is queued to the proc
2633 * waitqueue.
2635 * If the @thread parameter is not NULL, the transaction is always queued
2636 * to the waitlist of that specific thread.
2638 * Return: 0 if the transaction was successfully queued
2639 * BR_DEAD_REPLY if the target process or thread is dead
2640 * BR_FROZEN_REPLY if the target process or thread is frozen
2641 */
2642 static int binder_proc_transaction(struct binder_transaction *t,
2643 struct binder_proc *proc,
2644 struct binder_thread *thread)
2646 struct binder_node *node = t->buffer->target_node;
2647 bool oneway = !!(t->flags & TF_ONE_WAY);
2648 bool pending_async = false;
2651 binder_node_lock(node);
2654 if (node->has_async_transaction)
2655 pending_async = true;
2657 node->has_async_transaction = true;
2660 binder_inner_proc_lock(proc);
2661 if (proc->is_frozen) {
2662 proc->sync_recv |= !oneway;
2663 proc->async_recv |= oneway;
2666 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2667 (thread && thread->is_dead)) {
2668 binder_inner_proc_unlock(proc);
2669 binder_node_unlock(node);
2670 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2673 if (!thread && !pending_async)
2674 thread = binder_select_thread_ilocked(proc);
2677 binder_enqueue_thread_work_ilocked(thread, &t->work);
2678 else if (!pending_async)
2679 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2681 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2684 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2686 proc->outstanding_txns++;
2687 binder_inner_proc_unlock(proc);
2688 binder_node_unlock(node);
2693 /**
2694 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2695 * @node: struct binder_node for which to get refs
2696 * @procp: returns @node->proc if valid
2697 * @error: if no @procp then returns BR_DEAD_REPLY
2699 * User-space normally keeps the node alive when creating a transaction
2700 * since it has a reference to the target. The local strong ref keeps it
2701 * alive if the sending process dies before the target process processes
2702 * the transaction. If the source process is malicious or has a reference
2703 * counting bug, relying on the local strong ref can fail.
2705 * Since user-space can cause the local strong ref to go away, we also take
2706 * a tmpref on the node to ensure it survives while we are constructing
2707 * the transaction. We also need a tmpref on the proc while we are
2708 * constructing the transaction, so we take that here as well.
2710 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2711 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2712 * target proc has died, @error is set to BR_DEAD_REPLY.
2713 */
2714 static struct binder_node *binder_get_node_refs_for_txn(
2715 struct binder_node *node,
2716 struct binder_proc **procp,
2719 struct binder_node *target_node = NULL;
2721 binder_node_inner_lock(node);
2724 binder_inc_node_nilocked(node, 1, 0, NULL);
2725 binder_inc_node_tmpref_ilocked(node);
2726 node->proc->tmp_ref++;
2727 *procp = node->proc;
2729 *error = BR_DEAD_REPLY;
2730 binder_node_inner_unlock(node);
2735 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2736 uint32_t command, int32_t param)
2738 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2741 /* annotation for sparse */
2742 __release(&from->proc->inner_lock);
2746 /* don't override existing errors */
2747 if (from->ee.command == BR_OK)
2748 binder_set_extended_error(&from->ee, id, command, param);
2749 binder_inner_proc_unlock(from->proc);
2750 binder_thread_dec_tmpref(from);
2753 static void binder_transaction(struct binder_proc *proc,
2754 struct binder_thread *thread,
2755 struct binder_transaction_data *tr, int reply,
2756 binder_size_t extra_buffers_size)
2759 struct binder_transaction *t;
2760 struct binder_work *w;
2761 struct binder_work *tcomplete;
2762 binder_size_t buffer_offset = 0;
2763 binder_size_t off_start_offset, off_end_offset;
2764 binder_size_t off_min;
2765 binder_size_t sg_buf_offset, sg_buf_end_offset;
2766 binder_size_t user_offset = 0;
2767 struct binder_proc *target_proc = NULL;
2768 struct binder_thread *target_thread = NULL;
2769 struct binder_node *target_node = NULL;
2770 struct binder_transaction *in_reply_to = NULL;
2771 struct binder_transaction_log_entry *e;
2772 uint32_t return_error = 0;
2773 uint32_t return_error_param = 0;
2774 uint32_t return_error_line = 0;
2775 binder_size_t last_fixup_obj_off = 0;
2776 binder_size_t last_fixup_min_off = 0;
2777 struct binder_context *context = proc->context;
2778 int t_debug_id = atomic_inc_return(&binder_last_id);
2779 char *secctx = NULL;
2780 u32 secctx_sz = 0;
2781 struct list_head sgc_head;
2782 struct list_head pf_head;
2783 const void __user *user_buffer = (const void __user *)
2784 (uintptr_t)tr->data.ptr.buffer;
2785 INIT_LIST_HEAD(&sgc_head);
2786 INIT_LIST_HEAD(&pf_head);
2788 e = binder_transaction_log_add(&binder_transaction_log);
2789 e->debug_id = t_debug_id;
2790 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2791 e->from_proc = proc->pid;
2792 e->from_thread = thread->pid;
2793 e->target_handle = tr->target.handle;
2794 e->data_size = tr->data_size;
2795 e->offsets_size = tr->offsets_size;
2796 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2798 binder_inner_proc_lock(proc);
2799 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2800 binder_inner_proc_unlock(proc);
2803 binder_inner_proc_lock(proc);
2804 in_reply_to = thread->transaction_stack;
2805 if (in_reply_to == NULL) {
2806 binder_inner_proc_unlock(proc);
2807 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2808 proc->pid, thread->pid);
2809 return_error = BR_FAILED_REPLY;
2810 return_error_param = -EPROTO;
2811 return_error_line = __LINE__;
2812 goto err_empty_call_stack;
2814 if (in_reply_to->to_thread != thread) {
2815 spin_lock(&in_reply_to->lock);
2816 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2817 proc->pid, thread->pid, in_reply_to->debug_id,
2818 in_reply_to->to_proc ?
2819 in_reply_to->to_proc->pid : 0,
2820 in_reply_to->to_thread ?
2821 in_reply_to->to_thread->pid : 0);
2822 spin_unlock(&in_reply_to->lock);
2823 binder_inner_proc_unlock(proc);
2824 return_error = BR_FAILED_REPLY;
2825 return_error_param = -EPROTO;
2826 return_error_line = __LINE__;
2828 goto err_bad_call_stack;
2830 thread->transaction_stack = in_reply_to->to_parent;
2831 binder_inner_proc_unlock(proc);
2832 binder_set_nice(in_reply_to->saved_priority);
2833 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2834 if (target_thread == NULL) {
2835 /* annotation for sparse */
2836 __release(&target_thread->proc->inner_lock);
2837 return_error = BR_DEAD_REPLY;
2838 return_error_line = __LINE__;
2839 goto err_dead_binder;
2841 if (target_thread->transaction_stack != in_reply_to) {
2842 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2843 proc->pid, thread->pid,
2844 target_thread->transaction_stack ?
2845 target_thread->transaction_stack->debug_id : 0,
2846 in_reply_to->debug_id);
2847 binder_inner_proc_unlock(target_thread->proc);
2848 return_error = BR_FAILED_REPLY;
2849 return_error_param = -EPROTO;
2850 return_error_line = __LINE__;
2852 target_thread = NULL;
2853 goto err_dead_binder;
2855 target_proc = target_thread->proc;
2856 target_proc->tmp_ref++;
2857 binder_inner_proc_unlock(target_thread->proc);
2859 if (tr->target.handle) {
2860 struct binder_ref *ref;
2862 /*
2863 * There must already be a strong ref
2864 * on this node. If so, do a strong
2865 * increment on the node to ensure it
2866 * stays alive until the transaction is
2867 * done.
2868 */
2869 binder_proc_lock(proc);
2870 ref = binder_get_ref_olocked(proc, tr->target.handle,
2871 true);
2872 if (ref) {
2873 target_node = binder_get_node_refs_for_txn(
2874 ref->node, &target_proc,
2875 &return_error);
2876 } else {
2877 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2878 proc->pid, thread->pid, tr->target.handle);
2879 return_error = BR_FAILED_REPLY;
2881 binder_proc_unlock(proc);
2883 mutex_lock(&context->context_mgr_node_lock);
2884 target_node = context->binder_context_mgr_node;
2885 if (target_node)
2886 target_node = binder_get_node_refs_for_txn(
2887 target_node, &target_proc,
2888 &return_error);
2889 else
2890 return_error = BR_DEAD_REPLY;
2891 mutex_unlock(&context->context_mgr_node_lock);
2892 if (target_node && target_proc->pid == proc->pid) {
2893 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2894 proc->pid, thread->pid);
2895 return_error = BR_FAILED_REPLY;
2896 return_error_param = -EINVAL;
2897 return_error_line = __LINE__;
2898 goto err_invalid_target_handle;
2901 if (!target_node) {
2902 /*
2903 * return_error is set above
2904 */
2905 return_error_param = -EINVAL;
2906 return_error_line = __LINE__;
2907 goto err_dead_binder;
2909 e->to_node = target_node->debug_id;
2910 if (WARN_ON(proc == target_proc)) {
2911 return_error = BR_FAILED_REPLY;
2912 return_error_param = -EINVAL;
2913 return_error_line = __LINE__;
2914 goto err_invalid_target_handle;
2916 if (security_binder_transaction(proc->cred,
2917 target_proc->cred) < 0) {
2918 return_error = BR_FAILED_REPLY;
2919 return_error_param = -EPERM;
2920 return_error_line = __LINE__;
2921 goto err_invalid_target_handle;
2923 binder_inner_proc_lock(proc);
2925 w = list_first_entry_or_null(&thread->todo,
2926 struct binder_work, entry);
2927 if (!(tr->flags & TF_ONE_WAY) && w &&
2928 w->type == BINDER_WORK_TRANSACTION) {
2929 /*
2930 * Do not allow new outgoing transaction from a
2931 * thread that has a transaction at the head of
2932 * its todo list. Only need to check the head
2933 * because binder_select_thread_ilocked picks a
2934 * thread from proc->waiting_threads to enqueue
2935 * the transaction, and nothing is queued to the
2936 * todo list while the thread is on waiting_threads.
2937 */
2938 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2939 proc->pid, thread->pid);
2940 binder_inner_proc_unlock(proc);
2941 return_error = BR_FAILED_REPLY;
2942 return_error_param = -EPROTO;
2943 return_error_line = __LINE__;
2944 goto err_bad_todo_list;
2947 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2948 struct binder_transaction *tmp;
2950 tmp = thread->transaction_stack;
2951 if (tmp->to_thread != thread) {
2952 spin_lock(&tmp->lock);
2953 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2954 proc->pid, thread->pid, tmp->debug_id,
2955 tmp->to_proc ? tmp->to_proc->pid : 0,
2956 tmp->to_thread ?
2957 tmp->to_thread->pid : 0);
2958 spin_unlock(&tmp->lock);
2959 binder_inner_proc_unlock(proc);
2960 return_error = BR_FAILED_REPLY;
2961 return_error_param = -EPROTO;
2962 return_error_line = __LINE__;
2963 goto err_bad_call_stack;
2966 struct binder_thread *from;
2968 spin_lock(&tmp->lock);
2969 from = tmp->from;
2970 if (from && from->proc == target_proc) {
2971 atomic_inc(&from->tmp_ref);
2972 target_thread = from;
2973 spin_unlock(&tmp->lock);
2976 spin_unlock(&tmp->lock);
2977 tmp = tmp->from_parent;
2980 binder_inner_proc_unlock(proc);
2983 e->to_thread = target_thread->pid;
2984 e->to_proc = target_proc->pid;
2986 /* TODO: reuse incoming transaction for reply */
2987 t = kzalloc(sizeof(*t), GFP_KERNEL);
2989 return_error = BR_FAILED_REPLY;
2990 return_error_param = -ENOMEM;
2991 return_error_line = __LINE__;
2992 goto err_alloc_t_failed;
2994 INIT_LIST_HEAD(&t->fd_fixups);
2995 binder_stats_created(BINDER_STAT_TRANSACTION);
2996 spin_lock_init(&t->lock);
2998 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2999 if (tcomplete == NULL) {
3000 return_error = BR_FAILED_REPLY;
3001 return_error_param = -ENOMEM;
3002 return_error_line = __LINE__;
3003 goto err_alloc_tcomplete_failed;
3005 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3007 t->debug_id = t_debug_id;
3009 if (reply)
3010 binder_debug(BINDER_DEBUG_TRANSACTION,
3011 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3012 proc->pid, thread->pid, t->debug_id,
3013 target_proc->pid, target_thread->pid,
3014 (u64)tr->data.ptr.buffer,
3015 (u64)tr->data.ptr.offsets,
3016 (u64)tr->data_size, (u64)tr->offsets_size,
3017 (u64)extra_buffers_size);
3018 else
3019 binder_debug(BINDER_DEBUG_TRANSACTION,
3020 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3021 proc->pid, thread->pid, t->debug_id,
3022 target_proc->pid, target_node->debug_id,
3023 (u64)tr->data.ptr.buffer,
3024 (u64)tr->data.ptr.offsets,
3025 (u64)tr->data_size, (u64)tr->offsets_size,
3026 (u64)extra_buffers_size);
3028 if (!reply && !(tr->flags & TF_ONE_WAY))
3029 t->from = thread;
3030 else
3031 t->from = NULL;
3032 t->sender_euid = task_euid(proc->tsk);
3033 t->to_proc = target_proc;
3034 t->to_thread = target_thread;
3035 t->code = tr->code;
3036 t->flags = tr->flags;
3037 t->priority = task_nice(current);
3039 if (target_node && target_node->txn_security_ctx) {
3040 u32 secid;
3041 size_t added_size;
3043 security_cred_getsecid(proc->cred, &secid);
3044 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3046 return_error = BR_FAILED_REPLY;
3047 return_error_param = ret;
3048 return_error_line = __LINE__;
3049 goto err_get_secctx_failed;
3051 added_size = ALIGN(secctx_sz, sizeof(u64));
3052 extra_buffers_size += added_size;
3053 if (extra_buffers_size < added_size) {
3054 /* integer overflow of extra_buffers_size */
3055 return_error = BR_FAILED_REPLY;
3056 return_error_param = -EINVAL;
3057 return_error_line = __LINE__;
3058 goto err_bad_extra_size;
3062 trace_binder_transaction(reply, t, target_node);
3064 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3065 tr->offsets_size, extra_buffers_size,
3066 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3067 if (IS_ERR(t->buffer)) {
3068 /*
3069 * -ESRCH indicates VMA cleared. The target is dying.
3070 */
3071 return_error_param = PTR_ERR(t->buffer);
3072 return_error = return_error_param == -ESRCH ?
3073 BR_DEAD_REPLY : BR_FAILED_REPLY;
3074 return_error_line = __LINE__;
3076 goto err_binder_alloc_buf_failed;
3080 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3081 ALIGN(tr->offsets_size, sizeof(void *)) +
3082 ALIGN(extra_buffers_size, sizeof(void *)) -
3083 ALIGN(secctx_sz, sizeof(u64));
3085 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3086 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3087 t->buffer, buf_offset,
3088 secctx, secctx_sz);
3089 if (err) {
3090 t->security_ctx = 0;
3091 WARN_ON(1);
3092 }
3093 security_release_secctx(secctx, secctx_sz);
3096 t->buffer->debug_id = t->debug_id;
3097 t->buffer->transaction = t;
3098 t->buffer->target_node = target_node;
3099 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3100 trace_binder_transaction_alloc_buf(t->buffer);
3102 if (binder_alloc_copy_user_to_buffer(
3103 &target_proc->alloc,
3104 t->buffer,
3105 ALIGN(tr->data_size, sizeof(void *)),
3106 (const void __user *)
3107 (uintptr_t)tr->data.ptr.offsets,
3108 tr->offsets_size)) {
3109 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3110 proc->pid, thread->pid);
3111 return_error = BR_FAILED_REPLY;
3112 return_error_param = -EFAULT;
3113 return_error_line = __LINE__;
3114 goto err_copy_data_failed;
3116 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3117 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3118 proc->pid, thread->pid, (u64)tr->offsets_size);
3119 return_error = BR_FAILED_REPLY;
3120 return_error_param = -EINVAL;
3121 return_error_line = __LINE__;
3122 goto err_bad_offset;
3124 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3125 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3126 proc->pid, thread->pid,
3127 (u64)extra_buffers_size);
3128 return_error = BR_FAILED_REPLY;
3129 return_error_param = -EINVAL;
3130 return_error_line = __LINE__;
3131 goto err_bad_offset;
3133 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3134 buffer_offset = off_start_offset;
3135 off_end_offset = off_start_offset + tr->offsets_size;
3136 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3137 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3138 ALIGN(secctx_sz, sizeof(u64));
3140 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3141 buffer_offset += sizeof(binder_size_t)) {
3142 struct binder_object_header *hdr;
3143 size_t object_size;
3144 struct binder_object object;
3145 binder_size_t object_offset;
3146 binder_size_t copy_size;
3148 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3149 &object_offset,
3150 t->buffer,
3151 buffer_offset,
3152 sizeof(object_offset))) {
3153 return_error = BR_FAILED_REPLY;
3154 return_error_param = -EINVAL;
3155 return_error_line = __LINE__;
3156 goto err_bad_offset;
3159 /*
3160 * Copy the source user buffer up to the next object
3161 * that will be processed.
3162 */
3163 copy_size = object_offset - user_offset;
3164 if (copy_size && (user_offset > object_offset ||
3165 binder_alloc_copy_user_to_buffer(
3166 &target_proc->alloc,
3167 t->buffer, user_offset,
3168 user_buffer + user_offset,
3170 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3171 proc->pid, thread->pid);
3172 return_error = BR_FAILED_REPLY;
3173 return_error_param = -EFAULT;
3174 return_error_line = __LINE__;
3175 goto err_copy_data_failed;
3177 object_size = binder_get_object(target_proc, user_buffer,
3178 t->buffer, object_offset, &object);
3179 if (object_size == 0 || object_offset < off_min) {
3180 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3181 proc->pid, thread->pid,
3182 (u64)object_offset,
3183 (u64)off_min,
3184 (u64)t->buffer->data_size);
3185 return_error = BR_FAILED_REPLY;
3186 return_error_param = -EINVAL;
3187 return_error_line = __LINE__;
3188 goto err_bad_offset;
3190 /*
3191 * Set offset to the next buffer fragment to be
3192 * copied
3193 */
3194 user_offset = object_offset + object_size;
3196 hdr = &object.hdr;
3197 off_min = object_offset + object_size;
3198 switch (hdr->type) {
3199 case BINDER_TYPE_BINDER:
3200 case BINDER_TYPE_WEAK_BINDER: {
3201 struct flat_binder_object *fp;
3203 fp = to_flat_binder_object(hdr);
3204 ret = binder_translate_binder(fp, t, thread);
3206 if (ret < 0 ||
3207 binder_alloc_copy_to_buffer(&target_proc->alloc,
3208 t->buffer,
3209 object_offset,
3210 fp, sizeof(*fp))) {
3211 return_error = BR_FAILED_REPLY;
3212 return_error_param = ret;
3213 return_error_line = __LINE__;
3214 goto err_translate_failed;
3217 case BINDER_TYPE_HANDLE:
3218 case BINDER_TYPE_WEAK_HANDLE: {
3219 struct flat_binder_object *fp;
3221 fp = to_flat_binder_object(hdr);
3222 ret = binder_translate_handle(fp, t, thread);
3223 if (ret < 0 ||
3224 binder_alloc_copy_to_buffer(&target_proc->alloc,
3225 t->buffer,
3226 object_offset,
3227 fp, sizeof(*fp))) {
3228 return_error = BR_FAILED_REPLY;
3229 return_error_param = ret;
3230 return_error_line = __LINE__;
3231 goto err_translate_failed;
3235 case BINDER_TYPE_FD: {
3236 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3237 binder_size_t fd_offset = object_offset +
3238 (uintptr_t)&fp->fd - (uintptr_t)fp;
3239 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3240 thread, in_reply_to);
3242 fp->pad_binder = 0;
3243 if (ret < 0 ||
3244 binder_alloc_copy_to_buffer(&target_proc->alloc,
3245 t->buffer,
3246 object_offset,
3247 fp, sizeof(*fp))) {
3248 return_error = BR_FAILED_REPLY;
3249 return_error_param = ret;
3250 return_error_line = __LINE__;
3251 goto err_translate_failed;
3254 case BINDER_TYPE_FDA: {
3255 struct binder_object ptr_object;
3256 binder_size_t parent_offset;
3257 struct binder_object user_object;
3258 size_t user_parent_size;
3259 struct binder_fd_array_object *fda =
3260 to_binder_fd_array_object(hdr);
3261 size_t num_valid = (buffer_offset - off_start_offset) /
3262 sizeof(binder_size_t);
3263 struct binder_buffer_object *parent =
3264 binder_validate_ptr(target_proc, t->buffer,
3265 &ptr_object, fda->parent,
3266 off_start_offset,
3267 &parent_offset,
3268 num_valid);
3269 if (!parent) {
3270 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3271 proc->pid, thread->pid);
3272 return_error = BR_FAILED_REPLY;
3273 return_error_param = -EINVAL;
3274 return_error_line = __LINE__;
3275 goto err_bad_parent;
3277 if (!binder_validate_fixup(target_proc, t->buffer,
3278 off_start_offset,
3279 parent_offset,
3280 fda->parent_offset,
3281 last_fixup_obj_off,
3282 last_fixup_min_off)) {
3283 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3284 proc->pid, thread->pid);
3285 return_error = BR_FAILED_REPLY;
3286 return_error_param = -EINVAL;
3287 return_error_line = __LINE__;
3288 goto err_bad_parent;
3290 /*
3291 * We need to read the user version of the parent
3292 * object to get the original user offset
3293 */
3294 user_parent_size =
3295 binder_get_object(proc, user_buffer, t->buffer,
3296 parent_offset, &user_object);
3297 if (user_parent_size != sizeof(user_object.bbo)) {
3298 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3299 proc->pid, thread->pid,
3300 user_parent_size,
3301 sizeof(user_object.bbo));
3302 return_error = BR_FAILED_REPLY;
3303 return_error_param = -EINVAL;
3304 return_error_line = __LINE__;
3305 goto err_bad_parent;
3307 ret = binder_translate_fd_array(&pf_head, fda,
3308 user_buffer, parent,
3309 &user_object.bbo, t,
3310 thread, in_reply_to);
3311 if (!ret)
3312 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3313 t->buffer,
3314 object_offset,
3315 fda, sizeof(*fda));
3316 if (ret) {
3317 return_error = BR_FAILED_REPLY;
3318 return_error_param = ret > 0 ? -EINVAL : ret;
3319 return_error_line = __LINE__;
3320 goto err_translate_failed;
3322 last_fixup_obj_off = parent_offset;
3323 last_fixup_min_off =
3324 fda->parent_offset + sizeof(u32) * fda->num_fds;
3326 case BINDER_TYPE_PTR: {
3327 struct binder_buffer_object *bp =
3328 to_binder_buffer_object(hdr);
3329 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3332 if (bp->length > buf_left) {
3333 binder_user_error("%d:%d got transaction with too large buffer\n",
3334 proc->pid, thread->pid);
3335 return_error = BR_FAILED_REPLY;
3336 return_error_param = -EINVAL;
3337 return_error_line = __LINE__;
3338 goto err_bad_offset;
3340 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3341 (const void __user *)(uintptr_t)bp->buffer,
3342 bp->length);
3343 if (ret) {
3344 return_error = BR_FAILED_REPLY;
3345 return_error_param = ret;
3346 return_error_line = __LINE__;
3347 goto err_translate_failed;
3349 /* Fixup buffer pointer to target proc address space */
3350 bp->buffer = (uintptr_t)
3351 t->buffer->user_data + sg_buf_offset;
3352 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3354 num_valid = (buffer_offset - off_start_offset) /
3355 sizeof(binder_size_t);
3356 ret = binder_fixup_parent(&pf_head, t,
3357 thread, bp,
3358 off_start_offset,
3359 num_valid,
3360 last_fixup_obj_off,
3361 last_fixup_min_off);
3362 if (ret < 0 ||
3363 binder_alloc_copy_to_buffer(&target_proc->alloc,
3364 t->buffer,
3365 object_offset,
3366 bp, sizeof(*bp))) {
3367 return_error = BR_FAILED_REPLY;
3368 return_error_param = ret;
3369 return_error_line = __LINE__;
3370 goto err_translate_failed;
3372 last_fixup_obj_off = object_offset;
3373 last_fixup_min_off = 0;
3375 default:
3376 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3377 proc->pid, thread->pid, hdr->type);
3378 return_error = BR_FAILED_REPLY;
3379 return_error_param = -EINVAL;
3380 return_error_line = __LINE__;
3381 goto err_bad_object_type;
3384 /* Done processing objects, copy the rest of the buffer */
3385 if (binder_alloc_copy_user_to_buffer(
3386 &target_proc->alloc,
3387 t->buffer, user_offset,
3388 user_buffer + user_offset,
3389 tr->data_size - user_offset)) {
3390 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3391 proc->pid, thread->pid);
3392 return_error = BR_FAILED_REPLY;
3393 return_error_param = -EFAULT;
3394 return_error_line = __LINE__;
3395 goto err_copy_data_failed;
3398 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3399 &sgc_head, &pf_head);
3401 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3402 proc->pid, thread->pid);
3403 return_error = BR_FAILED_REPLY;
3404 return_error_param = ret;
3405 return_error_line = __LINE__;
3406 goto err_copy_data_failed;
3408 if (t->buffer->oneway_spam_suspect)
3409 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3410 else
3411 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3412 t->work.type = BINDER_WORK_TRANSACTION;
3414 if (reply) {
3415 binder_enqueue_thread_work(thread, tcomplete);
3416 binder_inner_proc_lock(target_proc);
3417 if (target_thread->is_dead) {
3418 return_error = BR_DEAD_REPLY;
3419 binder_inner_proc_unlock(target_proc);
3420 goto err_dead_proc_or_thread;
3422 BUG_ON(t->buffer->async_transaction != 0);
3423 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3424 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3425 target_proc->outstanding_txns++;
3426 binder_inner_proc_unlock(target_proc);
3427 wake_up_interruptible_sync(&target_thread->wait);
3428 binder_free_transaction(in_reply_to);
3429 } else if (!(t->flags & TF_ONE_WAY)) {
3430 BUG_ON(t->buffer->async_transaction != 0);
3431 binder_inner_proc_lock(proc);
3432 /*
3433 * Defer the TRANSACTION_COMPLETE, so we don't return to
3434 * userspace immediately; this allows the target process to
3435 * immediately start processing this transaction, reducing
3436 * latency. We will then return the TRANSACTION_COMPLETE when
3437 * the target replies (or there is an error).
3438 */
3439 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3441 t->from_parent = thread->transaction_stack;
3442 thread->transaction_stack = t;
3443 binder_inner_proc_unlock(proc);
3444 return_error = binder_proc_transaction(t,
3445 target_proc, target_thread);
3446 if (return_error) {
3447 binder_inner_proc_lock(proc);
3448 binder_pop_transaction_ilocked(thread, t);
3449 binder_inner_proc_unlock(proc);
3450 goto err_dead_proc_or_thread;
3452 } else {
3453 BUG_ON(target_node == NULL);
3454 BUG_ON(t->buffer->async_transaction != 1);
3455 binder_enqueue_thread_work(thread, tcomplete);
3456 return_error = binder_proc_transaction(t, target_proc, NULL);
3457 if (return_error)
3458 goto err_dead_proc_or_thread;
3459 }
3460 if (target_thread)
3461 binder_thread_dec_tmpref(target_thread);
3462 binder_proc_dec_tmpref(target_proc);
3463 if (target_node)
3464 binder_dec_node_tmpref(target_node);
3465 /*
3466 * write barrier to synchronize with initialization
3467 * of log entry
3468 */
3469 smp_wmb();
3470 WRITE_ONCE(e->debug_id_done, t_debug_id);
3471 return;
3473 err_dead_proc_or_thread:
3474 return_error_line = __LINE__;
3475 binder_dequeue_work(proc, tcomplete);
3476 err_translate_failed:
3477 err_bad_object_type:
3478 err_bad_offset:
3479 err_bad_parent:
3480 err_copy_data_failed:
3481 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3482 binder_free_txn_fixups(t);
3483 trace_binder_transaction_failed_buffer_release(t->buffer);
3484 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3485 buffer_offset, true);
3486 if (target_node)
3487 binder_dec_node_tmpref(target_node);
3488 target_node = NULL;
3489 t->buffer->transaction = NULL;
3490 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3491 err_binder_alloc_buf_failed:
3492 err_bad_extra_size:
3493 if (secctx)
3494 security_release_secctx(secctx, secctx_sz);
3495 err_get_secctx_failed:
3496 kfree(tcomplete);
3497 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3498 err_alloc_tcomplete_failed:
3499 if (trace_binder_txn_latency_free_enabled())
3500 binder_txn_latency_free(t);
3501 kfree(t);
3502 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3503 err_alloc_t_failed:
3504 err_bad_todo_list:
3505 err_bad_call_stack:
3506 err_empty_call_stack:
3507 err_dead_binder:
3508 err_invalid_target_handle:
3509 if (target_thread)
3510 binder_thread_dec_tmpref(target_thread);
3511 if (target_proc)
3512 binder_proc_dec_tmpref(target_proc);
3513 if (target_node) {
3514 binder_dec_node(target_node, 1, 0);
3515 binder_dec_node_tmpref(target_node);
3518 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3519 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3520 proc->pid, thread->pid, reply ? "reply" :
3521 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3522 target_proc ? target_proc->pid : 0,
3523 target_thread ? target_thread->pid : 0,
3524 t_debug_id, return_error, return_error_param,
3525 (u64)tr->data_size, (u64)tr->offsets_size,
3529 struct binder_transaction_log_entry *fe;
3531 e->return_error = return_error;
3532 e->return_error_param = return_error_param;
3533 e->return_error_line = return_error_line;
3534 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3535 *fe = *e;
3536 /*
3537 * write barrier to synchronize with initialization
3538 * of log entry
3539 */
3540 smp_wmb();
3541 WRITE_ONCE(e->debug_id_done, t_debug_id);
3542 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3545 BUG_ON(thread->return_error.cmd != BR_OK);
3546 if (in_reply_to) {
3547 binder_set_txn_from_error(in_reply_to, t_debug_id,
3548 return_error, return_error_param);
3549 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3550 binder_enqueue_thread_work(thread, &thread->return_error.work);
3551 binder_send_failed_reply(in_reply_to, return_error);
3552 } else {
3553 binder_inner_proc_lock(proc);
3554 binder_set_extended_error(&thread->ee, t_debug_id,
3555 return_error, return_error_param);
3556 binder_inner_proc_unlock(proc);
3557 thread->return_error.cmd = return_error;
3558 binder_enqueue_thread_work(thread, &thread->return_error.work);
3562 /**
3563 * binder_free_buf() - free the specified buffer
3564 * @proc: binder proc that owns the buffer
3565 * @thread: binder thread performing the buffer release
3566 * @buffer: buffer to be freed
3567 * @is_failure: failed to send transaction
3568 * If the buffer is for an async transaction, enqueue the next async
3569 * transaction from the node.
3571 * Clean up the buffer and free it.
3572 */
3573 static void
3574 binder_free_buf(struct binder_proc *proc,
3575 struct binder_thread *thread,
3576 struct binder_buffer *buffer, bool is_failure)
3578 binder_inner_proc_lock(proc);
3579 if (buffer->transaction) {
3580 buffer->transaction->buffer = NULL;
3581 buffer->transaction = NULL;
3583 binder_inner_proc_unlock(proc);
3584 if (buffer->async_transaction && buffer->target_node) {
3585 struct binder_node *buf_node;
3586 struct binder_work *w;
3588 buf_node = buffer->target_node;
3589 binder_node_inner_lock(buf_node);
3590 BUG_ON(!buf_node->has_async_transaction);
3591 BUG_ON(buf_node->proc != proc);
3592 w = binder_dequeue_work_head_ilocked(
3593 &buf_node->async_todo);
3594 if (!w) {
3595 buf_node->has_async_transaction = false;
3596 } else {
3597 binder_enqueue_work_ilocked(
3598 w, &proc->todo);
3599 binder_wakeup_proc_ilocked(proc);
3601 binder_node_inner_unlock(buf_node);
3603 trace_binder_transaction_buffer_release(buffer);
3604 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3605 binder_alloc_free_buf(&proc->alloc, buffer);
3608 static int binder_thread_write(struct binder_proc *proc,
3609 struct binder_thread *thread,
3610 binder_uintptr_t binder_buffer, size_t size,
3611 binder_size_t *consumed)
3613 uint32_t cmd;
3614 struct binder_context *context = proc->context;
3615 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3616 void __user *ptr = buffer + *consumed;
3617 void __user *end = buffer + size;
3619 while (ptr < end && thread->return_error.cmd == BR_OK) {
3622 if (get_user(cmd, (uint32_t __user *)ptr))
3623 return -EFAULT;
3624 ptr += sizeof(uint32_t);
3625 trace_binder_command(cmd);
3626 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3627 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3628 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3629 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3637 const char *debug_string;
3638 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3639 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3640 struct binder_ref_data rdata;
3642 if (get_user(target, (uint32_t __user *)ptr))
3643 return -EFAULT;
3645 ptr += sizeof(uint32_t);
3647 if (increment && !target) {
3648 struct binder_node *ctx_mgr_node;
3650 mutex_lock(&context->context_mgr_node_lock);
3651 ctx_mgr_node = context->binder_context_mgr_node;
3653 if (ctx_mgr_node->proc == proc) {
3654 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3655 proc->pid, thread->pid);
3656 mutex_unlock(&context->context_mgr_node_lock);
3659 ret = binder_inc_ref_for_node(
3660 proc, ctx_mgr_node,
3661 strong, NULL, &rdata);
3663 mutex_unlock(&context->context_mgr_node_lock);
3666 ret = binder_update_ref_for_handle(
3667 proc, target, increment, strong,
3668 &rdata);
3669 if (!ret && rdata.desc != target) {
3670 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3671 proc->pid, thread->pid,
3672 target, rdata.desc);
3676 debug_string = "IncRefs";
3679 debug_string = "Acquire";
3682 debug_string = "Release";
3686 debug_string = "DecRefs";
3689 if (ret) {
3690 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3691 proc->pid, thread->pid, debug_string,
3692 strong, target, ret);
3695 binder_debug(BINDER_DEBUG_USER_REFS,
3696 "%d:%d %s ref %d desc %d s %d w %d\n",
3697 proc->pid, thread->pid, debug_string,
3698 rdata.debug_id, rdata.desc, rdata.strong,
3699 rdata.weak);
3702 case BC_INCREFS_DONE:
3703 case BC_ACQUIRE_DONE: {
3704 binder_uintptr_t node_ptr;
3705 binder_uintptr_t cookie;
3706 struct binder_node *node;
3709 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3710 return -EFAULT;
3711 ptr += sizeof(binder_uintptr_t);
3712 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3713 return -EFAULT;
3714 ptr += sizeof(binder_uintptr_t);
3715 node = binder_get_node(proc, node_ptr);
3717 binder_user_error("%d:%d %s u%016llx no match\n",
3718 proc->pid, thread->pid,
3719 cmd == BC_INCREFS_DONE ?
3720 "BC_INCREFS_DONE" :
3721 "BC_ACQUIRE_DONE",
3722 (u64)node_ptr);
3723 break;
3724 }
3725 if (cookie != node->cookie) {
3726 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3727 proc->pid, thread->pid,
3728 cmd == BC_INCREFS_DONE ?
3729 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3730 (u64)node_ptr, node->debug_id,
3731 (u64)cookie, (u64)node->cookie);
3732 binder_put_node(node);
3735 binder_node_inner_lock(node);
3736 if (cmd == BC_ACQUIRE_DONE) {
3737 if (node->pending_strong_ref == 0) {
3738 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3739 proc->pid, thread->pid,
3740 node->debug_id);
3741 binder_node_inner_unlock(node);
3742 binder_put_node(node);
3745 node->pending_strong_ref = 0;
3747 if (node->pending_weak_ref == 0) {
3748 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3749 proc->pid, thread->pid,
3750 node->debug_id);
3751 binder_node_inner_unlock(node);
3752 binder_put_node(node);
3755 node->pending_weak_ref = 0;
3757 free_node = binder_dec_node_nilocked(node,
3758 cmd == BC_ACQUIRE_DONE, 0);
3760 binder_debug(BINDER_DEBUG_USER_REFS,
3761 "%d:%d %s node %d ls %d lw %d tr %d\n",
3762 proc->pid, thread->pid,
3763 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3764 node->debug_id, node->local_strong_refs,
3765 node->local_weak_refs, node->tmp_refs);
3766 binder_node_inner_unlock(node);
3767 binder_put_node(node);
3770 case BC_ATTEMPT_ACQUIRE:
3771 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3773 case BC_ACQUIRE_RESULT:
3774 pr_err("BC_ACQUIRE_RESULT not supported\n");
3777 case BC_FREE_BUFFER: {
3778 binder_uintptr_t data_ptr;
3779 struct binder_buffer *buffer;
3781 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3782 return -EFAULT;
3783 ptr += sizeof(binder_uintptr_t);
3785 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3786 data_ptr);
3787 if (IS_ERR_OR_NULL(buffer)) {
3788 if (PTR_ERR(buffer) == -EPERM) {
3789 binder_user_error(
3790 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3791 proc->pid, thread->pid,
3792 (u64)data_ptr);
3793 } else {
3794 binder_user_error(
3795 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3796 proc->pid, thread->pid,
3797 (u64)data_ptr);
3798 }
3799 break;
3800 }
3801 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3802 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3803 proc->pid, thread->pid, (u64)data_ptr,
3804 buffer->debug_id,
3805 buffer->transaction ? "active" : "finished");
3806 binder_free_buf(proc, thread, buffer, false);
3810 case BC_TRANSACTION_SG:
3812 struct binder_transaction_data_sg tr;
3814 if (copy_from_user(&tr, ptr, sizeof(tr)))
3815 return -EFAULT;
3816 ptr += sizeof(tr);
3817 binder_transaction(proc, thread, &tr.transaction_data,
3818 cmd == BC_REPLY_SG, tr.buffers_size);
3821 case BC_TRANSACTION:
3823 struct binder_transaction_data tr;
3825 if (copy_from_user(&tr, ptr, sizeof(tr)))
3826 return -EFAULT;
3827 ptr += sizeof(tr);
3828 binder_transaction(proc, thread, &tr,
3829 cmd == BC_REPLY, 0);
3833 case BC_REGISTER_LOOPER:
3834 binder_debug(BINDER_DEBUG_THREADS,
3835 "%d:%d BC_REGISTER_LOOPER\n",
3836 proc->pid, thread->pid);
3837 binder_inner_proc_lock(proc);
3838 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3839 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3840 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3841 proc->pid, thread->pid);
3842 } else if (proc->requested_threads == 0) {
3843 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3844 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3845 proc->pid, thread->pid);
3847 proc->requested_threads--;
3848 proc->requested_threads_started++;
3850 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3851 binder_inner_proc_unlock(proc);
3853 case BC_ENTER_LOOPER:
3854 binder_debug(BINDER_DEBUG_THREADS,
3855 "%d:%d BC_ENTER_LOOPER\n",
3856 proc->pid, thread->pid);
3857 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3858 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3859 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3860 proc->pid, thread->pid);
3862 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3864 case BC_EXIT_LOOPER:
3865 binder_debug(BINDER_DEBUG_THREADS,
3866 "%d:%d BC_EXIT_LOOPER\n",
3867 proc->pid, thread->pid);
3868 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3871 case BC_REQUEST_DEATH_NOTIFICATION:
3872 case BC_CLEAR_DEATH_NOTIFICATION: {
3874 binder_uintptr_t cookie;
3875 struct binder_ref *ref;
3876 struct binder_ref_death *death = NULL;
3878 if (get_user(target, (uint32_t __user *)ptr))
3879 return -EFAULT;
3880 ptr += sizeof(uint32_t);
3881 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3882 return -EFAULT;
3883 ptr += sizeof(binder_uintptr_t);
3884 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3885 /*
3886 * Allocate memory for death notification
3887 * before taking lock
3888 */
3889 death = kzalloc(sizeof(*death), GFP_KERNEL);
3890 if (death == NULL) {
3891 WARN_ON(thread->return_error.cmd !=
3892 BR_OK);
3893 thread->return_error.cmd = BR_ERROR;
3894 binder_enqueue_thread_work(
3895 thread,
3896 &thread->return_error.work);
3897 binder_debug(
3898 BINDER_DEBUG_FAILED_TRANSACTION,
3899 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3900 proc->pid, thread->pid);
3904 binder_proc_lock(proc);
3905 ref = binder_get_ref_olocked(proc, target, false);
3906 if (ref == NULL) {
3907 binder_user_error("%d:%d %s invalid ref %d\n",
3908 proc->pid, thread->pid,
3909 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3910 "BC_REQUEST_DEATH_NOTIFICATION" :
3911 "BC_CLEAR_DEATH_NOTIFICATION",
3912 target);
3913 binder_proc_unlock(proc);
3918 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3919 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3920 proc->pid, thread->pid,
3921 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3922 "BC_REQUEST_DEATH_NOTIFICATION" :
3923 "BC_CLEAR_DEATH_NOTIFICATION",
3924 (u64)cookie, ref->data.debug_id,
3925 ref->data.desc, ref->data.strong,
3926 ref->data.weak, ref->node->debug_id);
3928 binder_node_lock(ref->node);
3929 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3930 if (ref->death) {
3931 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3932 proc->pid, thread->pid);
3933 binder_node_unlock(ref->node);
3934 binder_proc_unlock(proc);
3938 binder_stats_created(BINDER_STAT_DEATH);
3939 INIT_LIST_HEAD(&death->work.entry);
3940 death->cookie = cookie;
3941 ref->death = death;
3942 if (ref->node->proc == NULL) {
3943 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3945 binder_inner_proc_lock(proc);
3946 binder_enqueue_work_ilocked(
3947 &ref->death->work, &proc->todo);
3948 binder_wakeup_proc_ilocked(proc);
3949 binder_inner_proc_unlock(proc);
3952 if (ref->death == NULL) {
3953 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3954 proc->pid, thread->pid);
3955 binder_node_unlock(ref->node);
3956 binder_proc_unlock(proc);
3959 death = ref->death;
3960 if (death->cookie != cookie) {
3961 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3962 proc->pid, thread->pid,
3963 (u64)death->cookie,
3964 (u64)cookie);
3965 binder_node_unlock(ref->node);
3966 binder_proc_unlock(proc);
3969 ref->death = NULL;
3970 binder_inner_proc_lock(proc);
3971 if (list_empty(&death->work.entry)) {
3972 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3973 if (thread->looper &
3974 (BINDER_LOOPER_STATE_REGISTERED |
3975 BINDER_LOOPER_STATE_ENTERED))
3976 binder_enqueue_thread_work_ilocked(
3977 thread,
3978 &death->work);
3979 else {
3980 binder_enqueue_work_ilocked(
3981 &death->work,
3982 &proc->todo);
3983 binder_wakeup_proc_ilocked(
3984 proc);
3985 }
3986 } else {
3987 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3988 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3990 binder_inner_proc_unlock(proc);
3992 binder_node_unlock(ref->node);
3993 binder_proc_unlock(proc);
3995 case BC_DEAD_BINDER_DONE: {
3996 struct binder_work *w;
3997 binder_uintptr_t cookie;
3998 struct binder_ref_death *death = NULL;
4000 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4001 return -EFAULT;
4003 ptr += sizeof(cookie);
4004 binder_inner_proc_lock(proc);
4005 list_for_each_entry(w, &proc->delivered_death,
4006 entry) {
4007 struct binder_ref_death *tmp_death =
4008 container_of(w,
4009 struct binder_ref_death,
4010 work);
4012 if (tmp_death->cookie == cookie) {
4013 death = tmp_death;
4014 break;
4015 }
4016 }
4017 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4018 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4019 proc->pid, thread->pid, (u64)cookie,
4020 death);
4021 if (death == NULL) {
4022 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4023 proc->pid, thread->pid, (u64)cookie);
4024 binder_inner_proc_unlock(proc);
4027 binder_dequeue_work_ilocked(&death->work);
4028 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4029 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4030 if (thread->looper &
4031 (BINDER_LOOPER_STATE_REGISTERED |
4032 BINDER_LOOPER_STATE_ENTERED))
4033 binder_enqueue_thread_work_ilocked(
4034 thread, &death->work);
4035 else {
4036 binder_enqueue_work_ilocked(
4037 &death->work,
4038 &proc->todo);
4039 binder_wakeup_proc_ilocked(proc);
4042 binder_inner_proc_unlock(proc);
4046 pr_err("%d:%d unknown command %d\n",
4047 proc->pid, thread->pid, cmd);
4050 *consumed = ptr - buffer;
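/*
 * Userspace sketch (illustrative; hypothetical handle and method code,
 * error handling omitted) of how a BC_TRANSACTION command reaches
 * binder_thread_write() above through the BINDER_WRITE_READ ioctl
 * declared in uapi/linux/android/binder.h.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static void example_send_transaction(int binder_fd, uint32_t handle)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf;
	struct binder_write_read bwr;

	memset(&writebuf, 0, sizeof(writebuf));
	writebuf.cmd = BC_TRANSACTION;
	writebuf.tr.target.handle = handle;	/* ref desc in this proc */
	writebuf.tr.code = 1;			/* hypothetical method */
	writebuf.tr.flags = TF_ACCEPT_FDS;
	/* empty payload: data_size and offsets_size stay 0 */

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(writebuf);
	bwr.write_buffer = (uintptr_t)&writebuf;
	/* consumed by binder_thread_write(); replies arrive later via
	 * read_buffer and binder_thread_read()
	 */
	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif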
4055 static void binder_stat_br(struct binder_proc *proc,
4056 struct binder_thread *thread, uint32_t cmd)
4058 trace_binder_return(cmd);
4059 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4060 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4061 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4062 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4066 static int binder_put_node_cmd(struct binder_proc *proc,
4067 struct binder_thread *thread,
4069 binder_uintptr_t node_ptr,
4070 binder_uintptr_t node_cookie,
4072 uint32_t cmd, const char *cmd_name)
4074 void __user *ptr = *ptrp;
4076 if (put_user(cmd, (uint32_t __user *)ptr))
4077 return -EFAULT;
4078 ptr += sizeof(uint32_t);
4080 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4081 return -EFAULT;
4082 ptr += sizeof(binder_uintptr_t);
4084 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4085 return -EFAULT;
4086 ptr += sizeof(binder_uintptr_t);
4088 binder_stat_br(proc, thread, cmd);
4089 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4090 proc->pid, thread->pid, cmd_name, node_debug_id,
4091 (u64)node_ptr, (u64)node_cookie);
4097 static int binder_wait_for_work(struct binder_thread *thread,
4098 bool do_proc_work)
4099 {
4100 DEFINE_WAIT(wait);
4101 struct binder_proc *proc = thread->proc;
4102 int ret = 0;
4104 freezer_do_not_count();
4105 binder_inner_proc_lock(proc);
4107 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4108 if (binder_has_work_ilocked(thread, do_proc_work))
4111 list_add(&thread->waiting_thread_node,
4112 &proc->waiting_threads);
4113 binder_inner_proc_unlock(proc);
4115 binder_inner_proc_lock(proc);
4116 list_del_init(&thread->waiting_thread_node);
4117 if (signal_pending(current)) {
4122 finish_wait(&thread->wait, &wait);
4123 binder_inner_proc_unlock(proc);
4129 /**
4130 * binder_apply_fd_fixups() - finish fd translation
4131 * @proc: binder_proc associated with @t->buffer
4132 * @t: binder transaction with list of fd fixups
4133 *
4134 * Now that we are in the context of the transaction target
4135 * process, we can allocate and install fds. Process the
4136 * list of fds to translate and fixup the buffer with the
4137 * new fds first and only then install the files.
4138 *
4139 * If we fail to allocate an fd, skip the install and release
4140 * any fds that have already been allocated.
4141 */
static int binder_apply_fd_fixups(struct binder_proc *proc,
				  struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;
	int ret = 0;

	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0) {
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "failed fd fixup txn %d fd %d\n",
				     t->debug_id, fd);
			ret = -ENOMEM;
			goto err;
		}
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "fd fixup txn %d fd %d\n",
			     t->debug_id, fd);
		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
		fixup->target_fd = fd;
		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
						fixup->offset, &fd,
						sizeof(u32))) {
			ret = -EINVAL;
			goto err;
		}
	}
	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fd_install(fixup->target_fd, fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}

	return ret;

err:
	binder_free_txn_fixups(t);
	return ret;
}

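/*
 * A minimal sketch of the reserve-then-install pattern used above
 * (illustrative only; error handling trimmed). Reserving the fd number
 * first lets the buffer be patched before the file ever becomes
 * visible to the target process:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd >= 0) {
 *		// publish the fd value to the consumer first ...
 *		fd_install(fd, file);	// ... then make it live
 *	}
 */
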
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE:
		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
			if (proc->oneway_spam_detection_enabled &&
			    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
				cmd = BR_ONEWAY_SPAM_SUSPECT;
			else
				cmd = BR_TRANSACTION_COMPLETE;
			binder_inner_proc_unlock(proc);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		ret = binder_apply_fd_fixups(proc, t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
			bool oneway = !!(t->flags & TF_ONE_WAY);
			int tid = t->debug_id;

			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, thread, buffer, true);
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
				     proc->pid, thread->pid,
				     oneway ? "async " :
					(cmd == BR_REPLY ? "reply " : ""),
				     tid, BR_FAILED_REPLY, ret, __LINE__);
			if (cmd == BR_REPLY) {
				cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out
	     */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

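/*
 * Illustrative user-space looper skeleton (not compiled). The names
 * binder_fd and handle_commands are hypothetical; only the BC_/BR_
 * protocol constants and struct binder_write_read come from the UAPI
 * header:
 *
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	char rbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&enter,
 *		.write_size = sizeof(enter),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);	// register looper
 *	for (;;) {
 *		bwr.write_size = 0;
 *		bwr.read_buffer = (binder_uintptr_t)rbuf;
 *		bwr.read_size = sizeof(rbuf);
 *		bwr.read_consumed = 0;
 *		if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *			break;
 *		handle_commands(rbuf, bwr.read_consumed);
 *		// on BR_SPAWN_LOOPER: start a thread that sends
 *		// BC_REGISTER_LOOPER and runs this same loop
 *	}
 */
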
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;
	enum binder_work_type wtype;

	while (1) {
		binder_inner_proc_lock(proc);
		w = binder_dequeue_work_head_ilocked(list);
		wtype = w ? w->type : 0;
		binder_inner_proc_unlock(proc);
		if (!w)
			return;

		switch (wtype) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		case BINDER_WORK_NODE:
			break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       wtype);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	thread->ee.command = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	struct binder_device *device;

	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	if (proc->outstanding_txns)
		pr_warn("%s: Unexpected outstanding_txns %d\n",
			__func__, proc->outstanding_txns);
	device = container_of(proc->context, struct binder_device, context);
	if (refcount_dec_and_test(&device->ref)) {
		kfree(proc->context->name);
		kfree(device);
	}
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	put_cred(proc->cred);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			thread->proc->outstanding_txns--;
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);

	/*
	 * If this thread used poll, make sure we remove the waitqueue from any
	 * poll data structures holding it.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		wake_up_pollfree(&thread->wait);

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_pollfree() above and
	 * someone else removing the last entry from the queue for other reasons
	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
	 * descriptor being closed). Such other users hold an RCU read lock, so
	 * we can be sure they're done after we call synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

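/*
 * Illustrative detail (not compiled): the same struct binder_write_read
 * is copied back to user space with write_consumed/read_consumed
 * updated, so a caller can tell how much of each buffer was processed
 * and, if it chooses, resubmit unconsumed commands, e.g.:
 *
 *	if (bwr.write_consumed < bwr.write_size)
 *		// resubmit from bwr.write_buffer + bwr.write_consumed
 */
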
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->cred);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

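/*
 * A minimal sketch of how a context manager registers itself
 * (illustrative, not compiled; binder_fd is hypothetical). Passing 0
 * takes the legacy BINDER_SET_CONTEXT_MGR path, which calls the
 * function above with fbo == NULL:
 *
 *	if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("cannot become context manager");
 */
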
static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static bool binder_txns_pending_ilocked(struct binder_proc *proc)
{
	struct rb_node *n;
	struct binder_thread *thread;

	if (proc->outstanding_txns > 0)
		return true;

	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->transaction_stack)
			return true;
	}
	return false;
}

static int binder_ioctl_freeze(struct binder_freeze_info *info,
			       struct binder_proc *target_proc)
{
	int ret = 0;

	if (!info->enable) {
		binder_inner_proc_lock(target_proc);
		target_proc->sync_recv = false;
		target_proc->async_recv = false;
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
		return 0;
	}

	/*
	 * Freezing the target. Prevent new transactions by
	 * setting frozen state. If timeout specified, wait
	 * for transactions to drain.
	 */
	binder_inner_proc_lock(target_proc);
	target_proc->sync_recv = false;
	target_proc->async_recv = false;
	target_proc->is_frozen = true;
	binder_inner_proc_unlock(target_proc);

	if (info->timeout_ms > 0)
		ret = wait_event_interruptible_timeout(
			target_proc->freeze_wait,
			(!target_proc->outstanding_txns),
			msecs_to_jiffies(info->timeout_ms));

	/* Check pending transactions that wait for reply */
	if (ret >= 0) {
		binder_inner_proc_lock(target_proc);
		if (binder_txns_pending_ilocked(target_proc))
			ret = -EAGAIN;
		binder_inner_proc_unlock(target_proc);
	}

	if (ret < 0) {
		binder_inner_proc_lock(target_proc);
		target_proc->is_frozen = false;
		binder_inner_proc_unlock(target_proc);
	}

	return ret;
}

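/*
 * Illustrative user-space sketch (not compiled; binder_fd and
 * target_pid are hypothetical): freeze a target process, waiting up to
 * 100ms for pending transactions to drain, and treat EAGAIN as
 * "try again later":
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		;	// outstanding transactions, retry or thaw
 */
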
static int binder_ioctl_get_freezer_info(
				struct binder_frozen_status_info *info)
{
	struct binder_proc *target_proc;
	bool found = false;
	__u32 txns_pending;

	info->sync_recv = 0;
	info->async_recv = 0;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
		if (target_proc->pid == info->pid) {
			found = true;
			binder_inner_proc_lock(target_proc);
			txns_pending = binder_txns_pending_ilocked(target_proc);
			info->sync_recv |= target_proc->sync_recv |
					(txns_pending << 1);
			info->async_recv |= target_proc->async_recv;
			binder_inner_proc_unlock(target_proc);
		}
	}
	mutex_unlock(&binder_procs_lock);

	if (!found)
		return -EINVAL;

	return 0;
}

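/*
 * Note the encoding above: bit 0 of sync_recv reports that a sync
 * transaction arrived while frozen, and bit 1 (txns_pending << 1)
 * reports transactions still awaiting a reply. An illustrative
 * user-space decode (not compiled; binder_fd/target_pid hypothetical):
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *	ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info);
 *	bool sync_received = info.sync_recv & 1;
 *	bool txns_pending = info.sync_recv & 2;
 */
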
static int binder_ioctl_get_extended_error(struct binder_thread *thread,
					   void __user *ubuf)
{
	struct binder_extended_error ee;

	binder_inner_proc_lock(thread->proc);
	ee = thread->ee;
	/*
	 * Snapshot and consume the error under the lock, but do the
	 * copy_to_user() unlocked: it may fault and sleep, which is
	 * not allowed while holding a spinlock.
	 */
	thread->ee.id = 0;
	thread->ee.command = BR_OK;
	thread->ee.param = 0;
	binder_inner_proc_unlock(thread->proc);

	if (copy_to_user(ubuf, &ee, sizeof(ee)))
		return -EFAULT;

	return 0;
}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_FREEZE: {
		struct binder_freeze_info info;
		struct binder_proc **target_procs = NULL, *target_proc;
		int target_procs_count = 0, i = 0;

		ret = 0;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		mutex_lock(&binder_procs_lock);
		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid == info.pid)
				target_procs_count++;
		}

		if (target_procs_count == 0) {
			mutex_unlock(&binder_procs_lock);
			ret = -EINVAL;
			goto err;
		}

		target_procs = kcalloc(target_procs_count,
				       sizeof(struct binder_proc *),
				       GFP_KERNEL);

		if (!target_procs) {
			mutex_unlock(&binder_procs_lock);
			ret = -ENOMEM;
			goto err;
		}

		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
			if (target_proc->pid != info.pid)
				continue;

			binder_inner_proc_lock(target_proc);
			target_proc->tmp_ref++;
			binder_inner_proc_unlock(target_proc);

			target_procs[i++] = target_proc;
		}
		mutex_unlock(&binder_procs_lock);

		for (i = 0; i < target_procs_count; i++) {
			if (ret >= 0)
				ret = binder_ioctl_freeze(&info,
							  target_procs[i]);

			binder_proc_dec_tmpref(target_procs[i]);
		}

		kfree(target_procs);

		if (ret < 0)
			goto err;
		break;
	}
	case BINDER_GET_FROZEN_INFO: {
		struct binder_frozen_status_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_freezer_info(&info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
		uint32_t enable;

		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
			ret = -EFAULT;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->oneway_spam_detection_enabled = (bool)enable;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_GET_EXTENDED_ERROR:
		ret = binder_ioctl_get_extended_error(thread, ubuf);
		if (ret < 0)
			goto err;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -EINTR)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

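/*
 * Illustrative open-time handshake (not compiled; binder_fd is
 * hypothetical). Clients typically validate the protocol version and
 * size the thread pool right after open():
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	if (ioctl(binder_fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		;	// mismatched kernel/user headers, bail out
 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 */
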
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}

static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct binder_proc *proc = filp->private_data;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
		return -EPERM;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	return binder_alloc_mmap_handler(&proc->alloc, vma);
}

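/*
 * Illustrative user-space mapping (not compiled; binder_fd and the
 * 1 MiB size are hypothetical). The buffer area must be mapped without
 * PROT_WRITE, since VM_WRITE is rejected via FORBIDDEN_MMAP_FLAGS
 * above; the kernel writes transaction payloads in and user space only
 * reads them:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE,
 *			 binder_fd, 0);
 */
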
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	proc->cred = get_cred(filp->f_cred);
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->freeze_wait);
	proc->default_priority = task_nice(current);
	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);

	if (proc->binderfs_entry) {
		binderfs_remove_file(proc->binderfs_entry);
		proc->binderfs_entry = NULL;
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	proc->is_frozen = false;
	proc->sync_recv = false;
	proc->async_recv = false;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->user_data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

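/*
 * Worked example of the index math above (illustrative): with a
 * 32-entry ring that has wrapped (log->full set) and log->cur == 40,
 * count becomes 41 and cur starts at 41 % 32 == 9; count is then capped
 * at 32, so entries print oldest-first from slot 9 around to slot 8.
 */
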
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");