/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * is required via a suffix on the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
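
/*
 * Illustrative sketch (not part of the upstream driver): the nesting the
 * rules above permit when a caller needs all three locks. The helper
 * functions referenced here are defined later in this file.
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	// ...operate on refs, node fields, and todo lists...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */
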
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
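
/*
 * Illustrative sketch (not part of the upstream driver): the macros above
 * recover the enclosing typed object from the generic header embedded in
 * it. The caller is assumed to have validated hdr->type first (see
 * binder_get_object() below).
 */
static inline binder_uintptr_t
binder_example_fbo_cookie(struct binder_object_header *hdr)
{
	/* valid only when hdr->type names a flat_binder_object variant */
	struct flat_binder_object *fbo = to_flat_binder_object(hdr);

	return fbo->cookie;
}
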
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * Write barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
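
/*
 * Illustrative sketch (not part of the upstream driver): how a writer is
 * expected to publish a finished entry. Fields are filled in while
 * debug_id_done is still 0; the write barrier orders those stores before
 * the final debug_id_done store that readers poll for.
 */
static inline void binder_example_log_complete(int t_debug_id)
{
	struct binder_transaction_log_entry *e;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	/* ...fill in the remaining fields of *e... */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
}
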
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry:          list entry
 * @file:                 struct file to be associated with new fd
 * @offset:               offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * struct binder_object - union of flat binder object types
 * @hdr:   generic object header
 * @fbo:   binder object (nodes and refs)
 * @fdo:   file descriptor object
 * @bbo:   binder buffer pointer
 * @fdao:  file descriptor array
 *
 * Used for type-independent object copies
 */
struct binder_object {
	union {
		struct binder_object_header hdr;
		struct flat_binder_object fbo;
		struct binder_fd_object fdo;
		struct binder_buffer_object bbo;
		struct binder_fd_array_object fdao;
	};
};
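
/*
 * Illustrative sketch (not part of the upstream driver): after a copy is
 * read into the union through the generic @hdr view, the object is
 * reinterpreted through the member matching hdr.type.
 */
static inline struct flat_binder_object *
binder_example_object_as_fbo(struct binder_object *object)
{
	switch (object->hdr.type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		return &object->fbo;
	default:
		return NULL;
	}
}
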
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc whose outer lock is released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose inner lock is released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock is released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks are released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of this work.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of this work.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
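
/*
 * Illustrative sketch (not part of the upstream driver): draining a
 * worklist via binder_dequeue_work_head(), e.g. during teardown. Each
 * iteration takes and drops the inner lock, so new work may be queued
 * concurrently; the loop simply runs until it observes an empty list.
 */
static inline void binder_example_drain_worklist(struct binder_proc *proc,
						 struct list_head *list)
{
	struct binder_work *w;

	while ((w = binder_dequeue_work_head(proc, list)) != NULL) {
		/* dispose of or re-dispatch w according to w->type */
	}
}
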
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* find the smallest unused descriptor; desc 0 is reserved for
	 * the context manager
	 */
	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if not found (or if a strong ref is
 * required and the ref is not strong)
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	struct binder_ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to @increment.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
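
/*
 * Illustrative sketch (not part of the upstream driver): the tmp_ref taken
 * by binder_get_txn_from() pins the thread and must be paired with
 * binder_thread_dec_tmpref() once the caller is done with it.
 */
static inline int binder_example_txn_from_pid(struct binder_transaction *t)
{
	struct binder_thread *from = binder_get_txn_from(t);
	int pid = -1;

	if (from) {
		pid = from->pid;	/* safe: tmp_ref keeps *from alive */
		binder_thread_dec_tmpref(from);
	}
	return pid;
}
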
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;
	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
				      offset, read_size);

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start_offset and @num_valid, and if there's a
 *		valid binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start_offset were previously verified to have valid
 *		offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
				      b, buffer_offset, sizeof(object_offset));
	object_size = binder_get_object(proc, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}

2145 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2146 * @proc: binder_proc owning the buffer
2147 * @b: transaction buffer
2148 * @objects_start_offset: offset to start of objects buffer
2149 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
2150 * @fixup_offset: start offset in @buffer to fix up
2151 * @last_obj_offset: offset to last binder_buffer_object that we fixed
2152 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
2154 * Return: %true if a fixup in buffer @buffer at offset @offset is
2157 * For safety reasons, we only allow fixups inside a buffer to happen
2158 * at increasing offsets; additionally, we only allow fixup on the last
2159 * buffer object that was verified, or one of its parents.
2161 * Example of what is allowed:
2164 * B (parent = A, offset = 0)
2165 * C (parent = A, offset = 16)
2166 * D (parent = C, offset = 0)
2167 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2169 * Examples of what is not allowed:
2171 * Decreasing offsets within the same parent:
2173 * C (parent = A, offset = 16)
2174 * B (parent = A, offset = 0) // decreasing offset within A
2176 * Referring to a parent that wasn't the last object or any of its parents:
2178 * B (parent = A, offset = 0)
2179 * C (parent = A, offset = 0)
2180 * C (parent = A, offset = 16)
2181 * D (parent = B, offset = 0) // B is not A or any of A's parents
2183 static bool binder_validate_fixup(struct binder_proc *proc,
2184 struct binder_buffer *b,
2185 binder_size_t objects_start_offset,
2186 binder_size_t buffer_obj_offset,
2187 binder_size_t fixup_offset,
2188 binder_size_t last_obj_offset,
2189 binder_size_t last_min_offset)
2191 if (!last_obj_offset) {
2192 /* Nothing to fix up in */
2196 while (last_obj_offset != buffer_obj_offset) {
2197 unsigned long buffer_offset;
2198 struct binder_object last_object;
2199 struct binder_buffer_object *last_bbo;
2200 size_t object_size = binder_get_object(proc, b, last_obj_offset,
2202 if (object_size != sizeof(*last_bbo))
2205 last_bbo = &last_object.bbo;
2207 * Safe to retrieve the parent of last_obj, since it
2208 * was already previously verified by the driver.
2210 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2212 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2213 buffer_offset = objects_start_offset +
2214 sizeof(binder_size_t) * last_bbo->parent,
2215 binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
2217 sizeof(last_obj_offset));
2219 return (fixup_offset >= last_min_offset);
2223 * struct binder_task_work_cb - for deferred close
2225 * @twork: callback_head for task work
2228 * Structure to pass task work to be handled after
2229 * returning from binder_ioctl() via task_work_add().
2231 struct binder_task_work_cb {
2232 struct callback_head twork;
2237 * binder_do_fd_close() - close list of file descriptors
2238 * @twork: callback head for task work
2240 * It is not safe to call ksys_close() during the binder_ioctl()
2241 * function if there is a chance that binder's own file descriptor
2242 * might be closed. This is to meet the requirements for using
2243 * fdget() (see comments for __fget_light()). Therefore use
2244 * task_work_add() to schedule the close operation once we have
2245 * returned from binder_ioctl(). This function is a callback
2246 * for that mechanism and does the actual ksys_close() on the
2247 * given file descriptor.
2249 static void binder_do_fd_close(struct callback_head *twork)
2251 struct binder_task_work_cb *twcb = container_of(twork,
2252 struct binder_task_work_cb, twork);
2259 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2260 * @fd: file-descriptor to close
2262 * See comments in binder_do_fd_close(). This function is used to schedule
2263 * a file-descriptor to be closed after returning from binder_ioctl().
2265 static void binder_deferred_fd_close(int fd)
2267 struct binder_task_work_cb *twcb;
2269 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2272 init_task_work(&twcb->twork, binder_do_fd_close);
2273 __close_fd_get_file(fd, &twcb->file);
2275 task_work_add(current, &twcb->twork, true);
2280 static void binder_transaction_buffer_release(struct binder_proc *proc,
2281 struct binder_buffer *buffer,
2282 binder_size_t failed_at,
2285 int debug_id = buffer->debug_id;
2286 binder_size_t off_start_offset, buffer_offset, off_end_offset;
2288 binder_debug(BINDER_DEBUG_TRANSACTION,
2289 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2290 proc->pid, buffer->debug_id,
2291 buffer->data_size, buffer->offsets_size,
2292 (unsigned long long)failed_at);
2294 if (buffer->target_node)
2295 binder_dec_node(buffer->target_node, 1, 0);
2297 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2298 off_end_offset = is_failure ? failed_at :
2299 off_start_offset + buffer->offsets_size;
2300 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2301 buffer_offset += sizeof(binder_size_t)) {
2302 struct binder_object_header *hdr;
2304 struct binder_object object;
2305 binder_size_t object_offset;
2307 binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2308 buffer, buffer_offset,
2309 sizeof(object_offset));
2310 object_size = binder_get_object(proc, buffer,
2311 object_offset, &object);
2312 if (object_size == 0) {
2313 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2314 debug_id, (u64)object_offset, buffer->data_size);
2318 switch (hdr->type) {
2319 case BINDER_TYPE_BINDER:
2320 case BINDER_TYPE_WEAK_BINDER: {
2321 struct flat_binder_object *fp;
2322 struct binder_node *node;
2324 fp = to_flat_binder_object(hdr);
2325 node = binder_get_node(proc, fp->binder);
2327 pr_err("transaction release %d bad node %016llx\n",
2328 debug_id, (u64)fp->binder);
2331 binder_debug(BINDER_DEBUG_TRANSACTION,
2332 " node %d u%016llx\n",
2333 node->debug_id, (u64)node->ptr);
2334 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2336 binder_put_node(node);
2338 case BINDER_TYPE_HANDLE:
2339 case BINDER_TYPE_WEAK_HANDLE: {
2340 struct flat_binder_object *fp;
2341 struct binder_ref_data rdata;
2344 fp = to_flat_binder_object(hdr);
2345 ret = binder_dec_ref_for_handle(proc, fp->handle,
2346 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2349 pr_err("transaction release %d bad handle %d, ret = %d\n",
2350 debug_id, fp->handle, ret);
2353 binder_debug(BINDER_DEBUG_TRANSACTION,
2354 " ref %d desc %d\n",
2355 rdata.debug_id, rdata.desc);
2358 case BINDER_TYPE_FD: {
2360 * No need to close the file here since user-space
2361 * closes it for for successfully delivered
2362 * transactions. For transactions that weren't
2363 * delivered, the new fd was never allocated so
2364 * there is no need to close and the fput on the
2365 * file is done when the transaction is torn
2368 WARN_ON(failed_at &&
2369 proc->tsk == current->group_leader);
2371 case BINDER_TYPE_PTR:
2373 * Nothing to do here, this will get cleaned up when the
2374 * transaction buffer gets freed
2377 case BINDER_TYPE_FDA: {
2378 struct binder_fd_array_object *fda;
2379 struct binder_buffer_object *parent;
2380 struct binder_object ptr_object;
2381 binder_size_t fda_offset;
2383 binder_size_t fd_buf_size;
2384 binder_size_t num_valid;
2386 if (proc->tsk != current->group_leader) {
2388 * Nothing to do if running in sender context
2389 * The fd fixups have not been applied so no
2390 * fds need to be closed.
2395 num_valid = (buffer_offset - off_start_offset) /
2396 sizeof(binder_size_t);
2397 fda = to_binder_fd_array_object(hdr);
2398 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2404 pr_err("transaction release %d bad parent offset\n",
2408 fd_buf_size = sizeof(u32) * fda->num_fds;
2409 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2410 pr_err("transaction release %d invalid number of fds (%lld)\n",
2411 debug_id, (u64)fda->num_fds);
2414 if (fd_buf_size > parent->length ||
2415 fda->parent_offset > parent->length - fd_buf_size) {
2416 /* No space for all file descriptors here. */
2417 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2418 debug_id, (u64)fda->num_fds);
2422 * the source data for binder_buffer_object is visible
2423 * to user-space and the @buffer element is the user
2424 * pointer to the buffer_object containing the fd_array.
2425 * Convert the address to an offset relative to
2426 * the base of the transaction buffer.
2429 (parent->buffer - (uintptr_t)buffer->user_data) +
2431 for (fd_index = 0; fd_index < fda->num_fds;
2434 binder_size_t offset = fda_offset +
2435 fd_index * sizeof(fd);
2437 binder_alloc_copy_from_buffer(&proc->alloc,
2442 binder_deferred_fd_close(fd);
2446 pr_err("transaction release %d bad object type %x\n",
2447 debug_id, hdr->type);
2453 static int binder_translate_binder(struct flat_binder_object *fp,
2454 struct binder_transaction *t,
2455 struct binder_thread *thread)
2457 struct binder_node *node;
2458 struct binder_proc *proc = thread->proc;
2459 struct binder_proc *target_proc = t->to_proc;
2460 struct binder_ref_data rdata;
2463 node = binder_get_node(proc, fp->binder);
2465 node = binder_new_node(proc, fp);
2469 if (fp->cookie != node->cookie) {
2470 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2471 proc->pid, thread->pid, (u64)fp->binder,
2472 node->debug_id, (u64)fp->cookie,
2477 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2482 ret = binder_inc_ref_for_node(target_proc, node,
2483 fp->hdr.type == BINDER_TYPE_BINDER,
2484 &thread->todo, &rdata);
2488 if (fp->hdr.type == BINDER_TYPE_BINDER)
2489 fp->hdr.type = BINDER_TYPE_HANDLE;
2491 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2493 fp->handle = rdata.desc;
2496 trace_binder_transaction_node_to_ref(t, node, &rdata);
2497 binder_debug(BINDER_DEBUG_TRANSACTION,
2498 " node %d u%016llx -> ref %d desc %d\n",
2499 node->debug_id, (u64)node->ptr,
2500 rdata.debug_id, rdata.desc);
2502 binder_put_node(node);
2506 static int binder_translate_handle(struct flat_binder_object *fp,
2507 struct binder_transaction *t,
2508 struct binder_thread *thread)
2510 struct binder_proc *proc = thread->proc;
2511 struct binder_proc *target_proc = t->to_proc;
2512 struct binder_node *node;
2513 struct binder_ref_data src_rdata;
2516 node = binder_get_node_from_ref(proc, fp->handle,
2517 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2519 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2520 proc->pid, thread->pid, fp->handle);
2523 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2528 binder_node_lock(node);
2529 if (node->proc == target_proc) {
2530 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2531 fp->hdr.type = BINDER_TYPE_BINDER;
2533 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2534 fp->binder = node->ptr;
2535 fp->cookie = node->cookie;
2537 binder_inner_proc_lock(node->proc);
2539 __acquire(&node->proc->inner_lock);
2540 binder_inc_node_nilocked(node,
2541 fp->hdr.type == BINDER_TYPE_BINDER,
2544 binder_inner_proc_unlock(node->proc);
2546 __release(&node->proc->inner_lock);
2547 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2548 binder_debug(BINDER_DEBUG_TRANSACTION,
2549 " ref %d desc %d -> node %d u%016llx\n",
2550 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2552 binder_node_unlock(node);
2554 struct binder_ref_data dest_rdata;
2556 binder_node_unlock(node);
2557 ret = binder_inc_ref_for_node(target_proc, node,
2558 fp->hdr.type == BINDER_TYPE_HANDLE,
2564 fp->handle = dest_rdata.desc;
2566 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2568 binder_debug(BINDER_DEBUG_TRANSACTION,
2569 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2570 src_rdata.debug_id, src_rdata.desc,
2571 dest_rdata.debug_id, dest_rdata.desc,
2575 binder_put_node(node);
2579 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2580 struct binder_transaction *t,
2581 struct binder_thread *thread,
2582 struct binder_transaction *in_reply_to)
2584 struct binder_proc *proc = thread->proc;
2585 struct binder_proc *target_proc = t->to_proc;
2586 struct binder_txn_fd_fixup *fixup;
2589 bool target_allows_fd;
2592 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2594 target_allows_fd = t->buffer->target_node->accept_fds;
2595 if (!target_allows_fd) {
2596 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2597 proc->pid, thread->pid,
2598 in_reply_to ? "reply" : "transaction",
2601 goto err_fd_not_accepted;
2606 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2607 proc->pid, thread->pid, fd);
2611 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2618 * Add fixup record for this transaction. The allocation
2619 * of the fd in the target needs to be done from a
2622 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2628 fixup->offset = fd_offset;
2629 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2630 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2638 err_fd_not_accepted:
2642 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2643 struct binder_buffer_object *parent,
2644 struct binder_transaction *t,
2645 struct binder_thread *thread,
2646 struct binder_transaction *in_reply_to)
2648 binder_size_t fdi, fd_buf_size;
2649 binder_size_t fda_offset;
2650 struct binder_proc *proc = thread->proc;
2651 struct binder_proc *target_proc = t->to_proc;
2653 fd_buf_size = sizeof(u32) * fda->num_fds;
2654 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2655 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2656 proc->pid, thread->pid, (u64)fda->num_fds);
2659 if (fd_buf_size > parent->length ||
2660 fda->parent_offset > parent->length - fd_buf_size) {
2661 /* No space for all file descriptors here. */
2662 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2663 proc->pid, thread->pid, (u64)fda->num_fds);
2667 * the source data for binder_buffer_object is visible
2668 * to user-space and the @buffer element is the user
2669 * pointer to the buffer_object containing the fd_array.
2670 * Convert the address to an offset relative to
2671 * the base of the transaction buffer.
2673 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2675 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2676 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2677 proc->pid, thread->pid);
2680 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2683 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2685 binder_alloc_copy_from_buffer(&target_proc->alloc,
2687 offset, sizeof(fd));
2688 ret = binder_translate_fd(fd, offset, t, thread,
2696 static int binder_fixup_parent(struct binder_transaction *t,
2697 struct binder_thread *thread,
2698 struct binder_buffer_object *bp,
2699 binder_size_t off_start_offset,
2700 binder_size_t num_valid,
2701 binder_size_t last_fixup_obj_off,
2702 binder_size_t last_fixup_min_off)
2704 struct binder_buffer_object *parent;
2705 struct binder_buffer *b = t->buffer;
2706 struct binder_proc *proc = thread->proc;
2707 struct binder_proc *target_proc = t->to_proc;
2708 struct binder_object object;
2709 binder_size_t buffer_offset;
2710 binder_size_t parent_offset;
2712 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2715 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2716 off_start_offset, &parent_offset,
2719 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2720 proc->pid, thread->pid);
2724 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2725 parent_offset, bp->parent_offset,
2727 last_fixup_min_off)) {
2728 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2729 proc->pid, thread->pid);
2733 if (parent->length < sizeof(binder_uintptr_t) ||
2734 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2735 /* No space for a pointer here! */
2736 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2737 proc->pid, thread->pid);
2740 buffer_offset = bp->parent_offset +
2741 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2742 binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2743 &bp->buffer, sizeof(bp->buffer));
2749 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2750 * @t: transaction to send
2751 * @proc: process to send the transaction to
2752 * @thread: thread in @proc to send the transaction to (may be NULL)
2754 * This function queues a transaction to the specified process. It will try
2755 * to find a thread in the target process to handle the transaction and
2756 * wake it up. If no thread is found, the work is queued to the proc
2759 * If the @thread parameter is not NULL, the transaction is always queued
2760 * to the waitlist of that specific thread.
2762 * Return: true if the transactions was successfully queued
2763 * false if the target process or thread is dead
2765 static bool binder_proc_transaction(struct binder_transaction *t,
2766 struct binder_proc *proc,
2767 struct binder_thread *thread)
2769 struct binder_node *node = t->buffer->target_node;
2770 bool oneway = !!(t->flags & TF_ONE_WAY);
2771 bool pending_async = false;
2774 binder_node_lock(node);
2777 if (node->has_async_transaction) {
2778 pending_async = true;
2780 node->has_async_transaction = true;
2784 binder_inner_proc_lock(proc);
2786 if (proc->is_dead || (thread && thread->is_dead)) {
2787 binder_inner_proc_unlock(proc);
2788 binder_node_unlock(node);
2792 if (!thread && !pending_async)
2793 thread = binder_select_thread_ilocked(proc);
2796 binder_enqueue_thread_work_ilocked(thread, &t->work);
2797 else if (!pending_async)
2798 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2800 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2803 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2805 binder_inner_proc_unlock(proc);
2806 binder_node_unlock(node);
2812 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2813 * @node: struct binder_node for which to get refs
2814 * @proc: returns @node->proc if valid
2815 * @error: if no @proc then returns BR_DEAD_REPLY
2817 * User-space normally keeps the node alive when creating a transaction
2818 * since it has a reference to the target. The local strong ref keeps it
2819 * alive if the sending process dies before the target process processes
2820 * the transaction. If the source process is malicious or has a reference
2821 * counting bug, relying on the local strong ref can fail.
2823 * Since user-space can cause the local strong ref to go away, we also take
2824 * a tmpref on the node to ensure it survives while we are constructing
2825 * the transaction. We also need a tmpref on the proc while we are
2826 * constructing the transaction, so we take that here as well.
2828 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2829 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2830 * target proc has died, @error is set to BR_DEAD_REPLY
2832 static struct binder_node *binder_get_node_refs_for_txn(
2833 struct binder_node *node,
2834 struct binder_proc **procp,
2837 struct binder_node *target_node = NULL;
2839 binder_node_inner_lock(node);
2842 binder_inc_node_nilocked(node, 1, 0, NULL);
2843 binder_inc_node_tmpref_ilocked(node);
2844 node->proc->tmp_ref++;
2845 *procp = node->proc;
2847 *error = BR_DEAD_REPLY;
2848 binder_node_inner_unlock(node);
2853 static void binder_transaction(struct binder_proc *proc,
2854 struct binder_thread *thread,
2855 struct binder_transaction_data *tr, int reply,
2856 binder_size_t extra_buffers_size)
2859 struct binder_transaction *t;
2860 struct binder_work *w;
2861 struct binder_work *tcomplete;
2862 binder_size_t buffer_offset = 0;
2863 binder_size_t off_start_offset, off_end_offset;
2864 binder_size_t off_min;
2865 binder_size_t sg_buf_offset, sg_buf_end_offset;
2866 struct binder_proc *target_proc = NULL;
2867 struct binder_thread *target_thread = NULL;
2868 struct binder_node *target_node = NULL;
2869 struct binder_transaction *in_reply_to = NULL;
2870 struct binder_transaction_log_entry *e;
2871 uint32_t return_error = 0;
2872 uint32_t return_error_param = 0;
2873 uint32_t return_error_line = 0;
2874 binder_size_t last_fixup_obj_off = 0;
2875 binder_size_t last_fixup_min_off = 0;
2876 struct binder_context *context = proc->context;
2877 int t_debug_id = atomic_inc_return(&binder_last_id);
2878 char *secctx = NULL;
2881 e = binder_transaction_log_add(&binder_transaction_log);
2882 e->debug_id = t_debug_id;
2883 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2884 e->from_proc = proc->pid;
2885 e->from_thread = thread->pid;
2886 e->target_handle = tr->target.handle;
2887 e->data_size = tr->data_size;
2888 e->offsets_size = tr->offsets_size;
2889 e->context_name = proc->context->name;
2892 binder_inner_proc_lock(proc);
2893 in_reply_to = thread->transaction_stack;
2894 if (in_reply_to == NULL) {
2895 binder_inner_proc_unlock(proc);
2896 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2897 proc->pid, thread->pid);
2898 return_error = BR_FAILED_REPLY;
2899 return_error_param = -EPROTO;
2900 return_error_line = __LINE__;
2901 goto err_empty_call_stack;
2903 if (in_reply_to->to_thread != thread) {
2904 spin_lock(&in_reply_to->lock);
2905 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2906 proc->pid, thread->pid, in_reply_to->debug_id,
2907 in_reply_to->to_proc ?
2908 in_reply_to->to_proc->pid : 0,
2909 in_reply_to->to_thread ?
2910 in_reply_to->to_thread->pid : 0);
2911 spin_unlock(&in_reply_to->lock);
2912 binder_inner_proc_unlock(proc);
2913 return_error = BR_FAILED_REPLY;
2914 return_error_param = -EPROTO;
2915 return_error_line = __LINE__;
2917 goto err_bad_call_stack;
2919 thread->transaction_stack = in_reply_to->to_parent;
2920 binder_inner_proc_unlock(proc);
2921 binder_set_nice(in_reply_to->saved_priority);
2922 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2923 if (target_thread == NULL) {
2924 /* annotation for sparse */
2925 __release(&target_thread->proc->inner_lock);
2926 return_error = BR_DEAD_REPLY;
2927 return_error_line = __LINE__;
2928 goto err_dead_binder;
2930 if (target_thread->transaction_stack != in_reply_to) {
2931 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2932 proc->pid, thread->pid,
2933 target_thread->transaction_stack ?
2934 target_thread->transaction_stack->debug_id : 0,
2935 in_reply_to->debug_id);
2936 binder_inner_proc_unlock(target_thread->proc);
2937 return_error = BR_FAILED_REPLY;
2938 return_error_param = -EPROTO;
2939 return_error_line = __LINE__;
2941 target_thread = NULL;
2942 goto err_dead_binder;
2944 target_proc = target_thread->proc;
2945 target_proc->tmp_ref++;
2946 binder_inner_proc_unlock(target_thread->proc);
2948 if (tr->target.handle) {
2949 struct binder_ref *ref;
2952 * There must already be a strong ref
2953 * on this node. If so, do a strong
2954 * increment on the node to ensure it
2955 * stays alive until the transaction is
2958 binder_proc_lock(proc);
2959 ref = binder_get_ref_olocked(proc, tr->target.handle,
2962 target_node = binder_get_node_refs_for_txn(
2963 ref->node, &target_proc,
2966 binder_user_error("%d:%d got transaction to invalid handle\n",
2967 proc->pid, thread->pid);
2968 return_error = BR_FAILED_REPLY;
2970 binder_proc_unlock(proc);
2972 mutex_lock(&context->context_mgr_node_lock);
2973 target_node = context->binder_context_mgr_node;
2975 target_node = binder_get_node_refs_for_txn(
2976 target_node, &target_proc,
2979 return_error = BR_DEAD_REPLY;
2980 mutex_unlock(&context->context_mgr_node_lock);
2981 if (target_node && target_proc == proc) {
2982 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2983 proc->pid, thread->pid);
2984 return_error = BR_FAILED_REPLY;
2985 return_error_param = -EINVAL;
2986 return_error_line = __LINE__;
2987 goto err_invalid_target_handle;
2992 * return_error is set above
2994 return_error_param = -EINVAL;
2995 return_error_line = __LINE__;
2996 goto err_dead_binder;
2998 e->to_node = target_node->debug_id;
2999 if (security_binder_transaction(proc->tsk,
3000 target_proc->tsk) < 0) {
3001 return_error = BR_FAILED_REPLY;
3002 return_error_param = -EPERM;
3003 return_error_line = __LINE__;
3004 goto err_invalid_target_handle;
3006 binder_inner_proc_lock(proc);
3008 w = list_first_entry_or_null(&thread->todo,
3009 struct binder_work, entry);
3010 if (!(tr->flags & TF_ONE_WAY) && w &&
3011 w->type == BINDER_WORK_TRANSACTION) {
3013 * Do not allow new outgoing transaction from a
3014 * thread that has a transaction at the head of
3015 * its todo list. Only need to check the head
3016 * because binder_select_thread_ilocked picks a
3017 * thread from proc->waiting_threads to enqueue
3018 * the transaction, and nothing is queued to the
3019 * todo list while the thread is on waiting_threads.
3021 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3022 proc->pid, thread->pid);
3023 binder_inner_proc_unlock(proc);
3024 return_error = BR_FAILED_REPLY;
3025 return_error_param = -EPROTO;
3026 return_error_line = __LINE__;
3027 goto err_bad_todo_list;
3030 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3031 struct binder_transaction *tmp;
3033 tmp = thread->transaction_stack;
3034 if (tmp->to_thread != thread) {
3035 spin_lock(&tmp->lock);
3036 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3037 proc->pid, thread->pid, tmp->debug_id,
3038 tmp->to_proc ? tmp->to_proc->pid : 0,
3040 tmp->to_thread->pid : 0);
3041 spin_unlock(&tmp->lock);
3042 binder_inner_proc_unlock(proc);
3043 return_error = BR_FAILED_REPLY;
3044 return_error_param = -EPROTO;
3045 return_error_line = __LINE__;
3046 goto err_bad_call_stack;
3049 struct binder_thread *from;
3051 spin_lock(&tmp->lock);
3053 if (from && from->proc == target_proc) {
3054 atomic_inc(&from->tmp_ref);
3055 target_thread = from;
3056 spin_unlock(&tmp->lock);
3059 spin_unlock(&tmp->lock);
3060 tmp = tmp->from_parent;
3063 binder_inner_proc_unlock(proc);
3066 e->to_thread = target_thread->pid;
3067 e->to_proc = target_proc->pid;
3069 /* TODO: reuse incoming transaction for reply */
3070 t = kzalloc(sizeof(*t), GFP_KERNEL);
3072 return_error = BR_FAILED_REPLY;
3073 return_error_param = -ENOMEM;
3074 return_error_line = __LINE__;
3075 goto err_alloc_t_failed;
3077 INIT_LIST_HEAD(&t->fd_fixups);
3078 binder_stats_created(BINDER_STAT_TRANSACTION);
3079 spin_lock_init(&t->lock);
3081 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3082 if (tcomplete == NULL) {
3083 return_error = BR_FAILED_REPLY;
3084 return_error_param = -ENOMEM;
3085 return_error_line = __LINE__;
3086 goto err_alloc_tcomplete_failed;
3088 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3090 t->debug_id = t_debug_id;
3093 binder_debug(BINDER_DEBUG_TRANSACTION,
3094 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3095 proc->pid, thread->pid, t->debug_id,
3096 target_proc->pid, target_thread->pid,
3097 (u64)tr->data.ptr.buffer,
3098 (u64)tr->data.ptr.offsets,
3099 (u64)tr->data_size, (u64)tr->offsets_size,
3100 (u64)extra_buffers_size);
3102 binder_debug(BINDER_DEBUG_TRANSACTION,
3103 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3104 proc->pid, thread->pid, t->debug_id,
3105 target_proc->pid, target_node->debug_id,
3106 (u64)tr->data.ptr.buffer,
3107 (u64)tr->data.ptr.offsets,
3108 (u64)tr->data_size, (u64)tr->offsets_size,
3109 (u64)extra_buffers_size);
3111 if (!reply && !(tr->flags & TF_ONE_WAY))
3115 t->sender_euid = task_euid(proc->tsk);
3116 t->to_proc = target_proc;
3117 t->to_thread = target_thread;
3119 t->flags = tr->flags;
3120 t->priority = task_nice(current);
3122 if (target_node && target_node->txn_security_ctx) {
3126 security_task_getsecid(proc->tsk, &secid);
3127 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3129 return_error = BR_FAILED_REPLY;
3130 return_error_param = ret;
3131 return_error_line = __LINE__;
3132 goto err_get_secctx_failed;
3134 added_size = ALIGN(secctx_sz, sizeof(u64));
3135 extra_buffers_size += added_size;
3136 if (extra_buffers_size < added_size) {
3137 /* integer overflow of extra_buffers_size */
3138 return_error = BR_FAILED_REPLY;
3139 return_error_param = EINVAL;
3140 return_error_line = __LINE__;
3141 goto err_bad_extra_size;
3145 trace_binder_transaction(reply, t, target_node);
3147 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3148 tr->offsets_size, extra_buffers_size,
3149 !reply && (t->flags & TF_ONE_WAY));
3150 if (IS_ERR(t->buffer)) {
3152 * -ESRCH indicates VMA cleared. The target is dying.
3154 return_error_param = PTR_ERR(t->buffer);
3155 return_error = return_error_param == -ESRCH ?
3156 BR_DEAD_REPLY : BR_FAILED_REPLY;
3157 return_error_line = __LINE__;
3159 goto err_binder_alloc_buf_failed;
3162 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3163 ALIGN(tr->offsets_size, sizeof(void *)) +
3164 ALIGN(extra_buffers_size, sizeof(void *)) -
3165 ALIGN(secctx_sz, sizeof(u64));
3167 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3168 binder_alloc_copy_to_buffer(&target_proc->alloc,
3169 t->buffer, buf_offset,
3171 security_release_secctx(secctx, secctx_sz);
3174 t->buffer->debug_id = t->debug_id;
3175 t->buffer->transaction = t;
3176 t->buffer->target_node = target_node;
3177 trace_binder_transaction_alloc_buf(t->buffer);
3179 if (binder_alloc_copy_user_to_buffer(
3180 &target_proc->alloc,
3182 (const void __user *)
3183 (uintptr_t)tr->data.ptr.buffer,
3185 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3186 proc->pid, thread->pid);
3187 return_error = BR_FAILED_REPLY;
3188 return_error_param = -EFAULT;
3189 return_error_line = __LINE__;
3190 goto err_copy_data_failed;
3192 if (binder_alloc_copy_user_to_buffer(
3193 &target_proc->alloc,
3195 ALIGN(tr->data_size, sizeof(void *)),
3196 (const void __user *)
3197 (uintptr_t)tr->data.ptr.offsets,
3198 tr->offsets_size)) {
3199 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3200 proc->pid, thread->pid);
3201 return_error = BR_FAILED_REPLY;
3202 return_error_param = -EFAULT;
3203 return_error_line = __LINE__;
3204 goto err_copy_data_failed;
3206 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3207 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3208 proc->pid, thread->pid, (u64)tr->offsets_size);
3209 return_error = BR_FAILED_REPLY;
3210 return_error_param = -EINVAL;
3211 return_error_line = __LINE__;
3212 goto err_bad_offset;
3214 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3215 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3216 proc->pid, thread->pid,
3217 (u64)extra_buffers_size);
3218 return_error = BR_FAILED_REPLY;
3219 return_error_param = -EINVAL;
3220 return_error_line = __LINE__;
3221 goto err_bad_offset;
3223 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3224 buffer_offset = off_start_offset;
3225 off_end_offset = off_start_offset + tr->offsets_size;
3226 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3227 sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
3229 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3230 buffer_offset += sizeof(binder_size_t)) {
3231 struct binder_object_header *hdr;
3233 struct binder_object object;
3234 binder_size_t object_offset;
3236 binder_alloc_copy_from_buffer(&target_proc->alloc,
3240 sizeof(object_offset));
3241 object_size = binder_get_object(target_proc, t->buffer,
3242 object_offset, &object);
3243 if (object_size == 0 || object_offset < off_min) {
3244 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3245 proc->pid, thread->pid,
3248 (u64)t->buffer->data_size);
3249 return_error = BR_FAILED_REPLY;
3250 return_error_param = -EINVAL;
3251 return_error_line = __LINE__;
3252 goto err_bad_offset;
3256 off_min = object_offset + object_size;
3257 switch (hdr->type) {
3258 case BINDER_TYPE_BINDER:
3259 case BINDER_TYPE_WEAK_BINDER: {
3260 struct flat_binder_object *fp;
3262 fp = to_flat_binder_object(hdr);
3263 ret = binder_translate_binder(fp, t, thread);
3265 return_error = BR_FAILED_REPLY;
3266 return_error_param = ret;
3267 return_error_line = __LINE__;
3268 goto err_translate_failed;
3270 binder_alloc_copy_to_buffer(&target_proc->alloc,
3271 t->buffer, object_offset,
3274 case BINDER_TYPE_HANDLE:
3275 case BINDER_TYPE_WEAK_HANDLE: {
3276 struct flat_binder_object *fp;
3278 fp = to_flat_binder_object(hdr);
3279 ret = binder_translate_handle(fp, t, thread);
3281 return_error = BR_FAILED_REPLY;
3282 return_error_param = ret;
3283 return_error_line = __LINE__;
3284 goto err_translate_failed;
3286 binder_alloc_copy_to_buffer(&target_proc->alloc,
3287 t->buffer, object_offset,
3291 case BINDER_TYPE_FD: {
3292 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3293 binder_size_t fd_offset = object_offset +
3294 (uintptr_t)&fp->fd - (uintptr_t)fp;
3295 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3296 thread, in_reply_to);
3299 return_error = BR_FAILED_REPLY;
3300 return_error_param = ret;
3301 return_error_line = __LINE__;
3302 goto err_translate_failed;
3305 binder_alloc_copy_to_buffer(&target_proc->alloc,
3306 t->buffer, object_offset,
3309 case BINDER_TYPE_FDA: {
3310 struct binder_object ptr_object;
3311 binder_size_t parent_offset;
3312 struct binder_fd_array_object *fda =
3313 to_binder_fd_array_object(hdr);
3314 size_t num_valid = (buffer_offset - off_start_offset) *
3315 sizeof(binder_size_t);
3316 struct binder_buffer_object *parent =
3317 binder_validate_ptr(target_proc, t->buffer,
3318 &ptr_object, fda->parent,
3323 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3324 proc->pid, thread->pid);
3325 return_error = BR_FAILED_REPLY;
3326 return_error_param = -EINVAL;
3327 return_error_line = __LINE__;
3328 goto err_bad_parent;
3330 if (!binder_validate_fixup(target_proc, t->buffer,
3335 last_fixup_min_off)) {
3336 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3337 proc->pid, thread->pid);
3338 return_error = BR_FAILED_REPLY;
3339 return_error_param = -EINVAL;
3340 return_error_line = __LINE__;
3341 goto err_bad_parent;
3343 ret = binder_translate_fd_array(fda, parent, t, thread,
3346 return_error = BR_FAILED_REPLY;
3347 return_error_param = ret;
3348 return_error_line = __LINE__;
3349 goto err_translate_failed;
3351 last_fixup_obj_off = parent_offset;
3352 last_fixup_min_off =
3353 fda->parent_offset + sizeof(u32) * fda->num_fds;
3355 case BINDER_TYPE_PTR: {
3356 struct binder_buffer_object *bp =
3357 to_binder_buffer_object(hdr);
3358 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3361 if (bp->length > buf_left) {
3362 binder_user_error("%d:%d got transaction with too large buffer\n",
3363 proc->pid, thread->pid);
3364 return_error = BR_FAILED_REPLY;
3365 return_error_param = -EINVAL;
3366 return_error_line = __LINE__;
3367 goto err_bad_offset;
3369 if (binder_alloc_copy_user_to_buffer(
3370 &target_proc->alloc,
3373 (const void __user *)
3374 (uintptr_t)bp->buffer,
3376 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3377 proc->pid, thread->pid);
3378 return_error_param = -EFAULT;
3379 return_error = BR_FAILED_REPLY;
3380 return_error_line = __LINE__;
3381 goto err_copy_data_failed;
3383 /* Fixup buffer pointer to target proc address space */
3384 bp->buffer = (uintptr_t)
3385 t->buffer->user_data + sg_buf_offset;
3386 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3388 num_valid = (buffer_offset - off_start_offset) *
3389 sizeof(binder_size_t);
3390 ret = binder_fixup_parent(t, thread, bp,
3394 last_fixup_min_off);
3396 return_error = BR_FAILED_REPLY;
3397 return_error_param = ret;
3398 return_error_line = __LINE__;
3399 goto err_translate_failed;
3401 binder_alloc_copy_to_buffer(&target_proc->alloc,
3402 t->buffer, object_offset,
3404 last_fixup_obj_off = object_offset;
3405 last_fixup_min_off = 0;
3408 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3409 proc->pid, thread->pid, hdr->type);
3410 return_error = BR_FAILED_REPLY;
3411 return_error_param = -EINVAL;
3412 return_error_line = __LINE__;
3413 goto err_bad_object_type;
3416 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3417 t->work.type = BINDER_WORK_TRANSACTION;
3420 binder_enqueue_thread_work(thread, tcomplete);
3421 binder_inner_proc_lock(target_proc);
3422 if (target_thread->is_dead) {
3423 binder_inner_proc_unlock(target_proc);
3424 goto err_dead_proc_or_thread;
3426 BUG_ON(t->buffer->async_transaction != 0);
3427 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3428 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3429 binder_inner_proc_unlock(target_proc);
3430 wake_up_interruptible_sync(&target_thread->wait);
3431 binder_free_transaction(in_reply_to);
3432 } else if (!(t->flags & TF_ONE_WAY)) {
3433 BUG_ON(t->buffer->async_transaction != 0);
3434 binder_inner_proc_lock(proc);
3436 * Defer the TRANSACTION_COMPLETE, so we don't return to
3437 * userspace immediately; this allows the target process to
3438 * immediately start processing this transaction, reducing
3439 * latency. We will then return the TRANSACTION_COMPLETE when
3440 * the target replies (or there is an error).
3442 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3444 t->from_parent = thread->transaction_stack;
3445 thread->transaction_stack = t;
3446 binder_inner_proc_unlock(proc);
3447 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3448 binder_inner_proc_lock(proc);
3449 binder_pop_transaction_ilocked(thread, t);
3450 binder_inner_proc_unlock(proc);
3451 goto err_dead_proc_or_thread;
3454 BUG_ON(target_node == NULL);
3455 BUG_ON(t->buffer->async_transaction != 1);
3456 binder_enqueue_thread_work(thread, tcomplete);
3457 if (!binder_proc_transaction(t, target_proc, NULL))
3458 goto err_dead_proc_or_thread;
3461 binder_thread_dec_tmpref(target_thread);
3462 binder_proc_dec_tmpref(target_proc);
3464 binder_dec_node_tmpref(target_node);
3466 * write barrier to synchronize with initialization
3470 WRITE_ONCE(e->debug_id_done, t_debug_id);
3473 err_dead_proc_or_thread:
3474 return_error = BR_DEAD_REPLY;
3475 return_error_line = __LINE__;
3476 binder_dequeue_work(proc, tcomplete);
3477 err_translate_failed:
3478 err_bad_object_type:
3481 err_copy_data_failed:
3482 binder_free_txn_fixups(t);
3483 trace_binder_transaction_failed_buffer_release(t->buffer);
3484 binder_transaction_buffer_release(target_proc, t->buffer,
3485 buffer_offset, true);
3487 binder_dec_node_tmpref(target_node);
3489 t->buffer->transaction = NULL;
3490 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3491 err_binder_alloc_buf_failed:
3494 security_release_secctx(secctx, secctx_sz);
3495 err_get_secctx_failed:
3497 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3498 err_alloc_tcomplete_failed:
3500 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3504 err_empty_call_stack:
3506 err_invalid_target_handle:
3508 binder_thread_dec_tmpref(target_thread);
3510 binder_proc_dec_tmpref(target_proc);
3512 binder_dec_node(target_node, 1, 0);
3513 binder_dec_node_tmpref(target_node);
3516 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3517 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3518 proc->pid, thread->pid, return_error, return_error_param,
3519 (u64)tr->data_size, (u64)tr->offsets_size,
3523 struct binder_transaction_log_entry *fe;
3525 e->return_error = return_error;
3526 e->return_error_param = return_error_param;
3527 e->return_error_line = return_error_line;
3528 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3531 * write barrier to synchronize with initialization
3535 WRITE_ONCE(e->debug_id_done, t_debug_id);
3536 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3539 BUG_ON(thread->return_error.cmd != BR_OK);
3541 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3542 binder_enqueue_thread_work(thread, &thread->return_error.work);
3543 binder_send_failed_reply(in_reply_to, return_error);
3545 thread->return_error.cmd = return_error;
3546 binder_enqueue_thread_work(thread, &thread->return_error.work);
3551 * binder_free_buf() - free the specified buffer
3552 * @proc: binder proc that owns buffer
3553 * @buffer: buffer to be freed
3555 * If buffer for an async transaction, enqueue the next async
3556 * transaction from the node.
3558 * Cleanup buffer and free it.
3561 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3563 if (buffer->transaction) {
3564 buffer->transaction->buffer = NULL;
3565 buffer->transaction = NULL;
3567 if (buffer->async_transaction && buffer->target_node) {
3568 struct binder_node *buf_node;
3569 struct binder_work *w;
3571 buf_node = buffer->target_node;
3572 binder_node_inner_lock(buf_node);
3573 BUG_ON(!buf_node->has_async_transaction);
3574 BUG_ON(buf_node->proc != proc);
3575 w = binder_dequeue_work_head_ilocked(
3576 &buf_node->async_todo);
3578 buf_node->has_async_transaction = false;
3580 binder_enqueue_work_ilocked(
3582 binder_wakeup_proc_ilocked(proc);
3584 binder_node_inner_unlock(buf_node);
3586 trace_binder_transaction_buffer_release(buffer);
3587 binder_transaction_buffer_release(proc, buffer, 0, false);
3588 binder_alloc_free_buf(&proc->alloc, buffer);
3591 static int binder_thread_write(struct binder_proc *proc,
3592 struct binder_thread *thread,
3593 binder_uintptr_t binder_buffer, size_t size,
3594 binder_size_t *consumed)
3597 struct binder_context *context = proc->context;
3598 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3599 void __user *ptr = buffer + *consumed;
3600 void __user *end = buffer + size;
3602 while (ptr < end && thread->return_error.cmd == BR_OK) {
3605 if (get_user(cmd, (uint32_t __user *)ptr))
3607 ptr += sizeof(uint32_t);
3608 trace_binder_command(cmd);
3609 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3610 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3611 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3612 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3620 const char *debug_string;
3621 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3622 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3623 struct binder_ref_data rdata;
3625 if (get_user(target, (uint32_t __user *)ptr))
3628 ptr += sizeof(uint32_t);
3630 if (increment && !target) {
3631 struct binder_node *ctx_mgr_node;
3632 mutex_lock(&context->context_mgr_node_lock);
3633 ctx_mgr_node = context->binder_context_mgr_node;
3635 ret = binder_inc_ref_for_node(
3637 strong, NULL, &rdata);
3638 mutex_unlock(&context->context_mgr_node_lock);
3641 ret = binder_update_ref_for_handle(
3642 proc, target, increment, strong,
3644 if (!ret && rdata.desc != target) {
3645 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3646 proc->pid, thread->pid,
3647 target, rdata.desc);
3651 debug_string = "IncRefs";
3654 debug_string = "Acquire";
3657 debug_string = "Release";
3661 debug_string = "DecRefs";
3665 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3666 proc->pid, thread->pid, debug_string,
3667 strong, target, ret);
3670 binder_debug(BINDER_DEBUG_USER_REFS,
3671 "%d:%d %s ref %d desc %d s %d w %d\n",
3672 proc->pid, thread->pid, debug_string,
3673 rdata.debug_id, rdata.desc, rdata.strong,
3677 case BC_INCREFS_DONE:
3678 case BC_ACQUIRE_DONE: {
3679 binder_uintptr_t node_ptr;
3680 binder_uintptr_t cookie;
3681 struct binder_node *node;
3684 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3686 ptr += sizeof(binder_uintptr_t);
3687 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3689 ptr += sizeof(binder_uintptr_t);
3690 node = binder_get_node(proc, node_ptr);
3692 binder_user_error("%d:%d %s u%016llx no match\n",
3693 proc->pid, thread->pid,
3694 cmd == BC_INCREFS_DONE ?
3700 if (cookie != node->cookie) {
3701 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3702 proc->pid, thread->pid,
3703 cmd == BC_INCREFS_DONE ?
3704 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3705 (u64)node_ptr, node->debug_id,
3706 (u64)cookie, (u64)node->cookie);
3707 binder_put_node(node);
3710 binder_node_inner_lock(node);
3711 if (cmd == BC_ACQUIRE_DONE) {
3712 if (node->pending_strong_ref == 0) {
3713 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3714 proc->pid, thread->pid,
3716 binder_node_inner_unlock(node);
3717 binder_put_node(node);
3720 node->pending_strong_ref = 0;
3722 if (node->pending_weak_ref == 0) {
3723 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3724 proc->pid, thread->pid,
3726 binder_node_inner_unlock(node);
3727 binder_put_node(node);
3730 node->pending_weak_ref = 0;
3732 free_node = binder_dec_node_nilocked(node,
3733 cmd == BC_ACQUIRE_DONE, 0);
3735 binder_debug(BINDER_DEBUG_USER_REFS,
3736 "%d:%d %s node %d ls %d lw %d tr %d\n",
3737 proc->pid, thread->pid,
3738 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3739 node->debug_id, node->local_strong_refs,
3740 node->local_weak_refs, node->tmp_refs);
3741 binder_node_inner_unlock(node);
3742 binder_put_node(node);
3745 case BC_ATTEMPT_ACQUIRE:
3746 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3748 case BC_ACQUIRE_RESULT:
3749 pr_err("BC_ACQUIRE_RESULT not supported\n");
3752 case BC_FREE_BUFFER: {
3753 binder_uintptr_t data_ptr;
3754 struct binder_buffer *buffer;
3756 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3758 ptr += sizeof(binder_uintptr_t);
3760 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3762 if (IS_ERR_OR_NULL(buffer)) {
3763 if (PTR_ERR(buffer) == -EPERM) {
3765 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3766 proc->pid, thread->pid,
3770 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3771 proc->pid, thread->pid,
3776 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3777 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3778 proc->pid, thread->pid, (u64)data_ptr,
3780 buffer->transaction ? "active" : "finished");
3781 binder_free_buf(proc, buffer);
3785 case BC_TRANSACTION_SG:
3787 struct binder_transaction_data_sg tr;
3789 if (copy_from_user(&tr, ptr, sizeof(tr)))
3792 binder_transaction(proc, thread, &tr.transaction_data,
3793 cmd == BC_REPLY_SG, tr.buffers_size);
3796 case BC_TRANSACTION:
3798 struct binder_transaction_data tr;
3800 if (copy_from_user(&tr, ptr, sizeof(tr)))
3803 binder_transaction(proc, thread, &tr,
3804 cmd == BC_REPLY, 0);
3808 case BC_REGISTER_LOOPER:
3809 binder_debug(BINDER_DEBUG_THREADS,
3810 "%d:%d BC_REGISTER_LOOPER\n",
3811 proc->pid, thread->pid);
3812 binder_inner_proc_lock(proc);
3813 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3814 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3815 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3816 proc->pid, thread->pid);
3817 } else if (proc->requested_threads == 0) {
3818 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3819 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3820 proc->pid, thread->pid);
3822 proc->requested_threads--;
3823 proc->requested_threads_started++;
3825 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3826 binder_inner_proc_unlock(proc);
3828 case BC_ENTER_LOOPER:
3829 binder_debug(BINDER_DEBUG_THREADS,
3830 "%d:%d BC_ENTER_LOOPER\n",
3831 proc->pid, thread->pid);
3832 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3833 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3834 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3835 proc->pid, thread->pid);
3837 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3839 case BC_EXIT_LOOPER:
3840 binder_debug(BINDER_DEBUG_THREADS,
3841 "%d:%d BC_EXIT_LOOPER\n",
3842 proc->pid, thread->pid);
3843 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3846 case BC_REQUEST_DEATH_NOTIFICATION:
3847 case BC_CLEAR_DEATH_NOTIFICATION: {
3849 binder_uintptr_t cookie;
3850 struct binder_ref *ref;
3851 struct binder_ref_death *death = NULL;
3853 if (get_user(target, (uint32_t __user *)ptr))
3855 ptr += sizeof(uint32_t);
3856 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3858 ptr += sizeof(binder_uintptr_t);
3859 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3861 * Allocate memory for death notification
3862 * before taking lock
3864 death = kzalloc(sizeof(*death), GFP_KERNEL);
3865 if (death == NULL) {
3866 WARN_ON(thread->return_error.cmd !=
3868 thread->return_error.cmd = BR_ERROR;
3869 binder_enqueue_thread_work(
3871 &thread->return_error.work);
3873 BINDER_DEBUG_FAILED_TRANSACTION,
3874 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3875 proc->pid, thread->pid);
3879 binder_proc_lock(proc);
3880 ref = binder_get_ref_olocked(proc, target, false);
3882 binder_user_error("%d:%d %s invalid ref %d\n",
3883 proc->pid, thread->pid,
3884 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3885 "BC_REQUEST_DEATH_NOTIFICATION" :
3886 "BC_CLEAR_DEATH_NOTIFICATION",
3888 binder_proc_unlock(proc);
3893 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3894 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3895 proc->pid, thread->pid,
3896 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3897 "BC_REQUEST_DEATH_NOTIFICATION" :
3898 "BC_CLEAR_DEATH_NOTIFICATION",
3899 (u64)cookie, ref->data.debug_id,
3900 ref->data.desc, ref->data.strong,
3901 ref->data.weak, ref->node->debug_id);
3903 binder_node_lock(ref->node);
3904 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3906 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3907 proc->pid, thread->pid);
3908 binder_node_unlock(ref->node);
3909 binder_proc_unlock(proc);
3913 binder_stats_created(BINDER_STAT_DEATH);
3914 INIT_LIST_HEAD(&death->work.entry);
3915 death->cookie = cookie;
3917 if (ref->node->proc == NULL) {
3918 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3920 binder_inner_proc_lock(proc);
3921 binder_enqueue_work_ilocked(
3922 &ref->death->work, &proc->todo);
3923 binder_wakeup_proc_ilocked(proc);
3924 binder_inner_proc_unlock(proc);
3927 if (ref->death == NULL) {
3928 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3929 proc->pid, thread->pid);
3930 binder_node_unlock(ref->node);
3931 binder_proc_unlock(proc);
3935 if (death->cookie != cookie) {
3936 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3937 proc->pid, thread->pid,
3940 binder_node_unlock(ref->node);
3941 binder_proc_unlock(proc);
3945 binder_inner_proc_lock(proc);
3946 if (list_empty(&death->work.entry)) {
3947 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3948 if (thread->looper &
3949 (BINDER_LOOPER_STATE_REGISTERED |
3950 BINDER_LOOPER_STATE_ENTERED))
3951 binder_enqueue_thread_work_ilocked(
3955 binder_enqueue_work_ilocked(
3958 binder_wakeup_proc_ilocked(
3962 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3963 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3965 binder_inner_proc_unlock(proc);
3967 binder_node_unlock(ref->node);
3968 binder_proc_unlock(proc);
3970 case BC_DEAD_BINDER_DONE: {
3971 struct binder_work *w;
3972 binder_uintptr_t cookie;
3973 struct binder_ref_death *death = NULL;
3975 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3978 ptr += sizeof(cookie);
3979 binder_inner_proc_lock(proc);
3980 list_for_each_entry(w, &proc->delivered_death,
3982 struct binder_ref_death *tmp_death =
3984 struct binder_ref_death,
3987 if (tmp_death->cookie == cookie) {
3992 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3993 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3994 proc->pid, thread->pid, (u64)cookie,
3996 if (death == NULL) {
3997 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3998 proc->pid, thread->pid, (u64)cookie);
3999 binder_inner_proc_unlock(proc);
4002 binder_dequeue_work_ilocked(&death->work);
4003 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4004 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4005 if (thread->looper &
4006 (BINDER_LOOPER_STATE_REGISTERED |
4007 BINDER_LOOPER_STATE_ENTERED))
4008 binder_enqueue_thread_work_ilocked(
4009 thread, &death->work);
4011 binder_enqueue_work_ilocked(
4014 binder_wakeup_proc_ilocked(proc);
4017 binder_inner_proc_unlock(proc);
4021 pr_err("%d:%d unknown command %d\n",
4022 proc->pid, thread->pid, cmd);
4025 *consumed = ptr - buffer;
4030 static void binder_stat_br(struct binder_proc *proc,
4031 struct binder_thread *thread, uint32_t cmd)
4033 trace_binder_return(cmd);
4034 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4035 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4036 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4037 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4041 static int binder_put_node_cmd(struct binder_proc *proc,
4042 struct binder_thread *thread,
4044 binder_uintptr_t node_ptr,
4045 binder_uintptr_t node_cookie,
4047 uint32_t cmd, const char *cmd_name)
4049 void __user *ptr = *ptrp;
4051 if (put_user(cmd, (uint32_t __user *)ptr))
4053 ptr += sizeof(uint32_t);
4055 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4057 ptr += sizeof(binder_uintptr_t);
4059 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4061 ptr += sizeof(binder_uintptr_t);
4063 binder_stat_br(proc, thread, cmd);
4064 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4065 proc->pid, thread->pid, cmd_name, node_debug_id,
4066 (u64)node_ptr, (u64)node_cookie);
4072 static int binder_wait_for_work(struct binder_thread *thread,
4076 struct binder_proc *proc = thread->proc;
4079 freezer_do_not_count();
4080 binder_inner_proc_lock(proc);
4082 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4083 if (binder_has_work_ilocked(thread, do_proc_work))
4086 list_add(&thread->waiting_thread_node,
4087 &proc->waiting_threads);
4088 binder_inner_proc_unlock(proc);
4090 binder_inner_proc_lock(proc);
4091 list_del_init(&thread->waiting_thread_node);
4092 if (signal_pending(current)) {
4097 finish_wait(&thread->wait, &wait);
4098 binder_inner_proc_unlock(proc);
4105 * binder_apply_fd_fixups() - finish fd translation
4106 * @proc: binder_proc associated @t->buffer
4107 * @t: binder transaction with list of fd fixups
4109 * Now that we are in the context of the transaction target
4110 * process, we can allocate and install fds. Process the
4111 * list of fds to translate and fixup the buffer with the
4114 * If we fail to allocate an fd, then free the resources by
4115 * fput'ing files that have not been processed and ksys_close'ing
4116 * any fds that have already been allocated.
4118 static int binder_apply_fd_fixups(struct binder_proc *proc,
4119 struct binder_transaction *t)
4120 {
4121 struct binder_txn_fd_fixup *fixup, *tmp;
4122 int ret = 0;
4124 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4125 int fd = get_unused_fd_flags(O_CLOEXEC);
4127 if (fd < 0) {
4128 binder_debug(BINDER_DEBUG_TRANSACTION,
4129 "failed fd fixup txn %d fd %d\n",
4130 t->debug_id, fd);
4131 ret = -ENOMEM;
4132 break;
4133 }
4134 binder_debug(BINDER_DEBUG_TRANSACTION,
4135 "fd fixup txn %d fd %d\n",
4136 t->debug_id, fd);
4137 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4138 fd_install(fd, fixup->file);
4139 fixup->file = NULL;
4140 binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4141 fixup->offset, &fd,
4142 sizeof(fd));
4143 }
4144 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4145 if (fixup->file) {
4146 fput(fixup->file);
4147 } else if (ret) {
4148 u32 fd;
4150 binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4151 t->buffer, fixup->offset,
4152 sizeof(fd));
4153 binder_deferred_fd_close(fd);
4154 }
4155 list_del(&fixup->fixup_entry);
4156 kfree(fixup);
4157 }
4159 return ret;
4160 }
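/*
 * Illustrative sketch (not part of the driver): on the send side a file
 * descriptor travels as a struct binder_fd_object from
 * uapi/linux/android/binder.h, e.g.:
 *
 *	struct binder_fd_object obj = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = some_fd,		// valid in the sender only
 *	};
 *
 * The fixup list walked above rewrites that fd slot in the target's
 * buffer once a descriptor has been reserved in the target process;
 * "some_fd" is a hypothetical name.
 */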
4162 static int binder_thread_read(struct binder_proc *proc,
4163 struct binder_thread *thread,
4164 binder_uintptr_t binder_buffer, size_t size,
4165 binder_size_t *consumed, int non_block)
4167 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4168 void __user *ptr = buffer + *consumed;
4169 void __user *end = buffer + size;
4172 int wait_for_proc_work;
4174 if (*consumed == 0) {
4175 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4176 return -EFAULT;
4177 ptr += sizeof(uint32_t);
4178 }
4180 retry:
4181 binder_inner_proc_lock(proc);
4182 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4183 binder_inner_proc_unlock(proc);
4185 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4187 trace_binder_wait_for_work(wait_for_proc_work,
4188 !!thread->transaction_stack,
4189 !binder_worklist_empty(proc, &thread->todo));
4190 if (wait_for_proc_work) {
4191 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4192 BINDER_LOOPER_STATE_ENTERED))) {
4193 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4194 proc->pid, thread->pid, thread->looper);
4195 wait_event_interruptible(binder_user_error_wait,
4196 binder_stop_on_user_error < 2);
4198 binder_set_nice(proc->default_priority);
4201 if (non_block) {
4202 if (!binder_has_work(thread, wait_for_proc_work))
4203 ret = -EAGAIN;
4204 } else {
4205 ret = binder_wait_for_work(thread, wait_for_proc_work);
4206 }
4208 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4210 if (ret)
4211 return ret;
4213 while (1) {
4214 uint32_t cmd;
4215 struct binder_transaction_data_secctx tr;
4216 struct binder_transaction_data *trd = &tr.transaction_data;
4217 struct binder_work *w = NULL;
4218 struct list_head *list = NULL;
4219 struct binder_transaction *t = NULL;
4220 struct binder_thread *t_from;
4221 size_t trsize = sizeof(*trd);
4223 binder_inner_proc_lock(proc);
4224 if (!binder_worklist_empty_ilocked(&thread->todo))
4225 list = &thread->todo;
4226 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4227 wait_for_proc_work)
4228 list = &proc->todo;
4229 else {
4230 binder_inner_proc_unlock(proc);
4232 /* no data added */
4233 if (ptr - buffer == 4 && !thread->looper_need_return)
4234 goto retry;
4235 break;
4236 }
4238 if (end - ptr < sizeof(tr) + 4) {
4239 binder_inner_proc_unlock(proc);
4242 w = binder_dequeue_work_head_ilocked(list);
4243 if (binder_worklist_empty_ilocked(&thread->todo))
4244 thread->process_todo = false;
4246 switch (w->type) {
4247 case BINDER_WORK_TRANSACTION: {
4248 binder_inner_proc_unlock(proc);
4249 t = container_of(w, struct binder_transaction, work);
4251 case BINDER_WORK_RETURN_ERROR: {
4252 struct binder_error *e = container_of(
4253 w, struct binder_error, work);
4255 WARN_ON(e->cmd == BR_OK);
4256 binder_inner_proc_unlock(proc);
4257 if (put_user(e->cmd, (uint32_t __user *)ptr))
4258 return -EFAULT;
4259 cmd = e->cmd;
4260 e->cmd = BR_OK;
4261 ptr += sizeof(uint32_t);
4263 binder_stat_br(proc, thread, cmd);
4264 } break;
4265 case BINDER_WORK_TRANSACTION_COMPLETE: {
4266 binder_inner_proc_unlock(proc);
4267 cmd = BR_TRANSACTION_COMPLETE;
4268 if (put_user(cmd, (uint32_t __user *)ptr))
4270 ptr += sizeof(uint32_t);
4272 binder_stat_br(proc, thread, cmd);
4273 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4274 "%d:%d BR_TRANSACTION_COMPLETE\n",
4275 proc->pid, thread->pid);
4277 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4279 case BINDER_WORK_NODE: {
4280 struct binder_node *node = container_of(w, struct binder_node, work);
4282 binder_uintptr_t node_ptr = node->ptr;
4283 binder_uintptr_t node_cookie = node->cookie;
4284 int node_debug_id = node->debug_id;
4287 void __user *orig_ptr = ptr;
4289 BUG_ON(proc != node->proc);
4290 strong = node->internal_strong_refs ||
4291 node->local_strong_refs;
4292 weak = !hlist_empty(&node->refs) ||
4293 node->local_weak_refs ||
4294 node->tmp_refs || strong;
4295 has_strong_ref = node->has_strong_ref;
4296 has_weak_ref = node->has_weak_ref;
4298 if (weak && !has_weak_ref) {
4299 node->has_weak_ref = 1;
4300 node->pending_weak_ref = 1;
4301 node->local_weak_refs++;
4303 if (strong && !has_strong_ref) {
4304 node->has_strong_ref = 1;
4305 node->pending_strong_ref = 1;
4306 node->local_strong_refs++;
4308 if (!strong && has_strong_ref)
4309 node->has_strong_ref = 0;
4310 if (!weak && has_weak_ref)
4311 node->has_weak_ref = 0;
4312 if (!weak && !strong) {
4313 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4314 "%d:%d node %d u%016llx c%016llx deleted\n",
4315 proc->pid, thread->pid,
4319 rb_erase(&node->rb_node, &proc->nodes);
4320 binder_inner_proc_unlock(proc);
4321 binder_node_lock(node);
4322 /*
4323 * Acquire the node lock before freeing the
4324 * node to serialize with other threads that
4325 * may have been holding the node lock while
4326 * decrementing this node (avoids race where
4327 * this thread frees while the other thread
4328 * is unlocking the node after the final
4329 * decrement)
4330 */
4331 binder_node_unlock(node);
4332 binder_free_node(node);
4333 } else
4334 binder_inner_proc_unlock(proc);
4336 if (weak && !has_weak_ref)
4337 ret = binder_put_node_cmd(
4338 proc, thread, &ptr, node_ptr,
4339 node_cookie, node_debug_id,
4340 BR_INCREFS, "BR_INCREFS");
4341 if (!ret && strong && !has_strong_ref)
4342 ret = binder_put_node_cmd(
4343 proc, thread, &ptr, node_ptr,
4344 node_cookie, node_debug_id,
4345 BR_ACQUIRE, "BR_ACQUIRE");
4346 if (!ret && !strong && has_strong_ref)
4347 ret = binder_put_node_cmd(
4348 proc, thread, &ptr, node_ptr,
4349 node_cookie, node_debug_id,
4350 BR_RELEASE, "BR_RELEASE");
4351 if (!ret && !weak && has_weak_ref)
4352 ret = binder_put_node_cmd(
4353 proc, thread, &ptr, node_ptr,
4354 node_cookie, node_debug_id,
4355 BR_DECREFS, "BR_DECREFS");
4356 if (orig_ptr == ptr)
4357 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4358 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4359 proc->pid, thread->pid,
4366 case BINDER_WORK_DEAD_BINDER:
4367 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4368 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4369 struct binder_ref_death *death;
4371 binder_uintptr_t cookie;
4373 death = container_of(w, struct binder_ref_death, work);
4374 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4375 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4377 cmd = BR_DEAD_BINDER;
4378 cookie = death->cookie;
4380 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4381 "%d:%d %s %016llx\n",
4382 proc->pid, thread->pid,
4383 cmd == BR_DEAD_BINDER ?
4384 "BR_DEAD_BINDER" :
4385 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4386 (u64)cookie);
4387 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4388 binder_inner_proc_unlock(proc);
4389 kfree(death);
4390 binder_stats_deleted(BINDER_STAT_DEATH);
4391 } else {
4392 binder_enqueue_work_ilocked(
4393 w, &proc->delivered_death);
4394 binder_inner_proc_unlock(proc);
4395 }
4396 if (put_user(cmd, (uint32_t __user *)ptr))
4397 return -EFAULT;
4398 ptr += sizeof(uint32_t);
4399 if (put_user(cookie,
4400 (binder_uintptr_t __user *)ptr))
4401 return -EFAULT;
4402 ptr += sizeof(binder_uintptr_t);
4403 binder_stat_br(proc, thread, cmd);
4404 if (cmd == BR_DEAD_BINDER)
4405 goto done; /* DEAD_BINDER notifications can cause transactions */
4406 } break;
4407 default:
4408 binder_inner_proc_unlock(proc);
4409 pr_err("%d:%d: bad work type %d\n",
4410 proc->pid, thread->pid, w->type);
4411 break;
4412 }
4414 if (!t)
4415 continue;
4417 BUG_ON(t->buffer == NULL);
4418 if (t->buffer->target_node) {
4419 struct binder_node *target_node = t->buffer->target_node;
4421 trd->target.ptr = target_node->ptr;
4422 trd->cookie = target_node->cookie;
4423 t->saved_priority = task_nice(current);
4424 if (t->priority < target_node->min_priority &&
4425 !(t->flags & TF_ONE_WAY))
4426 binder_set_nice(t->priority);
4427 else if (!(t->flags & TF_ONE_WAY) ||
4428 t->saved_priority > target_node->min_priority)
4429 binder_set_nice(target_node->min_priority);
4430 cmd = BR_TRANSACTION;
4431 } else {
4432 trd->target.ptr = 0;
4433 trd->cookie = 0;
4434 cmd = BR_REPLY;
4435 }
4436 trd->code = t->code;
4437 trd->flags = t->flags;
4438 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4440 t_from = binder_get_txn_from(t);
4441 if (t_from) {
4442 struct task_struct *sender = t_from->proc->tsk;
4444 trd->sender_pid =
4445 task_tgid_nr_ns(sender,
4446 task_active_pid_ns(current));
4447 } else {
4448 trd->sender_pid = 0;
4449 }
4451 ret = binder_apply_fd_fixups(proc, t);
4452 if (ret) {
4453 struct binder_buffer *buffer = t->buffer;
4454 bool oneway = !!(t->flags & TF_ONE_WAY);
4455 int tid = t->debug_id;
4458 binder_thread_dec_tmpref(t_from);
4459 buffer->transaction = NULL;
4460 binder_cleanup_transaction(t, "fd fixups failed",
4462 binder_free_buf(proc, buffer);
4463 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4464 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4465 proc->pid, thread->pid,
4467 (cmd == BR_REPLY ? "reply " : ""),
4468 tid, BR_FAILED_REPLY, ret, __LINE__);
4469 if (cmd == BR_REPLY) {
4470 cmd = BR_FAILED_REPLY;
4471 if (put_user(cmd, (uint32_t __user *)ptr))
4472 return -EFAULT;
4473 ptr += sizeof(uint32_t);
4474 binder_stat_br(proc, thread, cmd);
4475 break;
4476 }
4477 continue;
4478 }
4479 trd->data_size = t->buffer->data_size;
4480 trd->offsets_size = t->buffer->offsets_size;
4481 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4482 trd->data.ptr.offsets = trd->data.ptr.buffer +
4483 ALIGN(t->buffer->data_size,
4484 sizeof(void *));
4486 tr.secctx = t->security_ctx;
4487 if (t->security_ctx) {
4488 cmd = BR_TRANSACTION_SEC_CTX;
4489 trsize = sizeof(tr);
4490 }
4491 if (put_user(cmd, (uint32_t __user *)ptr)) {
4492 if (t_from)
4493 binder_thread_dec_tmpref(t_from);
4495 binder_cleanup_transaction(t, "put_user failed",
4496 BR_FAILED_REPLY);
4498 return -EFAULT;
4499 }
4500 ptr += sizeof(uint32_t);
4501 if (copy_to_user(ptr, &tr, trsize)) {
4502 if (t_from)
4503 binder_thread_dec_tmpref(t_from);
4505 binder_cleanup_transaction(t, "copy_to_user failed",
4506 BR_FAILED_REPLY);
4508 return -EFAULT;
4509 }
4510 ptr += trsize;
4512 trace_binder_transaction_received(t);
4513 binder_stat_br(proc, thread, cmd);
4514 binder_debug(BINDER_DEBUG_TRANSACTION,
4515 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4516 proc->pid, thread->pid,
4517 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4518 (cmd == BR_TRANSACTION_SEC_CTX) ?
4519 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4520 t->debug_id, t_from ? t_from->proc->pid : 0,
4521 t_from ? t_from->pid : 0, cmd,
4522 t->buffer->data_size, t->buffer->offsets_size,
4523 (u64)trd->data.ptr.buffer,
4524 (u64)trd->data.ptr.offsets);
4526 if (t_from)
4527 binder_thread_dec_tmpref(t_from);
4528 t->buffer->allow_user_free = 1;
4529 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4530 binder_inner_proc_lock(thread->proc);
4531 t->to_parent = thread->transaction_stack;
4532 t->to_thread = thread;
4533 thread->transaction_stack = t;
4534 binder_inner_proc_unlock(thread->proc);
4535 } else {
4536 binder_free_transaction(t);
4537 }
4538 break;
4539 }
4541 done:
4543 *consumed = ptr - buffer;
4544 binder_inner_proc_lock(proc);
4545 if (proc->requested_threads == 0 &&
4546 list_empty(&thread->proc->waiting_threads) &&
4547 proc->requested_threads_started < proc->max_threads &&
4548 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4549 BINDER_LOOPER_STATE_ENTERED))
4550 /* the user-space code fails to spawn a new thread if we leave this out */) {
4551 proc->requested_threads++;
4552 binder_inner_proc_unlock(proc);
4553 binder_debug(BINDER_DEBUG_THREADS,
4554 "%d:%d BR_SPAWN_LOOPER\n",
4555 proc->pid, thread->pid);
4556 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4557 return -EFAULT;
4558 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4559 } else
4560 binder_inner_proc_unlock(proc);
4561 return 0;
4562 }
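/*
 * Illustrative sketch (not part of the driver): a minimal userspace loop
 * driving binder_thread_read() through BINDER_WRITE_READ, assuming fd
 * was opened and mapped as in the later examples:
 *
 *	uint32_t rbuf[128];
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) >= 0) {
 *		// rbuf starts with BR_NOOP, then further BR_* commands;
 *		// bwr.read_consumed reports how many bytes were filled
 *	}
 */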
4564 static void binder_release_work(struct binder_proc *proc,
4565 struct list_head *list)
4567 struct binder_work *w;
4569 while (1) {
4570 w = binder_dequeue_work_head(proc, list);
4571 if (!w)
4572 return;
4574 switch (w->type) {
4575 case BINDER_WORK_TRANSACTION: {
4576 struct binder_transaction *t;
4578 t = container_of(w, struct binder_transaction, work);
4580 binder_cleanup_transaction(t, "process died.",
4583 case BINDER_WORK_RETURN_ERROR: {
4584 struct binder_error *e = container_of(
4585 w, struct binder_error, work);
4587 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4588 "undelivered TRANSACTION_ERROR: %u\n",
4591 case BINDER_WORK_TRANSACTION_COMPLETE: {
4592 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4593 "undelivered TRANSACTION_COMPLETE\n");
4595 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4597 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4598 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4599 struct binder_ref_death *death;
4601 death = container_of(w, struct binder_ref_death, work);
4602 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4603 "undelivered death notification, %016llx\n",
4604 (u64)death->cookie);
4606 binder_stats_deleted(BINDER_STAT_DEATH);
4609 pr_err("unexpected work type, %d, not freed\n",
4617 static struct binder_thread *binder_get_thread_ilocked(
4618 struct binder_proc *proc, struct binder_thread *new_thread)
4620 struct binder_thread *thread = NULL;
4621 struct rb_node *parent = NULL;
4622 struct rb_node **p = &proc->threads.rb_node;
4624 while (*p) {
4625 parent = *p;
4626 thread = rb_entry(parent, struct binder_thread, rb_node);
4628 if (current->pid < thread->pid)
4629 p = &(*p)->rb_left;
4630 else if (current->pid > thread->pid)
4631 p = &(*p)->rb_right;
4632 else
4633 return thread;
4634 }
4635 if (!new_thread)
4636 return NULL;
4637 thread = new_thread;
4638 binder_stats_created(BINDER_STAT_THREAD);
4639 thread->proc = proc;
4640 thread->pid = current->pid;
4641 atomic_set(&thread->tmp_ref, 0);
4642 init_waitqueue_head(&thread->wait);
4643 INIT_LIST_HEAD(&thread->todo);
4644 rb_link_node(&thread->rb_node, parent, p);
4645 rb_insert_color(&thread->rb_node, &proc->threads);
4646 thread->looper_need_return = true;
4647 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4648 thread->return_error.cmd = BR_OK;
4649 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4650 thread->reply_error.cmd = BR_OK;
4651 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4652 return thread;
4653 }
4655 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4657 struct binder_thread *thread;
4658 struct binder_thread *new_thread;
4660 binder_inner_proc_lock(proc);
4661 thread = binder_get_thread_ilocked(proc, NULL);
4662 binder_inner_proc_unlock(proc);
4663 if (!thread) {
4664 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4665 if (new_thread == NULL)
4666 return NULL;
4667 binder_inner_proc_lock(proc);
4668 thread = binder_get_thread_ilocked(proc, new_thread);
4669 binder_inner_proc_unlock(proc);
4670 if (thread != new_thread)
4671 kfree(new_thread);
4672 }
4673 return thread;
4674 }
4676 static void binder_free_proc(struct binder_proc *proc)
4678 BUG_ON(!list_empty(&proc->todo));
4679 BUG_ON(!list_empty(&proc->delivered_death));
4680 binder_alloc_deferred_release(&proc->alloc);
4681 put_task_struct(proc->tsk);
4682 binder_stats_deleted(BINDER_STAT_PROC);
4683 kfree(proc);
4684 }
4686 static void binder_free_thread(struct binder_thread *thread)
4688 BUG_ON(!list_empty(&thread->todo));
4689 binder_stats_deleted(BINDER_STAT_THREAD);
4690 binder_proc_dec_tmpref(thread->proc);
4691 kfree(thread);
4692 }
4694 static int binder_thread_release(struct binder_proc *proc,
4695 struct binder_thread *thread)
4697 struct binder_transaction *t;
4698 struct binder_transaction *send_reply = NULL;
4699 int active_transactions = 0;
4700 struct binder_transaction *last_t = NULL;
4702 binder_inner_proc_lock(thread->proc);
4703 /*
4704 * take a ref on the proc so it survives
4705 * after we remove this thread from proc->threads.
4706 * The corresponding dec is when we actually
4707 * free the thread in binder_free_thread()
4708 */
4709 proc->tmp_ref++;
4710 /*
4711 * take a ref on this thread to ensure it
4712 * survives while we are releasing it
4713 */
4714 atomic_inc(&thread->tmp_ref);
4715 rb_erase(&thread->rb_node, &proc->threads);
4716 t = thread->transaction_stack;
4717 if (t) {
4718 spin_lock(&t->lock);
4719 if (t->to_thread == thread)
4720 send_reply = t;
4721 } else {
4722 __acquire(&t->lock);
4723 }
4724 thread->is_dead = true;
4726 while (t) {
4727 last_t = t;
4728 active_transactions++;
4729 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4730 "release %d:%d transaction %d %s, still active\n",
4731 proc->pid, thread->pid,
4733 (t->to_thread == thread) ? "in" : "out");
4735 if (t->to_thread == thread) {
4736 t->to_proc = NULL;
4737 t->to_thread = NULL;
4738 if (t->buffer) {
4739 t->buffer->transaction = NULL;
4740 t->buffer = NULL;
4741 }
4742 t = t->to_parent;
4743 } else if (t->from == thread) {
4744 t->from = NULL;
4745 t = t->from_parent;
4746 } else
4747 BUG();
4748 spin_unlock(&last_t->lock);
4749 if (t)
4750 spin_lock(&t->lock);
4751 else
4752 __acquire(&t->lock);
4753 }
4754 /* annotation for sparse, lock not acquired in last iteration above */
4755 __release(&t->lock);
4757 /*
4758 * If this thread used poll, make sure we remove the waitqueue
4759 * from any epoll data structures holding it with POLLFREE.
4760 * waitqueue_active() is safe to use here because we're holding
4761 * the inner lock.
4762 */
4763 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4764 waitqueue_active(&thread->wait)) {
4765 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4766 }
4768 binder_inner_proc_unlock(thread->proc);
4770 /*
4771 * This is needed to avoid races between wake_up_poll() above and
4772 * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4773 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4774 * lock, so we can be sure it's done after calling synchronize_rcu().
4775 */
4776 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4777 synchronize_rcu();
4779 if (send_reply)
4780 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4781 binder_release_work(proc, &thread->todo);
4782 binder_thread_dec_tmpref(thread);
4783 return active_transactions;
4784 }
4786 static __poll_t binder_poll(struct file *filp,
4787 struct poll_table_struct *wait)
4789 struct binder_proc *proc = filp->private_data;
4790 struct binder_thread *thread = NULL;
4791 bool wait_for_proc_work;
4793 thread = binder_get_thread(proc);
4794 if (!thread)
4795 return POLLERR;
4797 binder_inner_proc_lock(thread->proc);
4798 thread->looper |= BINDER_LOOPER_STATE_POLL;
4799 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4801 binder_inner_proc_unlock(thread->proc);
4803 poll_wait(filp, &thread->wait, wait);
4805 if (binder_has_work(thread, wait_for_proc_work))
4806 return EPOLLIN;
4808 return 0;
4809 }
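/*
 * Illustrative sketch (not part of the driver): userspace can sleep on
 * the fd instead of blocking in BINDER_WRITE_READ, e.g.:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;// binder work is pending, drain it via BINDER_WRITE_READ
 *
 * Note binder_poll() registers only the calling thread's waitqueue, so
 * polling and reading should happen on the same thread.
 */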
4811 static int binder_ioctl_write_read(struct file *filp,
4812 unsigned int cmd, unsigned long arg,
4813 struct binder_thread *thread)
4814 {
4815 int ret;
4816 struct binder_proc *proc = filp->private_data;
4817 unsigned int size = _IOC_SIZE(cmd);
4818 void __user *ubuf = (void __user *)arg;
4819 struct binder_write_read bwr;
4821 if (size != sizeof(struct binder_write_read)) {
4822 ret = -EINVAL;
4823 goto out;
4824 }
4825 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4826 ret = -EFAULT;
4827 goto out;
4828 }
4829 binder_debug(BINDER_DEBUG_READ_WRITE,
4830 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4831 proc->pid, thread->pid,
4832 (u64)bwr.write_size, (u64)bwr.write_buffer,
4833 (u64)bwr.read_size, (u64)bwr.read_buffer);
4835 if (bwr.write_size > 0) {
4836 ret = binder_thread_write(proc, thread,
4837 bwr.write_buffer,
4838 bwr.write_size,
4839 &bwr.write_consumed);
4840 trace_binder_write_done(ret);
4841 if (ret < 0) {
4842 bwr.read_consumed = 0;
4843 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4844 ret = -EFAULT;
4845 goto out;
4846 }
4847 }
4848 if (bwr.read_size > 0) {
4849 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4850 bwr.read_size,
4851 &bwr.read_consumed,
4852 filp->f_flags & O_NONBLOCK);
4853 trace_binder_read_done(ret);
4854 binder_inner_proc_lock(proc);
4855 if (!binder_worklist_empty_ilocked(&proc->todo))
4856 binder_wakeup_proc_ilocked(proc);
4857 binder_inner_proc_unlock(proc);
4858 if (ret < 0) {
4859 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4860 ret = -EFAULT;
4861 goto out;
4862 }
4863 }
4864 binder_debug(BINDER_DEBUG_READ_WRITE,
4865 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4866 proc->pid, thread->pid,
4867 (u64)bwr.write_consumed, (u64)bwr.write_size,
4868 (u64)bwr.read_consumed, (u64)bwr.read_size);
4869 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4870 ret = -EFAULT;
4871 goto out;
4872 }
4873 out:
4874 return ret;
4875 }
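/*
 * Illustrative sketch (not part of the driver): a write-only use of the
 * same ioctl, e.g. to enter the looper:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * A zero read_size skips binder_thread_read() entirely, matching the
 * bwr.read_size > 0 test above.
 */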
4877 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4878 struct flat_binder_object *fbo)
4881 struct binder_proc *proc = filp->private_data;
4882 struct binder_context *context = proc->context;
4883 struct binder_node *new_node;
4884 kuid_t curr_euid = current_euid();
4886 mutex_lock(&context->context_mgr_node_lock);
4887 if (context->binder_context_mgr_node) {
4888 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4892 ret = security_binder_set_context_mgr(proc->tsk);
4895 if (uid_valid(context->binder_context_mgr_uid)) {
4896 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4897 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4898 from_kuid(&init_user_ns, curr_euid),
4899 from_kuid(&init_user_ns,
4900 context->binder_context_mgr_uid));
4901 ret = -EPERM;
4902 goto out;
4903 }
4904 } else {
4905 context->binder_context_mgr_uid = curr_euid;
4906 }
4907 new_node = binder_new_node(proc, fbo);
4908 if (!new_node) {
4909 ret = -ENOMEM;
4910 goto out;
4911 }
4912 binder_node_lock(new_node);
4913 new_node->local_weak_refs++;
4914 new_node->local_strong_refs++;
4915 new_node->has_strong_ref = 1;
4916 new_node->has_weak_ref = 1;
4917 context->binder_context_mgr_node = new_node;
4918 binder_node_unlock(new_node);
4919 binder_put_node(new_node);
4920 out:
4921 mutex_unlock(&context->context_mgr_node_lock);
4922 return ret;
4923 }
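/*
 * Illustrative sketch (not part of the driver): the context manager
 * (traditionally servicemanager) claims handle 0 right after opening
 * its device:
 *
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		;// only one process per context may succeed here
 *
 * BINDER_SET_CONTEXT_MGR_EXT passes a flat_binder_object instead, which
 * is how this function receives a non-NULL fbo.
 */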
4925 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4926 struct binder_node_info_for_ref *info)
4928 struct binder_node *node;
4929 struct binder_context *context = proc->context;
4930 __u32 handle = info->handle;
4932 if (info->strong_count || info->weak_count || info->reserved1 ||
4933 info->reserved2 || info->reserved3) {
4934 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4939 /* This ioctl may only be used by the context manager */
4940 mutex_lock(&context->context_mgr_node_lock);
4941 if (!context->binder_context_mgr_node ||
4942 context->binder_context_mgr_node->proc != proc) {
4943 mutex_unlock(&context->context_mgr_node_lock);
4946 mutex_unlock(&context->context_mgr_node_lock);
4948 node = binder_get_node_from_ref(proc, handle, true, NULL);
4952 info->strong_count = node->local_strong_refs +
4953 node->internal_strong_refs;
4954 info->weak_count = node->local_weak_refs;
4956 binder_put_node(node);
4961 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4962 struct binder_node_debug_info *info)
4965 binder_uintptr_t ptr = info->ptr;
4967 memset(info, 0, sizeof(*info));
4969 binder_inner_proc_lock(proc);
4970 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4971 struct binder_node *node = rb_entry(n, struct binder_node,
4973 if (node->ptr > ptr) {
4974 info->ptr = node->ptr;
4975 info->cookie = node->cookie;
4976 info->has_strong_ref = node->has_strong_ref;
4977 info->has_weak_ref = node->has_weak_ref;
4981 binder_inner_proc_unlock(proc);
4986 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4989 struct binder_proc *proc = filp->private_data;
4990 struct binder_thread *thread;
4991 unsigned int size = _IOC_SIZE(cmd);
4992 void __user *ubuf = (void __user *)arg;
4994 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4995 proc->pid, current->pid, cmd, arg);*/
4997 binder_selftest_alloc(&proc->alloc);
4999 trace_binder_ioctl(cmd, arg);
5001 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5002 if (ret)
5003 goto err_unlocked;
5005 thread = binder_get_thread(proc);
5006 if (thread == NULL) {
5007 ret = -ENOMEM;
5008 goto err;
5009 }
5011 switch (cmd) {
5012 case BINDER_WRITE_READ:
5013 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5014 if (ret)
5015 goto err;
5016 break;
5017 case BINDER_SET_MAX_THREADS: {
5020 if (copy_from_user(&max_threads, ubuf,
5021 sizeof(max_threads))) {
5025 binder_inner_proc_lock(proc);
5026 proc->max_threads = max_threads;
5027 binder_inner_proc_unlock(proc);
5030 case BINDER_SET_CONTEXT_MGR_EXT: {
5031 struct flat_binder_object fbo;
5033 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5037 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5042 case BINDER_SET_CONTEXT_MGR:
5043 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5047 case BINDER_THREAD_EXIT:
5048 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5049 proc->pid, thread->pid);
5050 binder_thread_release(proc, thread);
5053 case BINDER_VERSION: {
5054 struct binder_version __user *ver = ubuf;
5056 if (size != sizeof(struct binder_version)) {
5057 ret = -EINVAL;
5058 goto err;
5059 }
5060 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5061 &ver->protocol_version)) {
5062 ret = -EINVAL;
5063 goto err;
5064 }
5065 break;
5066 }
5067 case BINDER_GET_NODE_INFO_FOR_REF: {
5068 struct binder_node_info_for_ref info;
5070 if (copy_from_user(&info, ubuf, sizeof(info))) {
5075 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5079 if (copy_to_user(ubuf, &info, sizeof(info))) {
5086 case BINDER_GET_NODE_DEBUG_INFO: {
5087 struct binder_node_debug_info info;
5089 if (copy_from_user(&info, ubuf, sizeof(info))) {
5090 ret = -EFAULT;
5091 goto err;
5092 }
5094 ret = binder_ioctl_get_node_debug_info(proc, &info);
5095 if (ret < 0)
5096 goto err;
5098 if (copy_to_user(ubuf, &info, sizeof(info))) {
5099 ret = -EFAULT;
5100 goto err;
5101 }
5102 break;
5103 }
5104 default:
5105 ret = -EINVAL;
5106 goto err;
5107 }
5108 ret = 0;
5109 err:
5110 if (thread)
5111 thread->looper_need_return = false;
5112 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5113 if (ret && ret != -ERESTARTSYS)
5114 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5115 err_unlocked:
5116 trace_binder_ioctl_done(ret);
5117 return ret;
5118 }
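/*
 * Illustrative sketch (not part of the driver): a client usually
 * validates the protocol before issuing any other command:
 *
 *	struct binder_version vers;
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		;// kernel and userspace headers disagree, bail out
 */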
5120 static void binder_vma_open(struct vm_area_struct *vma)
5122 struct binder_proc *proc = vma->vm_private_data;
5124 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5125 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5126 proc->pid, vma->vm_start, vma->vm_end,
5127 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5128 (unsigned long)pgprot_val(vma->vm_page_prot));
5131 static void binder_vma_close(struct vm_area_struct *vma)
5133 struct binder_proc *proc = vma->vm_private_data;
5135 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5136 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5137 proc->pid, vma->vm_start, vma->vm_end,
5138 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5139 (unsigned long)pgprot_val(vma->vm_page_prot));
5140 binder_alloc_vma_close(&proc->alloc);
5143 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5145 return VM_FAULT_SIGBUS;
5148 static const struct vm_operations_struct binder_vm_ops = {
5149 .open = binder_vma_open,
5150 .close = binder_vma_close,
5151 .fault = binder_vm_fault,
5154 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5157 struct binder_proc *proc = filp->private_data;
5158 const char *failure_string;
5160 if (proc->tsk != current->group_leader)
5161 return -EINVAL;
5163 if ((vma->vm_end - vma->vm_start) > SZ_4M)
5164 vma->vm_end = vma->vm_start + SZ_4M;
5166 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5167 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5168 __func__, proc->pid, vma->vm_start, vma->vm_end,
5169 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5170 (unsigned long)pgprot_val(vma->vm_page_prot));
5172 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5173 ret = -EPERM;
5174 failure_string = "bad vm_flags";
5175 goto err_bad_arg;
5176 }
5177 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5178 vma->vm_flags &= ~VM_MAYWRITE;
5180 vma->vm_ops = &binder_vm_ops;
5181 vma->vm_private_data = proc;
5183 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5184 if (ret)
5185 return ret;
5186 return 0;
5188 err_bad_arg:
5189 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5190 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5191 return ret;
5192 }
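/*
 * Illustrative sketch (not part of the driver): userspace maps the
 * transaction buffer read-only and private, which satisfies the checks
 * above (FORBIDDEN_MMAP_FLAGS rejects VM_WRITE and the length is
 * clamped to SZ_4M):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * The 1 MB length here is an arbitrary example value.
 */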
5194 static int binder_open(struct inode *nodp, struct file *filp)
5196 struct binder_proc *proc;
5197 struct binder_device *binder_dev;
5199 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5200 current->group_leader->pid, current->pid);
5202 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5203 if (proc == NULL)
5204 return -ENOMEM;
5205 spin_lock_init(&proc->inner_lock);
5206 spin_lock_init(&proc->outer_lock);
5207 get_task_struct(current->group_leader);
5208 proc->tsk = current->group_leader;
5209 INIT_LIST_HEAD(&proc->todo);
5210 proc->default_priority = task_nice(current);
5211 /* binderfs stashes devices in i_private */
5212 if (is_binderfs_device(nodp))
5213 binder_dev = nodp->i_private;
5214 else
5215 binder_dev = container_of(filp->private_data,
5216 struct binder_device, miscdev);
5217 proc->context = &binder_dev->context;
5218 binder_alloc_init(&proc->alloc);
5220 binder_stats_created(BINDER_STAT_PROC);
5221 proc->pid = current->group_leader->pid;
5222 INIT_LIST_HEAD(&proc->delivered_death);
5223 INIT_LIST_HEAD(&proc->waiting_threads);
5224 filp->private_data = proc;
5226 mutex_lock(&binder_procs_lock);
5227 hlist_add_head(&proc->proc_node, &binder_procs);
5228 mutex_unlock(&binder_procs_lock);
5230 if (binder_debugfs_dir_entry_proc) {
5233 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5234 /*
5235 * proc debug entries are shared between contexts, so
5236 * this will fail if the process tries to open the driver
5237 * again with a different context. The printing code will
5238 * anyway print all contexts that a given PID has, so this
5239 * is not a problem.
5240 */
5241 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5242 binder_debugfs_dir_entry_proc,
5243 (void *)(unsigned long)proc->pid,
5244 &proc_fops);
5245 }
5247 return 0;
5248 }
5250 static int binder_flush(struct file *filp, fl_owner_t id)
5252 struct binder_proc *proc = filp->private_data;
5254 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5259 static void binder_deferred_flush(struct binder_proc *proc)
5264 binder_inner_proc_lock(proc);
5265 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5266 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5268 thread->looper_need_return = true;
5269 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5270 wake_up_interruptible(&thread->wait);
5274 binder_inner_proc_unlock(proc);
5276 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5277 "binder_flush: %d woke %d threads\n", proc->pid,
5281 static int binder_release(struct inode *nodp, struct file *filp)
5283 struct binder_proc *proc = filp->private_data;
5285 debugfs_remove(proc->debugfs_entry);
5286 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5291 static int binder_node_release(struct binder_node *node, int refs)
5293 struct binder_ref *ref;
5295 struct binder_proc *proc = node->proc;
5297 binder_release_work(proc, &node->async_todo);
5299 binder_node_lock(node);
5300 binder_inner_proc_lock(proc);
5301 binder_dequeue_work_ilocked(&node->work);
5302 /*
5303 * The caller must have taken a temporary ref on the node.
5304 */
5305 BUG_ON(!node->tmp_refs);
5306 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5307 binder_inner_proc_unlock(proc);
5308 binder_node_unlock(node);
5309 binder_free_node(node);
5315 node->local_strong_refs = 0;
5316 node->local_weak_refs = 0;
5317 binder_inner_proc_unlock(proc);
5319 spin_lock(&binder_dead_nodes_lock);
5320 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5321 spin_unlock(&binder_dead_nodes_lock);
5323 hlist_for_each_entry(ref, &node->refs, node_entry) {
5326 * Need the node lock to synchronize
5327 * with new notification requests and the
5328 * inner lock to synchronize with queued
5329 * death notifications.
5331 binder_inner_proc_lock(ref->proc);
5333 binder_inner_proc_unlock(ref->proc);
5339 BUG_ON(!list_empty(&ref->death->work.entry));
5340 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5341 binder_enqueue_work_ilocked(&ref->death->work,
5343 binder_wakeup_proc_ilocked(ref->proc);
5344 binder_inner_proc_unlock(ref->proc);
5347 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5348 "node %d now dead, refs %d, death %d\n",
5349 node->debug_id, refs, death);
5350 binder_node_unlock(node);
5351 binder_put_node(node);
5356 static void binder_deferred_release(struct binder_proc *proc)
5358 struct binder_context *context = proc->context;
5360 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5362 mutex_lock(&binder_procs_lock);
5363 hlist_del(&proc->proc_node);
5364 mutex_unlock(&binder_procs_lock);
5366 mutex_lock(&context->context_mgr_node_lock);
5367 if (context->binder_context_mgr_node &&
5368 context->binder_context_mgr_node->proc == proc) {
5369 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5370 "%s: %d context_mgr_node gone\n",
5371 __func__, proc->pid);
5372 context->binder_context_mgr_node = NULL;
5374 mutex_unlock(&context->context_mgr_node_lock);
5375 binder_inner_proc_lock(proc);
5376 /*
5377 * Make sure proc stays alive after we
5378 * remove all the threads
5379 */
5380 proc->tmp_ref++;
5382 proc->is_dead = true;
5383 threads = 0;
5384 active_transactions = 0;
5385 while ((n = rb_first(&proc->threads))) {
5386 struct binder_thread *thread;
5388 thread = rb_entry(n, struct binder_thread, rb_node);
5389 binder_inner_proc_unlock(proc);
5391 active_transactions += binder_thread_release(proc, thread);
5392 binder_inner_proc_lock(proc);
5397 while ((n = rb_first(&proc->nodes))) {
5398 struct binder_node *node;
5400 node = rb_entry(n, struct binder_node, rb_node);
5403 * take a temporary ref on the node before
5404 * calling binder_node_release() which will either
5405 * kfree() the node or call binder_put_node()
5407 binder_inc_node_tmpref_ilocked(node);
5408 rb_erase(&node->rb_node, &proc->nodes);
5409 binder_inner_proc_unlock(proc);
5410 incoming_refs = binder_node_release(node, incoming_refs);
5411 binder_inner_proc_lock(proc);
5413 binder_inner_proc_unlock(proc);
5416 binder_proc_lock(proc);
5417 while ((n = rb_first(&proc->refs_by_desc))) {
5418 struct binder_ref *ref;
5420 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5422 binder_cleanup_ref_olocked(ref);
5423 binder_proc_unlock(proc);
5424 binder_free_ref(ref);
5425 binder_proc_lock(proc);
5427 binder_proc_unlock(proc);
5429 binder_release_work(proc, &proc->todo);
5430 binder_release_work(proc, &proc->delivered_death);
5432 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5433 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5434 __func__, proc->pid, threads, nodes, incoming_refs,
5435 outgoing_refs, active_transactions);
5437 binder_proc_dec_tmpref(proc);
5440 static void binder_deferred_func(struct work_struct *work)
5442 struct binder_proc *proc;
5447 mutex_lock(&binder_deferred_lock);
5448 if (!hlist_empty(&binder_deferred_list)) {
5449 proc = hlist_entry(binder_deferred_list.first,
5450 struct binder_proc, deferred_work_node);
5451 hlist_del_init(&proc->deferred_work_node);
5452 defer = proc->deferred_work;
5453 proc->deferred_work = 0;
5458 mutex_unlock(&binder_deferred_lock);
5460 if (defer & BINDER_DEFERRED_FLUSH)
5461 binder_deferred_flush(proc);
5463 if (defer & BINDER_DEFERRED_RELEASE)
5464 binder_deferred_release(proc); /* frees proc */
5467 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5470 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5472 mutex_lock(&binder_deferred_lock);
5473 proc->deferred_work |= defer;
5474 if (hlist_unhashed(&proc->deferred_work_node)) {
5475 hlist_add_head(&proc->deferred_work_node,
5476 &binder_deferred_list);
5477 schedule_work(&binder_deferred_work);
5479 mutex_unlock(&binder_deferred_lock);
5482 static void print_binder_transaction_ilocked(struct seq_file *m,
5483 struct binder_proc *proc,
5485 struct binder_transaction *t)
5487 struct binder_proc *to_proc;
5488 struct binder_buffer *buffer = t->buffer;
5490 spin_lock(&t->lock);
5491 to_proc = t->to_proc;
5493 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5494 prefix, t->debug_id, t,
5495 t->from ? t->from->proc->pid : 0,
5496 t->from ? t->from->pid : 0,
5497 to_proc ? to_proc->pid : 0,
5498 t->to_thread ? t->to_thread->pid : 0,
5499 t->code, t->flags, t->priority, t->need_reply);
5500 spin_unlock(&t->lock);
5502 if (proc != to_proc) {
5504 * Can only safely deref buffer if we are holding the
5505 * correct proc inner lock for this node
5511 if (buffer == NULL) {
5512 seq_puts(m, " buffer free\n");
5515 if (buffer->target_node)
5516 seq_printf(m, " node %d", buffer->target_node->debug_id);
5517 seq_printf(m, " size %zd:%zd data %pK\n",
5518 buffer->data_size, buffer->offsets_size,
5522 static void print_binder_work_ilocked(struct seq_file *m,
5523 struct binder_proc *proc,
5525 const char *transaction_prefix,
5526 struct binder_work *w)
5528 struct binder_node *node;
5529 struct binder_transaction *t;
5532 case BINDER_WORK_TRANSACTION:
5533 t = container_of(w, struct binder_transaction, work);
5534 print_binder_transaction_ilocked(
5535 m, proc, transaction_prefix, t);
5537 case BINDER_WORK_RETURN_ERROR: {
5538 struct binder_error *e = container_of(
5539 w, struct binder_error, work);
5541 seq_printf(m, "%stransaction error: %u\n",
5544 case BINDER_WORK_TRANSACTION_COMPLETE:
5545 seq_printf(m, "%stransaction complete\n", prefix);
5547 case BINDER_WORK_NODE:
5548 node = container_of(w, struct binder_node, work);
5549 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5550 prefix, node->debug_id,
5551 (u64)node->ptr, (u64)node->cookie);
5553 case BINDER_WORK_DEAD_BINDER:
5554 seq_printf(m, "%shas dead binder\n", prefix);
5556 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5557 seq_printf(m, "%shas cleared dead binder\n", prefix);
5559 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5560 seq_printf(m, "%shas cleared death notification\n", prefix);
5563 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5568 static void print_binder_thread_ilocked(struct seq_file *m,
5569 struct binder_thread *thread,
5572 struct binder_transaction *t;
5573 struct binder_work *w;
5574 size_t start_pos = m->count;
5577 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5578 thread->pid, thread->looper,
5579 thread->looper_need_return,
5580 atomic_read(&thread->tmp_ref));
5581 header_pos = m->count;
5582 t = thread->transaction_stack;
5584 if (t->from == thread) {
5585 print_binder_transaction_ilocked(m, thread->proc,
5586 " outgoing transaction", t);
5588 } else if (t->to_thread == thread) {
5589 print_binder_transaction_ilocked(m, thread->proc,
5590 " incoming transaction", t);
5593 print_binder_transaction_ilocked(m, thread->proc,
5594 " bad transaction", t);
5598 list_for_each_entry(w, &thread->todo, entry) {
5599 print_binder_work_ilocked(m, thread->proc, " ",
5600 " pending transaction", w);
5602 if (!print_always && m->count == header_pos)
5603 m->count = start_pos;
5606 static void print_binder_node_nilocked(struct seq_file *m,
5607 struct binder_node *node)
5609 struct binder_ref *ref;
5610 struct binder_work *w;
5614 hlist_for_each_entry(ref, &node->refs, node_entry)
5617 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5618 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5619 node->has_strong_ref, node->has_weak_ref,
5620 node->local_strong_refs, node->local_weak_refs,
5621 node->internal_strong_refs, count, node->tmp_refs);
5623 seq_puts(m, " proc");
5624 hlist_for_each_entry(ref, &node->refs, node_entry)
5625 seq_printf(m, " %d", ref->proc->pid);
5629 list_for_each_entry(w, &node->async_todo, entry)
5630 print_binder_work_ilocked(m, node->proc, " ",
5631 " pending async transaction", w);
5635 static void print_binder_ref_olocked(struct seq_file *m,
5636 struct binder_ref *ref)
5638 binder_node_lock(ref->node);
5639 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5640 ref->data.debug_id, ref->data.desc,
5641 ref->node->proc ? "" : "dead ",
5642 ref->node->debug_id, ref->data.strong,
5643 ref->data.weak, ref->death);
5644 binder_node_unlock(ref->node);
5647 static void print_binder_proc(struct seq_file *m,
5648 struct binder_proc *proc, int print_all)
5650 struct binder_work *w;
5652 size_t start_pos = m->count;
5654 struct binder_node *last_node = NULL;
5656 seq_printf(m, "proc %d\n", proc->pid);
5657 seq_printf(m, "context %s\n", proc->context->name);
5658 header_pos = m->count;
5660 binder_inner_proc_lock(proc);
5661 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5662 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5663 rb_node), print_all);
5665 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5666 struct binder_node *node = rb_entry(n, struct binder_node,
5668 if (!print_all && !node->has_async_transaction)
5672 * take a temporary reference on the node so it
5673 * survives and isn't removed from the tree
5674 * while we print it.
5676 binder_inc_node_tmpref_ilocked(node);
5677 /* Need to drop inner lock to take node lock */
5678 binder_inner_proc_unlock(proc);
5680 binder_put_node(last_node);
5681 binder_node_inner_lock(node);
5682 print_binder_node_nilocked(m, node);
5683 binder_node_inner_unlock(node);
5685 binder_inner_proc_lock(proc);
5687 binder_inner_proc_unlock(proc);
5689 binder_put_node(last_node);
5692 binder_proc_lock(proc);
5693 for (n = rb_first(&proc->refs_by_desc);
5696 print_binder_ref_olocked(m, rb_entry(n,
5699 binder_proc_unlock(proc);
5701 binder_alloc_print_allocated(m, &proc->alloc);
5702 binder_inner_proc_lock(proc);
5703 list_for_each_entry(w, &proc->todo, entry)
5704 print_binder_work_ilocked(m, proc, " ",
5705 " pending transaction", w);
5706 list_for_each_entry(w, &proc->delivered_death, entry) {
5707 seq_puts(m, " has delivered dead binder\n");
5710 binder_inner_proc_unlock(proc);
5711 if (!print_all && m->count == header_pos)
5712 m->count = start_pos;
5715 static const char * const binder_return_strings[] = {
5716 "BR_ERROR",
5717 "BR_OK",
5718 "BR_TRANSACTION",
5719 "BR_REPLY",
5720 "BR_ACQUIRE_RESULT",
5721 "BR_DEAD_REPLY",
5722 "BR_TRANSACTION_COMPLETE",
5723 "BR_INCREFS",
5724 "BR_ACQUIRE",
5725 "BR_RELEASE",
5726 "BR_DECREFS",
5727 "BR_ATTEMPT_ACQUIRE",
5728 "BR_NOOP",
5729 "BR_SPAWN_LOOPER",
5730 "BR_FINISHED",
5731 "BR_DEAD_BINDER",
5732 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5733 "BR_FAILED_REPLY"
5734 };
5736 static const char * const binder_command_strings[] = {
5737 "BC_TRANSACTION",
5738 "BC_REPLY",
5739 "BC_ACQUIRE_RESULT",
5740 "BC_FREE_BUFFER",
5741 "BC_INCREFS",
5742 "BC_ACQUIRE",
5743 "BC_RELEASE",
5744 "BC_DECREFS",
5745 "BC_INCREFS_DONE",
5746 "BC_ACQUIRE_DONE",
5747 "BC_ATTEMPT_ACQUIRE",
5748 "BC_REGISTER_LOOPER",
5749 "BC_ENTER_LOOPER",
5750 "BC_EXIT_LOOPER",
5751 "BC_REQUEST_DEATH_NOTIFICATION",
5752 "BC_CLEAR_DEATH_NOTIFICATION",
5753 "BC_DEAD_BINDER_DONE",
5754 "BC_TRANSACTION_SG",
5755 "BC_REPLY_SG",
5756 };
5758 static const char * const binder_objstat_strings[] = {
5759 "proc",
5760 "thread",
5761 "node",
5762 "ref",
5763 "death",
5764 "transaction",
5765 "transaction_complete"
5766 };
5768 static void print_binder_stats(struct seq_file *m, const char *prefix,
5769 struct binder_stats *stats)
5773 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5774 ARRAY_SIZE(binder_command_strings));
5775 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5776 int temp = atomic_read(&stats->bc[i]);
5779 seq_printf(m, "%s%s: %d\n", prefix,
5780 binder_command_strings[i], temp);
5783 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5784 ARRAY_SIZE(binder_return_strings));
5785 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5786 int temp = atomic_read(&stats->br[i]);
5789 seq_printf(m, "%s%s: %d\n", prefix,
5790 binder_return_strings[i], temp);
5793 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5794 ARRAY_SIZE(binder_objstat_strings));
5795 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5796 ARRAY_SIZE(stats->obj_deleted));
5797 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5798 int created = atomic_read(&stats->obj_created[i]);
5799 int deleted = atomic_read(&stats->obj_deleted[i]);
5801 if (created || deleted)
5802 seq_printf(m, "%s%s: active %d total %d\n",
5804 binder_objstat_strings[i],
5810 static void print_binder_proc_stats(struct seq_file *m,
5811 struct binder_proc *proc)
5813 struct binder_work *w;
5814 struct binder_thread *thread;
5816 int count, strong, weak, ready_threads;
5817 size_t free_async_space =
5818 binder_alloc_get_free_async_space(&proc->alloc);
5820 seq_printf(m, "proc %d\n", proc->pid);
5821 seq_printf(m, "context %s\n", proc->context->name);
5824 binder_inner_proc_lock(proc);
5825 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5828 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5831 seq_printf(m, " threads: %d\n", count);
5832 seq_printf(m, " requested threads: %d+%d/%d\n"
5833 " ready threads %d\n"
5834 " free async space %zd\n", proc->requested_threads,
5835 proc->requested_threads_started, proc->max_threads,
5839 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5841 binder_inner_proc_unlock(proc);
5842 seq_printf(m, " nodes: %d\n", count);
5846 binder_proc_lock(proc);
5847 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5848 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5851 strong += ref->data.strong;
5852 weak += ref->data.weak;
5854 binder_proc_unlock(proc);
5855 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5857 count = binder_alloc_get_allocated_count(&proc->alloc);
5858 seq_printf(m, " buffers: %d\n", count);
5860 binder_alloc_print_pages(m, &proc->alloc);
5863 binder_inner_proc_lock(proc);
5864 list_for_each_entry(w, &proc->todo, entry) {
5865 if (w->type == BINDER_WORK_TRANSACTION)
5868 binder_inner_proc_unlock(proc);
5869 seq_printf(m, " pending transactions: %d\n", count);
5871 print_binder_stats(m, " ", &proc->stats);
5875 static int state_show(struct seq_file *m, void *unused)
5877 struct binder_proc *proc;
5878 struct binder_node *node;
5879 struct binder_node *last_node = NULL;
5881 seq_puts(m, "binder state:\n");
5883 spin_lock(&binder_dead_nodes_lock);
5884 if (!hlist_empty(&binder_dead_nodes))
5885 seq_puts(m, "dead nodes:\n");
5886 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5888 * take a temporary reference on the node so it
5889 * survives and isn't removed from the list
5890 * while we print it.
5893 spin_unlock(&binder_dead_nodes_lock);
5895 binder_put_node(last_node);
5896 binder_node_lock(node);
5897 print_binder_node_nilocked(m, node);
5898 binder_node_unlock(node);
5900 spin_lock(&binder_dead_nodes_lock);
5902 spin_unlock(&binder_dead_nodes_lock);
5904 binder_put_node(last_node);
5906 mutex_lock(&binder_procs_lock);
5907 hlist_for_each_entry(proc, &binder_procs, proc_node)
5908 print_binder_proc(m, proc, 1);
5909 mutex_unlock(&binder_procs_lock);
5914 static int stats_show(struct seq_file *m, void *unused)
5916 struct binder_proc *proc;
5918 seq_puts(m, "binder stats:\n");
5920 print_binder_stats(m, "", &binder_stats);
5922 mutex_lock(&binder_procs_lock);
5923 hlist_for_each_entry(proc, &binder_procs, proc_node)
5924 print_binder_proc_stats(m, proc);
5925 mutex_unlock(&binder_procs_lock);
5930 static int transactions_show(struct seq_file *m, void *unused)
5932 struct binder_proc *proc;
5934 seq_puts(m, "binder transactions:\n");
5935 mutex_lock(&binder_procs_lock);
5936 hlist_for_each_entry(proc, &binder_procs, proc_node)
5937 print_binder_proc(m, proc, 0);
5938 mutex_unlock(&binder_procs_lock);
5943 static int proc_show(struct seq_file *m, void *unused)
5945 struct binder_proc *itr;
5946 int pid = (unsigned long)m->private;
5948 mutex_lock(&binder_procs_lock);
5949 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5950 if (itr->pid == pid) {
5951 seq_puts(m, "binder proc state:\n");
5952 print_binder_proc(m, itr, 1);
5955 mutex_unlock(&binder_procs_lock);
5960 static void print_binder_transaction_log_entry(struct seq_file *m,
5961 struct binder_transaction_log_entry *e)
5963 int debug_id = READ_ONCE(e->debug_id_done);
5964 /*
5965 * read barrier to guarantee debug_id_done read before
5966 * we print the log values
5967 */
5968 smp_rmb();
5969 seq_printf(m,
5970 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5971 e->debug_id, (e->call_type == 2) ? "reply" :
5972 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5973 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5974 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5975 e->return_error, e->return_error_param,
5976 e->return_error_line);
5977 /*
5978 * read barrier to guarantee read of debug_id_done after
5979 * done printing the fields of the entry
5980 */
5981 smp_rmb();
5982 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5983 "\n" : " (incomplete)\n");
5986 static int transaction_log_show(struct seq_file *m, void *unused)
5988 struct binder_transaction_log *log = m->private;
5989 unsigned int log_cur = atomic_read(&log->cur);
5990 unsigned int count;
5991 unsigned int cur;
5992 int i;
5994 count = log_cur + 1;
5995 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5996 0 : count % ARRAY_SIZE(log->entry);
5997 if (count > ARRAY_SIZE(log->entry) || log->full)
5998 count = ARRAY_SIZE(log->entry);
5999 for (i = 0; i < count; i++) {
6000 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6002 print_binder_transaction_log_entry(m, &log->entry[index]);
6003 }
6004 return 0;
6005 }
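/*
 * Worked example of the ring arithmetic above, assuming a 32-entry log:
 * once log->full is set and log_cur == 34, count is initially 35, cur
 * becomes 35 % 32 == 3, and count is clamped to 32, so the loop prints
 * entries 3..31 followed by 0..2, i.e. oldest first.
 */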
6007 const struct file_operations binder_fops = {
6008 .owner = THIS_MODULE,
6009 .poll = binder_poll,
6010 .unlocked_ioctl = binder_ioctl,
6011 .compat_ioctl = binder_ioctl,
6012 .mmap = binder_mmap,
6013 .open = binder_open,
6014 .flush = binder_flush,
6015 .release = binder_release,
6016 };
6018 DEFINE_SHOW_ATTRIBUTE(state);
6019 DEFINE_SHOW_ATTRIBUTE(stats);
6020 DEFINE_SHOW_ATTRIBUTE(transactions);
6021 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6023 static int __init init_binder_device(const char *name)
6026 struct binder_device *binder_device;
6028 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6032 binder_device->miscdev.fops = &binder_fops;
6033 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6034 binder_device->miscdev.name = name;
6036 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6037 binder_device->context.name = name;
6038 mutex_init(&binder_device->context.context_mgr_node_lock);
6040 ret = misc_register(&binder_device->miscdev);
6042 kfree(binder_device);
6046 hlist_add_head(&binder_device->hlist, &binder_devices);
6051 static int __init binder_init(void)
6054 char *device_name, *device_tmp;
6055 struct binder_device *device;
6056 struct hlist_node *tmp;
6057 char *device_names = NULL;
6059 ret = binder_alloc_shrinker_init();
6063 atomic_set(&binder_transaction_log.cur, ~0U);
6064 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6066 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6067 if (binder_debugfs_dir_entry_root)
6068 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6069 binder_debugfs_dir_entry_root);
6071 if (binder_debugfs_dir_entry_root) {
6072 debugfs_create_file("state",
6074 binder_debugfs_dir_entry_root,
6077 debugfs_create_file("stats",
6079 binder_debugfs_dir_entry_root,
6082 debugfs_create_file("transactions",
6084 binder_debugfs_dir_entry_root,
6086 &transactions_fops);
6087 debugfs_create_file("transaction_log",
6089 binder_debugfs_dir_entry_root,
6090 &binder_transaction_log,
6091 &transaction_log_fops);
6092 debugfs_create_file("failed_transaction_log",
6094 binder_debugfs_dir_entry_root,
6095 &binder_transaction_log_failed,
6096 &transaction_log_fops);
6099 if (strcmp(binder_devices_param, "") != 0) {
6100 /*
6101 * Copy the module parameter string, because we don't want to
6102 * tokenize it in-place.
6103 */
6104 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6105 if (!device_names) {
6107 goto err_alloc_device_names_failed;
6110 device_tmp = device_names;
6111 while ((device_name = strsep(&device_tmp, ","))) {
6112 ret = init_binder_device(device_name);
6114 goto err_init_binder_device_failed;
6118 ret = init_binderfs();
6120 goto err_init_binder_device_failed;
6124 err_init_binder_device_failed:
6125 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6126 misc_deregister(&device->miscdev);
6127 hlist_del(&device->hlist);
6131 kfree(device_names);
6133 err_alloc_device_names_failed:
6134 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6139 device_initcall(binder_init);
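/*
 * Illustrative note (not from this file): assuming the stock
 * module_param wiring of binder_devices_param, booting with e.g.
 * binder.devices=binder,hwbinder,vndbinder makes the strsep() loop in
 * binder_init() register one misc device per comma-separated name.
 */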
6141 #define CREATE_TRACE_POINTS
6142 #include "binder_trace.h"
6144 MODULE_LICENSE("GPL v2");