binder: move structs from core file to header file
drivers/android/binder.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
32  * Functions that require a lock held on entry indicate which lock
33  * in the suffix of the function name:
34  *
35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
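/*
 * A minimal sketch of the ordering rules above (hypothetical caller,
 * not a helper in this driver): code that needs all three locks for a
 * node owned by a single proc takes them outer -> node -> inner and
 * releases them in reverse, using the wrappers defined later in this
 * file:
 *
 *	binder_proc_lock(proc);        (1) proc->outer_lock
 *	binder_node_lock(node);        (2) node->lock
 *	binder_inner_proc_lock(proc);  (3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */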
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69
70 #include <uapi/linux/android/binder.h>
71
72 #include <asm/cacheflush.h>
73
74 #include "binder_internal.h"
75 #include "binder_trace.h"
76
77 static HLIST_HEAD(binder_deferred_list);
78 static DEFINE_MUTEX(binder_deferred_lock);
79
80 static HLIST_HEAD(binder_devices);
81 static HLIST_HEAD(binder_procs);
82 static DEFINE_MUTEX(binder_procs_lock);
83
84 static HLIST_HEAD(binder_dead_nodes);
85 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86
87 static struct dentry *binder_debugfs_dir_entry_root;
88 static struct dentry *binder_debugfs_dir_entry_proc;
89 static atomic_t binder_last_id;
90
91 static int proc_show(struct seq_file *m, void *unused);
92 DEFINE_SHOW_ATTRIBUTE(proc);
93
94 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
95
96 enum {
97         BINDER_DEBUG_USER_ERROR             = 1U << 0,
98         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
99         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
100         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
101         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
102         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
103         BINDER_DEBUG_READ_WRITE             = 1U << 6,
104         BINDER_DEBUG_USER_REFS              = 1U << 7,
105         BINDER_DEBUG_THREADS                = 1U << 8,
106         BINDER_DEBUG_TRANSACTION            = 1U << 9,
107         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
108         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
109         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
110         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
111         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
112 };
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
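/*
 * The 0644 permission above also makes debug_mask tunable at runtime,
 * typically via /sys/module/binder/parameters/debug_mask (path assumes
 * the default "binder" module name); for example, writing 0x3f there
 * would enable the first six categories in the enum above.
 */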
116
117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118 module_param_named(devices, binder_devices_param, charp, 0444);
119
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
122
123 static int binder_set_stop_on_user_error(const char *val,
124                                          const struct kernel_param *kp)
125 {
126         int ret;
127
128         ret = param_set_int(val, kp);
129         if (binder_stop_on_user_error < 2)
130                 wake_up(&binder_user_error_wait);
131         return ret;
132 }
133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134         param_get_int, &binder_stop_on_user_error, 0644);
135
136 #define binder_debug(mask, x...) \
137         do { \
138                 if (binder_debug_mask & mask) \
139                         pr_info_ratelimited(x); \
140         } while (0)
141
142 #define binder_user_error(x...) \
143         do { \
144                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145                         pr_info_ratelimited(x); \
146                 if (binder_stop_on_user_error) \
147                         binder_stop_on_user_error = 2; \
148         } while (0)
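/*
 * Illustrative (hypothetical) call sites for the two macros above: the
 * first prints only when its category bit is set in binder_debug_mask;
 * the second also arms the stop_on_user_error machinery when enabled:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *	binder_user_error("%d: bad handle %u\n", proc->pid, handle);
 */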
149
150 #define to_flat_binder_object(hdr) \
151         container_of(hdr, struct flat_binder_object, hdr)
152
153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154
155 #define to_binder_buffer_object(hdr) \
156         container_of(hdr, struct binder_buffer_object, hdr)
157
158 #define to_binder_fd_array_object(hdr) \
159         container_of(hdr, struct binder_fd_array_object, hdr)
160
161 static struct binder_stats binder_stats;
162
163 static inline void binder_stats_deleted(enum binder_stat_types type)
164 {
165         atomic_inc(&binder_stats.obj_deleted[type]);
166 }
167
168 static inline void binder_stats_created(enum binder_stat_types type)
169 {
170         atomic_inc(&binder_stats.obj_created[type]);
171 }
172
173 struct binder_transaction_log binder_transaction_log;
174 struct binder_transaction_log binder_transaction_log_failed;
175
176 static struct binder_transaction_log_entry *binder_transaction_log_add(
177         struct binder_transaction_log *log)
178 {
179         struct binder_transaction_log_entry *e;
180         unsigned int cur = atomic_inc_return(&log->cur);
181
182         if (cur >= ARRAY_SIZE(log->entry))
183                 log->full = true;
184         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185         WRITE_ONCE(e->debug_id_done, 0);
186         /*
187          * write-barrier to synchronize access to e->debug_id_done.
188  * We make sure the initialized 0 value is seen before
189  * the other fields are zeroed by memset().
190          */
191         smp_wmb();
192         memset(e, 0, sizeof(*e));
193         return e;
194 }
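/*
 * The smp_wmb() above pairs with a read-side barrier in the transaction
 * log dump path elsewhere in the driver. A consumer is expected to read
 * e->debug_id_done first, issue smp_rmb(), then read the remaining
 * fields, and treat the entry as incomplete on a mismatch (sketch only):
 *
 *	id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	...read/print the other fields of *e...
 *	complete = id && id == e->debug_id;
 */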
195
196 enum binder_deferred_state {
197         BINDER_DEFERRED_FLUSH        = 0x01,
198         BINDER_DEFERRED_RELEASE      = 0x02,
199 };
200
201 enum {
202         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
203         BINDER_LOOPER_STATE_ENTERED     = 0x02,
204         BINDER_LOOPER_STATE_EXITED      = 0x04,
205         BINDER_LOOPER_STATE_INVALID     = 0x08,
206         BINDER_LOOPER_STATE_WAITING     = 0x10,
207         BINDER_LOOPER_STATE_POLL        = 0x20,
208 };
209
210 /**
211  * binder_proc_lock() - Acquire outer lock for given binder_proc
212  * @proc:         struct binder_proc to acquire
213  *
214  * Acquires proc->outer_lock. Used to protect binder_ref
215  * structures associated with the given proc.
216  */
217 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
218 static void
219 _binder_proc_lock(struct binder_proc *proc, int line)
220         __acquires(&proc->outer_lock)
221 {
222         binder_debug(BINDER_DEBUG_SPINLOCKS,
223                      "%s: line=%d\n", __func__, line);
224         spin_lock(&proc->outer_lock);
225 }
226
227 /**
228  * binder_proc_unlock() - Release outer lock for given binder_proc
229  * @proc:         struct binder_proc whose outer lock is released
230  *
231  * Release lock acquired via binder_proc_lock()
232  */
233 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
234 static void
235 _binder_proc_unlock(struct binder_proc *proc, int line)
236         __releases(&proc->outer_lock)
237 {
238         binder_debug(BINDER_DEBUG_SPINLOCKS,
239                      "%s: line=%d\n", __func__, line);
240         spin_unlock(&proc->outer_lock);
241 }
242
243 /**
244  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245  * @proc:         struct binder_proc to acquire
246  *
247  * Acquires proc->inner_lock. Used to protect todo lists
248  */
249 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
250 static void
251 _binder_inner_proc_lock(struct binder_proc *proc, int line)
252         __acquires(&proc->inner_lock)
253 {
254         binder_debug(BINDER_DEBUG_SPINLOCKS,
255                      "%s: line=%d\n", __func__, line);
256         spin_lock(&proc->inner_lock);
257 }
258
259 /**
260  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
261  * @proc:         struct binder_proc whose inner lock is released
262  *
263  * Release lock acquired via binder_inner_proc_lock()
264  */
265 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
266 static void
267 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
268         __releases(&proc->inner_lock)
269 {
270         binder_debug(BINDER_DEBUG_SPINLOCKS,
271                      "%s: line=%d\n", __func__, line);
272         spin_unlock(&proc->inner_lock);
273 }
274
275 /**
276  * binder_node_lock() - Acquire spinlock for given binder_node
277  * @node:         struct binder_node to acquire
278  *
279  * Acquires node->lock. Used to protect binder_node fields
280  */
281 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
282 static void
283 _binder_node_lock(struct binder_node *node, int line)
284         __acquires(&node->lock)
285 {
286         binder_debug(BINDER_DEBUG_SPINLOCKS,
287                      "%s: line=%d\n", __func__, line);
288         spin_lock(&node->lock);
289 }
290
291 /**
292  * binder_node_unlock() - Release spinlock for given binder_node
293  * @node:         struct binder_node whose lock is released
294  *
295  * Release lock acquired via binder_node_lock()
296  */
297 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
298 static void
299 _binder_node_unlock(struct binder_node *node, int line)
300         __releases(&node->lock)
301 {
302         binder_debug(BINDER_DEBUG_SPINLOCKS,
303                      "%s: line=%d\n", __func__, line);
304         spin_unlock(&node->lock);
305 }
306
307 /**
308  * binder_node_inner_lock() - Acquire node and inner locks
309  * @node:         struct binder_node to acquire
310  *
311  * Acquires node->lock. If node->proc is non-NULL, also acquires
312  * proc->inner_lock. Used to protect binder_node fields
313  */
314 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
315 static void
316 _binder_node_inner_lock(struct binder_node *node, int line)
317         __acquires(&node->lock) __acquires(&node->proc->inner_lock)
318 {
319         binder_debug(BINDER_DEBUG_SPINLOCKS,
320                      "%s: line=%d\n", __func__, line);
321         spin_lock(&node->lock);
322         if (node->proc)
323                 binder_inner_proc_lock(node->proc);
324         else
325                 /* annotation for sparse */
326                 __acquire(&node->proc->inner_lock);
327 }
328
329 /**
330  * binder_node_inner_unlock() - Release node and inner locks
331  * @node:         struct binder_node whose locks are released
332  *
333  * Release locks acquired via binder_node_inner_lock()
334  */
335 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
336 static void
337 _binder_node_inner_unlock(struct binder_node *node, int line)
338         __releases(&node->lock) __releases(&node->proc->inner_lock)
339 {
340         struct binder_proc *proc = node->proc;
341
342         binder_debug(BINDER_DEBUG_SPINLOCKS,
343                      "%s: line=%d\n", __func__, line);
344         if (proc)
345                 binder_inner_proc_unlock(proc);
346         else
347                 /* annotation for sparse */
348                 __release(&node->proc->inner_lock);
349         spin_unlock(&node->lock);
350 }
351
352 static bool binder_worklist_empty_ilocked(struct list_head *list)
353 {
354         return list_empty(list);
355 }
356
357 /**
358  * binder_worklist_empty() - Check if no items on the work list
359  * @proc:       binder_proc associated with list
360  * @list:       list to check
361  *
362  * Return: true if there are no items on list, else false
363  */
364 static bool binder_worklist_empty(struct binder_proc *proc,
365                                   struct list_head *list)
366 {
367         bool ret;
368
369         binder_inner_proc_lock(proc);
370         ret = binder_worklist_empty_ilocked(list);
371         binder_inner_proc_unlock(proc);
372         return ret;
373 }
374
375 /**
376  * binder_enqueue_work_ilocked() - Add an item to the work list
377  * @work:         struct binder_work to add to list
378  * @target_list:  list to add work to
379  *
380  * Adds the work to the specified list. Asserts that work
381  * is not already on a list.
382  *
383  * Requires the proc->inner_lock to be held.
384  */
385 static void
386 binder_enqueue_work_ilocked(struct binder_work *work,
387                            struct list_head *target_list)
388 {
389         BUG_ON(target_list == NULL);
390         BUG_ON(work->entry.next && !list_empty(&work->entry));
391         list_add_tail(&work->entry, target_list);
392 }
393
394 /**
395  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396  * @thread:       thread to queue work to
397  * @work:         struct binder_work to add to list
398  *
399  * Adds the work to the todo list of the thread. Doesn't set the process_todo
400  * flag, which means that (if it wasn't already set) the thread will go to
401  * sleep without handling this work when it calls read.
402  *
403  * Requires the proc->inner_lock to be held.
404  */
405 static void
406 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407                                             struct binder_work *work)
408 {
409         WARN_ON(!list_empty(&thread->waiting_thread_node));
410         binder_enqueue_work_ilocked(work, &thread->todo);
411 }
412
413 /**
414  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415  * @thread:       thread to queue work to
416  * @work:         struct binder_work to add to list
417  *
418  * Adds the work to the todo list of the thread, and enables processing
419  * of the todo queue.
420  *
421  * Requires the proc->inner_lock to be held.
422  */
423 static void
424 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425                                    struct binder_work *work)
426 {
427         WARN_ON(!list_empty(&thread->waiting_thread_node));
428         binder_enqueue_work_ilocked(work, &thread->todo);
429         thread->process_todo = true;
430 }
431
432 /**
433  * binder_enqueue_thread_work() - Add an item to the thread work list
434  * @thread:       thread to queue work to
435  * @work:         struct binder_work to add to list
436  *
437  * Adds the work to the todo list of the thread, and enables processing
438  * of the todo queue.
439  */
440 static void
441 binder_enqueue_thread_work(struct binder_thread *thread,
442                            struct binder_work *work)
443 {
444         binder_inner_proc_lock(thread->proc);
445         binder_enqueue_thread_work_ilocked(thread, work);
446         binder_inner_proc_unlock(thread->proc);
447 }
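/*
 * Hypothetical caller contrasting the two flavours above: the deferred
 * variant queues work without setting thread->process_todo, so an idle
 * thread is not forced to pick it up, while the plain variant marks the
 * todo list as needing processing:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_inner_proc_unlock(proc);
 */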
448
449 static void
450 binder_dequeue_work_ilocked(struct binder_work *work)
451 {
452         list_del_init(&work->entry);
453 }
454
455 /**
456  * binder_dequeue_work() - Removes an item from the work list
457  * @proc:         binder_proc associated with list
458  * @work:         struct binder_work to remove from list
459  *
460  * Removes the specified work item from whatever list it is on.
461  * Can safely be called if work is not on any list.
462  */
463 static void
464 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
465 {
466         binder_inner_proc_lock(proc);
467         binder_dequeue_work_ilocked(work);
468         binder_inner_proc_unlock(proc);
469 }
470
471 static struct binder_work *binder_dequeue_work_head_ilocked(
472                                         struct list_head *list)
473 {
474         struct binder_work *w;
475
476         w = list_first_entry_or_null(list, struct binder_work, entry);
477         if (w)
478                 list_del_init(&w->entry);
479         return w;
480 }
481
482 static void
483 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484 static void binder_free_thread(struct binder_thread *thread);
485 static void binder_free_proc(struct binder_proc *proc);
486 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
487
488 static bool binder_has_work_ilocked(struct binder_thread *thread,
489                                     bool do_proc_work)
490 {
491         return thread->process_todo ||
492                 thread->looper_need_return ||
493                 (do_proc_work &&
494                  !binder_worklist_empty_ilocked(&thread->proc->todo));
495 }
496
497 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
498 {
499         bool has_work;
500
501         binder_inner_proc_lock(thread->proc);
502         has_work = binder_has_work_ilocked(thread, do_proc_work);
503         binder_inner_proc_unlock(thread->proc);
504
505         return has_work;
506 }
507
508 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
509 {
510         return !thread->transaction_stack &&
511                 binder_worklist_empty_ilocked(&thread->todo) &&
512                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513                                    BINDER_LOOPER_STATE_REGISTERED));
514 }
515
516 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
517                                                bool sync)
518 {
519         struct rb_node *n;
520         struct binder_thread *thread;
521
522         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523                 thread = rb_entry(n, struct binder_thread, rb_node);
524                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525                     binder_available_for_proc_work_ilocked(thread)) {
526                         if (sync)
527                                 wake_up_interruptible_sync(&thread->wait);
528                         else
529                                 wake_up_interruptible(&thread->wait);
530                 }
531         }
532 }
533
534 /**
535  * binder_select_thread_ilocked() - selects a thread for doing proc work.
536  * @proc:       process to select a thread from
537  *
538  * Note that calling this function moves the thread off the waiting_threads
539  * list, so it can only be woken up by the caller of this function, or a
540  * signal. Therefore, callers *should* always wake up the thread this function
541  * returns.
542  *
543  * Return:      If there's a thread currently waiting for process work,
544  *              returns that thread. Otherwise returns NULL.
545  */
546 static struct binder_thread *
547 binder_select_thread_ilocked(struct binder_proc *proc)
548 {
549         struct binder_thread *thread;
550
551         assert_spin_locked(&proc->inner_lock);
552         thread = list_first_entry_or_null(&proc->waiting_threads,
553                                           struct binder_thread,
554                                           waiting_thread_node);
555
556         if (thread)
557                 list_del_init(&thread->waiting_thread_node);
558
559         return thread;
560 }
561
562 /**
563  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564  * @proc:       process to wake up a thread in
565  * @thread:     specific thread to wake-up (may be NULL)
566  * @sync:       whether to do a synchronous wake-up
567  *
568  * This function wakes up a thread in the @proc process.
569  * The caller may provide a specific thread to wake-up in
570  * the @thread parameter. If @thread is NULL, this function
571  * will wake up threads that have called poll().
572  *
573  * Note that for this function to work as expected, callers
574  * should first call binder_select_thread() to find a thread
575  * to handle the work (if they don't have a thread already),
576  * and pass the result into the @thread parameter.
577  */
578 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579                                          struct binder_thread *thread,
580                                          bool sync)
581 {
582         assert_spin_locked(&proc->inner_lock);
583
584         if (thread) {
585                 if (sync)
586                         wake_up_interruptible_sync(&thread->wait);
587                 else
588                         wake_up_interruptible(&thread->wait);
589                 return;
590         }
591
592         /* Didn't find a thread waiting for proc work; this can happen
593          * in two scenarios:
594          * 1. All threads are busy handling transactions
595          *    In that case, one of those threads should call back into
596          *    the kernel driver soon and pick up this work.
597          * 2. Threads are using the (e)poll interface, in which case
598          *    they may be blocked on the waitqueue without having been
599          *    added to waiting_threads. For this case, we just iterate
600          *    over all threads not handling transaction work, and
601          *    wake them all up. We wake all because we don't know whether
602          *    a thread that called into (e)poll is handling non-binder
603          *    work currently.
604          */
605         binder_wakeup_poll_threads_ilocked(proc, sync);
606 }
607
608 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
609 {
610         struct binder_thread *thread = binder_select_thread_ilocked(proc);
611
612         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
613 }
614
615 static void binder_set_nice(long nice)
616 {
617         long min_nice;
618
619         if (can_nice(current, nice)) {
620                 set_user_nice(current, nice);
621                 return;
622         }
623         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625                      "%d: nice value %ld not allowed use %ld instead\n",
626                       current->pid, nice, min_nice);
627         set_user_nice(current, min_nice);
628         if (min_nice <= MAX_NICE)
629                 return;
630         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
631 }
632
633 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634                                                    binder_uintptr_t ptr)
635 {
636         struct rb_node *n = proc->nodes.rb_node;
637         struct binder_node *node;
638
639         assert_spin_locked(&proc->inner_lock);
640
641         while (n) {
642                 node = rb_entry(n, struct binder_node, rb_node);
643
644                 if (ptr < node->ptr)
645                         n = n->rb_left;
646                 else if (ptr > node->ptr)
647                         n = n->rb_right;
648                 else {
649                         /*
650                          * take an implicit weak reference
651                          * to ensure node stays alive until
652                          * call to binder_put_node()
653                          */
654                         binder_inc_node_tmpref_ilocked(node);
655                         return node;
656                 }
657         }
658         return NULL;
659 }
660
661 static struct binder_node *binder_get_node(struct binder_proc *proc,
662                                            binder_uintptr_t ptr)
663 {
664         struct binder_node *node;
665
666         binder_inner_proc_lock(proc);
667         node = binder_get_node_ilocked(proc, ptr);
668         binder_inner_proc_unlock(proc);
669         return node;
670 }
671
672 static struct binder_node *binder_init_node_ilocked(
673                                                 struct binder_proc *proc,
674                                                 struct binder_node *new_node,
675                                                 struct flat_binder_object *fp)
676 {
677         struct rb_node **p = &proc->nodes.rb_node;
678         struct rb_node *parent = NULL;
679         struct binder_node *node;
680         binder_uintptr_t ptr = fp ? fp->binder : 0;
681         binder_uintptr_t cookie = fp ? fp->cookie : 0;
682         __u32 flags = fp ? fp->flags : 0;
683
684         assert_spin_locked(&proc->inner_lock);
685
686         while (*p) {
687
688                 parent = *p;
689                 node = rb_entry(parent, struct binder_node, rb_node);
690
691                 if (ptr < node->ptr)
692                         p = &(*p)->rb_left;
693                 else if (ptr > node->ptr)
694                         p = &(*p)->rb_right;
695                 else {
696                         /*
697                          * A matching node is already in
698                          * the rb tree. Abandon the init
699                          * and return it.
700                          */
701                         binder_inc_node_tmpref_ilocked(node);
702                         return node;
703                 }
704         }
705         node = new_node;
706         binder_stats_created(BINDER_STAT_NODE);
707         node->tmp_refs++;
708         rb_link_node(&node->rb_node, parent, p);
709         rb_insert_color(&node->rb_node, &proc->nodes);
710         node->debug_id = atomic_inc_return(&binder_last_id);
711         node->proc = proc;
712         node->ptr = ptr;
713         node->cookie = cookie;
714         node->work.type = BINDER_WORK_NODE;
715         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717         node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718         spin_lock_init(&node->lock);
719         INIT_LIST_HEAD(&node->work.entry);
720         INIT_LIST_HEAD(&node->async_todo);
721         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722                      "%d:%d node %d u%016llx c%016llx created\n",
723                      proc->pid, current->pid, node->debug_id,
724                      (u64)node->ptr, (u64)node->cookie);
725
726         return node;
727 }
728
729 static struct binder_node *binder_new_node(struct binder_proc *proc,
730                                            struct flat_binder_object *fp)
731 {
732         struct binder_node *node;
733         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
734
735         if (!new_node)
736                 return NULL;
737         binder_inner_proc_lock(proc);
738         node = binder_init_node_ilocked(proc, new_node, fp);
739         binder_inner_proc_unlock(proc);
740         if (node != new_node)
741                 /*
742                  * The node was already added by another thread
743                  */
744                 kfree(new_node);
745
746         return node;
747 }
748
749 static void binder_free_node(struct binder_node *node)
750 {
751         kfree(node);
752         binder_stats_deleted(BINDER_STAT_NODE);
753 }
754
755 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
756                                     int internal,
757                                     struct list_head *target_list)
758 {
759         struct binder_proc *proc = node->proc;
760
761         assert_spin_locked(&node->lock);
762         if (proc)
763                 assert_spin_locked(&proc->inner_lock);
764         if (strong) {
765                 if (internal) {
766                         if (target_list == NULL &&
767                             node->internal_strong_refs == 0 &&
768                             !(node->proc &&
769                               node == node->proc->context->binder_context_mgr_node &&
770                               node->has_strong_ref)) {
771                                 pr_err("invalid inc strong node for %d\n",
772                                         node->debug_id);
773                                 return -EINVAL;
774                         }
775                         node->internal_strong_refs++;
776                 } else
777                         node->local_strong_refs++;
778                 if (!node->has_strong_ref && target_list) {
779                         struct binder_thread *thread = container_of(target_list,
780                                                     struct binder_thread, todo);
781                         binder_dequeue_work_ilocked(&node->work);
782                         BUG_ON(&thread->todo != target_list);
783                         binder_enqueue_deferred_thread_work_ilocked(thread,
784                                                                    &node->work);
785                 }
786         } else {
787                 if (!internal)
788                         node->local_weak_refs++;
789                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790                         if (target_list == NULL) {
791                                 pr_err("invalid inc weak node for %d\n",
792                                         node->debug_id);
793                                 return -EINVAL;
794                         }
795                         /*
796                          * See comment above
797                          */
798                         binder_enqueue_work_ilocked(&node->work, target_list);
799                 }
800         }
801         return 0;
802 }
803
804 static int binder_inc_node(struct binder_node *node, int strong, int internal,
805                            struct list_head *target_list)
806 {
807         int ret;
808
809         binder_node_inner_lock(node);
810         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811         binder_node_inner_unlock(node);
812
813         return ret;
814 }
815
816 static bool binder_dec_node_nilocked(struct binder_node *node,
817                                      int strong, int internal)
818 {
819         struct binder_proc *proc = node->proc;
820
821         assert_spin_locked(&node->lock);
822         if (proc)
823                 assert_spin_locked(&proc->inner_lock);
824         if (strong) {
825                 if (internal)
826                         node->internal_strong_refs--;
827                 else
828                         node->local_strong_refs--;
829                 if (node->local_strong_refs || node->internal_strong_refs)
830                         return false;
831         } else {
832                 if (!internal)
833                         node->local_weak_refs--;
834                 if (node->local_weak_refs || node->tmp_refs ||
835                                 !hlist_empty(&node->refs))
836                         return false;
837         }
838
839         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840                 if (list_empty(&node->work.entry)) {
841                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
842                         binder_wakeup_proc_ilocked(proc);
843                 }
844         } else {
845                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846                     !node->local_weak_refs && !node->tmp_refs) {
847                         if (proc) {
848                                 binder_dequeue_work_ilocked(&node->work);
849                                 rb_erase(&node->rb_node, &proc->nodes);
850                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851                                              "refless node %d deleted\n",
852                                              node->debug_id);
853                         } else {
854                                 BUG_ON(!list_empty(&node->work.entry));
855                                 spin_lock(&binder_dead_nodes_lock);
856                                 /*
857                                  * tmp_refs could have changed so
858                                  * check it again
859                                  */
860                                 if (node->tmp_refs) {
861                                         spin_unlock(&binder_dead_nodes_lock);
862                                         return false;
863                                 }
864                                 hlist_del(&node->dead_node);
865                                 spin_unlock(&binder_dead_nodes_lock);
866                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867                                              "dead node %d deleted\n",
868                                              node->debug_id);
869                         }
870                         return true;
871                 }
872         }
873         return false;
874 }
875
876 static void binder_dec_node(struct binder_node *node, int strong, int internal)
877 {
878         bool free_node;
879
880         binder_node_inner_lock(node);
881         free_node = binder_dec_node_nilocked(node, strong, internal);
882         binder_node_inner_unlock(node);
883         if (free_node)
884                 binder_free_node(node);
885 }
886
887 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
888 {
889         /*
890          * No call to binder_inc_node() is needed since we
891          * don't need to inform userspace of any changes to
892          * tmp_refs
893          */
894         node->tmp_refs++;
895 }
896
897 /**
898  * binder_inc_node_tmpref() - take a temporary reference on node
899  * @node:       node to reference
900  *
901  * Take reference on node to prevent the node from being freed
902  * while referenced only by a local variable. The inner lock is
903  * needed to serialize with the node work on the queue (which
904  * isn't needed after the node is dead). If the node is dead
905  * (node->proc is NULL), use binder_dead_nodes_lock to protect
906  * node->tmp_refs against dead-node-only cases where the node
907  * lock cannot be acquired (e.g. traversing the dead node list to
908  * print nodes)
909  */
910 static void binder_inc_node_tmpref(struct binder_node *node)
911 {
912         binder_node_lock(node);
913         if (node->proc)
914                 binder_inner_proc_lock(node->proc);
915         else
916                 spin_lock(&binder_dead_nodes_lock);
917         binder_inc_node_tmpref_ilocked(node);
918         if (node->proc)
919                 binder_inner_proc_unlock(node->proc);
920         else
921                 spin_unlock(&binder_dead_nodes_lock);
922         binder_node_unlock(node);
923 }
924
925 /**
926  * binder_dec_node_tmpref() - remove a temporary reference on node
927  * @node:       node to reference
928  *
929  * Release temporary reference on node taken via binder_inc_node_tmpref()
930  */
931 static void binder_dec_node_tmpref(struct binder_node *node)
932 {
933         bool free_node;
934
935         binder_node_inner_lock(node);
936         if (!node->proc)
937                 spin_lock(&binder_dead_nodes_lock);
938         else
939                 __acquire(&binder_dead_nodes_lock);
940         node->tmp_refs--;
941         BUG_ON(node->tmp_refs < 0);
942         if (!node->proc)
943                 spin_unlock(&binder_dead_nodes_lock);
944         else
945                 __release(&binder_dead_nodes_lock);
946         /*
947          * Call binder_dec_node() to check if all refcounts are 0
948          * and cleanup is needed. Calling with strong=0 and internal=1
949          * causes no actual reference to be released in binder_dec_node().
950          * If that changes, a change is needed here too.
951          */
952         free_node = binder_dec_node_nilocked(node, 0, 1);
953         binder_node_inner_unlock(node);
954         if (free_node)
955                 binder_free_node(node);
956 }
957
958 static void binder_put_node(struct binder_node *node)
959 {
960         binder_dec_node_tmpref(node);
961 }
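/*
 * Typical lookup pattern built on the temporary references above
 * (hypothetical caller; the real users are the transaction paths later
 * in the driver). The tmpref taken inside binder_get_node() keeps the
 * node alive until the matching binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);
 *	}
 */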
962
963 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964                                                  u32 desc, bool need_strong_ref)
965 {
966         struct rb_node *n = proc->refs_by_desc.rb_node;
967         struct binder_ref *ref;
968
969         while (n) {
970                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
971
972                 if (desc < ref->data.desc) {
973                         n = n->rb_left;
974                 } else if (desc > ref->data.desc) {
975                         n = n->rb_right;
976                 } else if (need_strong_ref && !ref->data.strong) {
977                         binder_user_error("tried to use weak ref as strong ref\n");
978                         return NULL;
979                 } else {
980                         return ref;
981                 }
982         }
983         return NULL;
984 }
985
986 /**
987  * binder_get_ref_for_node_olocked() - get the ref associated with given node
988  * @proc:       binder_proc that owns the ref
989  * @node:       binder_node of target
990  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
991  *
992  * Look up the ref for the given node and return it if it exists
993  *
994  * If it doesn't exist and the caller provides a newly allocated
995  * ref, initialize the fields of the newly allocated ref and insert
996  * into the given proc rb_trees and node refs list.
997  *
998  * Return:      the ref for node. It is possible that another thread
999  *              allocated/initialized the ref first in which case the
1000  *              returned ref would be different from the passed-in
1001  *              new_ref. new_ref must be kfree'd by the caller in
1002  *              this case.
1003  */
1004 static struct binder_ref *binder_get_ref_for_node_olocked(
1005                                         struct binder_proc *proc,
1006                                         struct binder_node *node,
1007                                         struct binder_ref *new_ref)
1008 {
1009         struct binder_context *context = proc->context;
1010         struct rb_node **p = &proc->refs_by_node.rb_node;
1011         struct rb_node *parent = NULL;
1012         struct binder_ref *ref;
1013         struct rb_node *n;
1014
1015         while (*p) {
1016                 parent = *p;
1017                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018
1019                 if (node < ref->node)
1020                         p = &(*p)->rb_left;
1021                 else if (node > ref->node)
1022                         p = &(*p)->rb_right;
1023                 else
1024                         return ref;
1025         }
1026         if (!new_ref)
1027                 return NULL;
1028
1029         binder_stats_created(BINDER_STAT_REF);
1030         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031         new_ref->proc = proc;
1032         new_ref->node = node;
1033         rb_link_node(&new_ref->rb_node_node, parent, p);
1034         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035
1036         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039                 if (ref->data.desc > new_ref->data.desc)
1040                         break;
1041                 new_ref->data.desc = ref->data.desc + 1;
1042         }
1043
1044         p = &proc->refs_by_desc.rb_node;
1045         while (*p) {
1046                 parent = *p;
1047                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048
1049                 if (new_ref->data.desc < ref->data.desc)
1050                         p = &(*p)->rb_left;
1051                 else if (new_ref->data.desc > ref->data.desc)
1052                         p = &(*p)->rb_right;
1053                 else
1054                         BUG();
1055         }
1056         rb_link_node(&new_ref->rb_node_desc, parent, p);
1057         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058
1059         binder_node_lock(node);
1060         hlist_add_head(&new_ref->node_entry, &node->refs);
1061
1062         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063                      "%d new ref %d desc %d for node %d\n",
1064                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065                       node->debug_id);
1066         binder_node_unlock(node);
1067         return new_ref;
1068 }
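/*
 * Worked example of the descriptor assignment above (hypothetical
 * state): a ref to the context manager node always gets desc 0; any
 * other node gets the lowest unused value >= 1. With existing
 * descriptors {0, 1, 2, 5}, a new ref starts at 1, is bumped past 1
 * and 2 by the refs_by_desc scan, and settles on desc 3 when it
 * reaches 5.
 */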
1069
1070 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071 {
1072         bool delete_node = false;
1073
1074         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075                      "%d delete ref %d desc %d for node %d\n",
1076                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077                       ref->node->debug_id);
1078
1079         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081
1082         binder_node_inner_lock(ref->node);
1083         if (ref->data.strong)
1084                 binder_dec_node_nilocked(ref->node, 1, 1);
1085
1086         hlist_del(&ref->node_entry);
1087         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088         binder_node_inner_unlock(ref->node);
1089         /*
1090          * Clear ref->node unless we want the caller to free the node
1091          */
1092         if (!delete_node) {
1093                 /*
1094                  * The caller uses ref->node to determine
1095                  * whether the node needs to be freed. Clear
1096                  * it since the node is still alive.
1097                  */
1098                 ref->node = NULL;
1099         }
1100
1101         if (ref->death) {
1102                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103                              "%d delete ref %d desc %d has death notification\n",
1104                               ref->proc->pid, ref->data.debug_id,
1105                               ref->data.desc);
1106                 binder_dequeue_work(ref->proc, &ref->death->work);
1107                 binder_stats_deleted(BINDER_STAT_DEATH);
1108         }
1109         binder_stats_deleted(BINDER_STAT_REF);
1110 }
1111
1112 /**
1113  * binder_inc_ref_olocked() - increment the ref for given handle
1114  * @ref:         ref to be incremented
1115  * @strong:      if true, strong increment, else weak
1116  * @target_list: list to queue node work on
1117  *
1118  * Increment the ref. @ref->proc->outer_lock must be held on entry
1119  *
1120  * Return: 0, if successful, else errno
1121  */
1122 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123                                   struct list_head *target_list)
1124 {
1125         int ret;
1126
1127         if (strong) {
1128                 if (ref->data.strong == 0) {
1129                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1130                         if (ret)
1131                                 return ret;
1132                 }
1133                 ref->data.strong++;
1134         } else {
1135                 if (ref->data.weak == 0) {
1136                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1137                         if (ret)
1138                                 return ret;
1139                 }
1140                 ref->data.weak++;
1141         }
1142         return 0;
1143 }
1144
1145 /**
1146  * binder_dec_ref_olocked() - dec the ref for given handle
1147  * @ref:        ref to be decremented
1148  * @strong:     if true, strong decrement, else weak
1149  *
1150  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1151  *
1152  * Return: true if ref is cleaned up and ready to be freed
1153  */
1154 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155 {
1156         if (strong) {
1157                 if (ref->data.strong == 0) {
1158                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159                                           ref->proc->pid, ref->data.debug_id,
1160                                           ref->data.desc, ref->data.strong,
1161                                           ref->data.weak);
1162                         return false;
1163                 }
1164                 ref->data.strong--;
1165                 if (ref->data.strong == 0)
1166                         binder_dec_node(ref->node, strong, 1);
1167         } else {
1168                 if (ref->data.weak == 0) {
1169                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170                                           ref->proc->pid, ref->data.debug_id,
1171                                           ref->data.desc, ref->data.strong,
1172                                           ref->data.weak);
1173                         return false;
1174                 }
1175                 ref->data.weak--;
1176         }
1177         if (ref->data.strong == 0 && ref->data.weak == 0) {
1178                 binder_cleanup_ref_olocked(ref);
1179                 return true;
1180         }
1181         return false;
1182 }
1183
1184 /**
1185  * binder_get_node_from_ref() - get the node from the given proc/desc
1186  * @proc:       proc containing the ref
1187  * @desc:       the handle associated with the ref
1188  * @need_strong_ref: if true, only return node if ref is strong
1189  * @rdata:      the id/refcount data for the ref
1190  *
1191  * Given a proc and ref handle, return the associated binder_node
1192  *
1193  * Return: a binder_node or NULL if not found or not strong when strong required
1194  */
1195 static struct binder_node *binder_get_node_from_ref(
1196                 struct binder_proc *proc,
1197                 u32 desc, bool need_strong_ref,
1198                 struct binder_ref_data *rdata)
1199 {
1200         struct binder_node *node;
1201         struct binder_ref *ref;
1202
1203         binder_proc_lock(proc);
1204         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205         if (!ref)
1206                 goto err_no_ref;
1207         node = ref->node;
1208         /*
1209          * Take an implicit reference on the node to ensure
1210          * it stays alive until the call to binder_put_node()
1211          */
1212         binder_inc_node_tmpref(node);
1213         if (rdata)
1214                 *rdata = ref->data;
1215         binder_proc_unlock(proc);
1216
1217         return node;
1218
1219 err_no_ref:
1220         binder_proc_unlock(proc);
1221         return NULL;
1222 }
1223
1224 /**
1225  * binder_free_ref() - free the binder_ref
1226  * @ref:        ref to free
1227  *
1228  * Free the binder_ref. Free the binder_node indicated by ref->node
1229  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230  */
1231 static void binder_free_ref(struct binder_ref *ref)
1232 {
1233         if (ref->node)
1234                 binder_free_node(ref->node);
1235         kfree(ref->death);
1236         kfree(ref);
1237 }
1238
1239 /**
1240  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241  * @proc:       proc containing the ref
1242  * @desc:       the handle associated with the ref
1243  * @increment:  true=inc reference, false=dec reference
1244  * @strong:     true=strong reference, false=weak reference
1245  * @rdata:      the id/refcount data for the ref
1246  *
1247  * Given a proc and ref handle, increment or decrement the ref
1248  * according to "increment" arg.
1249  *
1250  * Return: 0 if successful, else errno
1251  */
1252 static int binder_update_ref_for_handle(struct binder_proc *proc,
1253                 uint32_t desc, bool increment, bool strong,
1254                 struct binder_ref_data *rdata)
1255 {
1256         int ret = 0;
1257         struct binder_ref *ref;
1258         bool delete_ref = false;
1259
1260         binder_proc_lock(proc);
1261         ref = binder_get_ref_olocked(proc, desc, strong);
1262         if (!ref) {
1263                 ret = -EINVAL;
1264                 goto err_no_ref;
1265         }
1266         if (increment)
1267                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1268         else
1269                 delete_ref = binder_dec_ref_olocked(ref, strong);
1270
1271         if (rdata)
1272                 *rdata = ref->data;
1273         binder_proc_unlock(proc);
1274
1275         if (delete_ref)
1276                 binder_free_ref(ref);
1277         return ret;
1278
1279 err_no_ref:
1280         binder_proc_unlock(proc);
1281         return ret;
1282 }
1283
1284 /**
1285  * binder_dec_ref_for_handle() - dec the ref for given handle
1286  * @proc:       proc containing the ref
1287  * @desc:       the handle associated with the ref
1288  * @strong:     true=strong reference, false=weak reference
1289  * @rdata:      the id/refcount data for the ref
1290  *
1291  * Just calls binder_update_ref_for_handle() to decrement the ref.
1292  *
1293  * Return: 0 if successful, else errno
1294  */
1295 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297 {
1298         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299 }
1300
1301
1302 /**
1303  * binder_inc_ref_for_node() - increment the ref for given proc/node
1304  * @proc:        proc containing the ref
1305  * @node:        target node
1306  * @strong:      true=strong reference, false=weak reference
1307  * @target_list: worklist to use if node is incremented
1308  * @rdata:       the id/refcount data for the ref
1309  *
1310  * Given a proc and node, increment the ref. Create the ref if it
1311  * doesn't already exist
1312  *
1313  * Return: 0 if successful, else errno
1314  */
1315 static int binder_inc_ref_for_node(struct binder_proc *proc,
1316                         struct binder_node *node,
1317                         bool strong,
1318                         struct list_head *target_list,
1319                         struct binder_ref_data *rdata)
1320 {
1321         struct binder_ref *ref;
1322         struct binder_ref *new_ref = NULL;
1323         int ret = 0;
1324
1325         binder_proc_lock(proc);
1326         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327         if (!ref) {
1328                 binder_proc_unlock(proc);
1329                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330                 if (!new_ref)
1331                         return -ENOMEM;
1332                 binder_proc_lock(proc);
1333                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334         }
1335         ret = binder_inc_ref_olocked(ref, strong, target_list);
1336         *rdata = ref->data;
1337         binder_proc_unlock(proc);
1338         if (new_ref && ref != new_ref)
1339                 /*
1340                  * Another thread created the ref first so
1341                  * free the one we allocated
1342                  */
1343                 kfree(new_ref);
1344         return ret;
1345 }
1346
1347 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348                                            struct binder_transaction *t)
1349 {
1350         BUG_ON(!target_thread);
1351         assert_spin_locked(&target_thread->proc->inner_lock);
1352         BUG_ON(target_thread->transaction_stack != t);
1353         BUG_ON(target_thread->transaction_stack->from != target_thread);
1354         target_thread->transaction_stack =
1355                 target_thread->transaction_stack->from_parent;
1356         t->from = NULL;
1357 }
1358
1359 /**
1360  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361  * @thread:     thread to decrement
1362  *
1363  * A thread needs to be kept alive while being used to create or
1364  * handle a transaction. binder_get_txn_from() is used to safely
1365  * extract t->from from a binder_transaction and keep the thread
1366  * indicated by t->from from being freed. When done with that
1367  * binder_thread, this function is called to decrement the
1368  * tmp_ref and free if appropriate (thread has been released
1369  * and no transaction being processed by the driver)
1370  */
1371 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372 {
1373         /*
1374          * atomic is used to protect the counter value while
1375          * it cannot reach zero or thread->is_dead is false
1376          */
1377         binder_inner_proc_lock(thread->proc);
1378         atomic_dec(&thread->tmp_ref);
1379         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380                 binder_inner_proc_unlock(thread->proc);
1381                 binder_free_thread(thread);
1382                 return;
1383         }
1384         binder_inner_proc_unlock(thread->proc);
1385 }
1386
1387 /**
1388  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389  * @proc:       proc to decrement
1390  *
1391  * A binder_proc needs to be kept alive while being used to create or
1392  * handle a transaction. proc->tmp_ref is incremented when
1393  * creating a new transaction or the binder_proc is currently in-use
1394  * by threads that are being released. When done with the binder_proc,
1395  * this function is called to decrement the counter and free the
1396  * proc if appropriate (proc has been released, all threads have
1397  * been released and not currently in use to process a transaction).
1398  */
1399 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400 {
1401         binder_inner_proc_lock(proc);
1402         proc->tmp_ref--;
1403         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404                         !proc->tmp_ref) {
1405                 binder_inner_proc_unlock(proc);
1406                 binder_free_proc(proc);
1407                 return;
1408         }
1409         binder_inner_proc_unlock(proc);
1410 }
1411
1412 /**
1413  * binder_get_txn_from() - safely extract the "from" thread in transaction
1414  * @t:  binder transaction for t->from
1415  *
1416  * Atomically return the "from" thread and increment the tmp_ref
1417  * count for the thread to ensure it stays alive until
1418  * binder_thread_dec_tmpref() is called.
1419  *
1420  * Return: the value of t->from
1421  */
1422 static struct binder_thread *binder_get_txn_from(
1423                 struct binder_transaction *t)
1424 {
1425         struct binder_thread *from;
1426
1427         spin_lock(&t->lock);
1428         from = t->from;
1429         if (from)
1430                 atomic_inc(&from->tmp_ref);
1431         spin_unlock(&t->lock);
1432         return from;
1433 }
1434
1435 /**
1436  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437  * @t:  binder transaction for t->from
1438  *
1439  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440  * to guarantee that the thread cannot be released while operating on it.
1441  * The caller must call binder_inner_proc_unlock() to release the inner lock
1442  * as well as call binder_dec_thread_txn() to release the reference.
1443  *
1444  * Return: the value of t->from
1445  */
1446 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447                 struct binder_transaction *t)
1448         __acquires(&t->from->proc->inner_lock)
1449 {
1450         struct binder_thread *from;
1451
1452         from = binder_get_txn_from(t);
1453         if (!from) {
1454                 __acquire(&from->proc->inner_lock);
1455                 return NULL;
1456         }
1457         binder_inner_proc_lock(from->proc);
1458         if (t->from) {
1459                 BUG_ON(from != t->from);
1460                 return from;
1461         }
1462         binder_inner_proc_unlock(from->proc);
1463         __acquire(&from->proc->inner_lock);
1464         binder_thread_dec_tmpref(from);
1465         return NULL;
1466 }
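/*
 * Illustrative sketch, not driver code: the calling convention expected
 * by binder_get_txn_from_and_acq_inner().  binder_send_failed_reply() and
 * binder_transaction() below are the real users; "t" stands for whatever
 * transaction the caller holds.
 *
 *	from = binder_get_txn_from_and_acq_inner(t);
 *	if (from) {
 *		// from->proc->inner_lock is held, from cannot be freed
 *		...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	} else {
 *		// sparse-only annotation, no lock is actually held
 *		__release(&from->proc->inner_lock);
 *	}
 */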
1467
1468 /**
1469  * binder_free_txn_fixups() - free unprocessed fd fixups
1470  * @t:  binder transaction whose fd fixups are to be freed
1471  *
1472  * If the transaction is being torn down prior to being
1473  * processed by the target process, free all of the
1474  * fd fixups and fput the file structs. It is safe to
1475  * call this function after the fixups have been
1476  * processed -- in that case, the list will be empty.
1477  */
1478 static void binder_free_txn_fixups(struct binder_transaction *t)
1479 {
1480         struct binder_txn_fd_fixup *fixup, *tmp;
1481
1482         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483                 fput(fixup->file);
1484                 list_del(&fixup->fixup_entry);
1485                 kfree(fixup);
1486         }
1487 }
1488
1489 static void binder_free_transaction(struct binder_transaction *t)
1490 {
1491         struct binder_proc *target_proc = t->to_proc;
1492
1493         if (target_proc) {
1494                 binder_inner_proc_lock(target_proc);
1495                 if (t->buffer)
1496                         t->buffer->transaction = NULL;
1497                 binder_inner_proc_unlock(target_proc);
1498         }
1499         /*
1500          * If the transaction has no target_proc, then
1501          * t->buffer->transaction has already been cleared.
1502          */
1503         binder_free_txn_fixups(t);
1504         kfree(t);
1505         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1506 }
1507
1508 static void binder_send_failed_reply(struct binder_transaction *t,
1509                                      uint32_t error_code)
1510 {
1511         struct binder_thread *target_thread;
1512         struct binder_transaction *next;
1513
1514         BUG_ON(t->flags & TF_ONE_WAY);
1515         while (1) {
1516                 target_thread = binder_get_txn_from_and_acq_inner(t);
1517                 if (target_thread) {
1518                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1519                                      "send failed reply for transaction %d to %d:%d\n",
1520                                       t->debug_id,
1521                                       target_thread->proc->pid,
1522                                       target_thread->pid);
1523
1524                         binder_pop_transaction_ilocked(target_thread, t);
1525                         if (target_thread->reply_error.cmd == BR_OK) {
1526                                 target_thread->reply_error.cmd = error_code;
1527                                 binder_enqueue_thread_work_ilocked(
1528                                         target_thread,
1529                                         &target_thread->reply_error.work);
1530                                 wake_up_interruptible(&target_thread->wait);
1531                         } else {
1532                                 /*
1533                                  * Cannot get here for normal operation, but
1534                                  * we can if multiple synchronous transactions
1535                                  * are sent without blocking for responses.
1536                                  * Just ignore the 2nd error in this case.
1537                                  */
1538                                 pr_warn("Unexpected reply error: %u\n",
1539                                         target_thread->reply_error.cmd);
1540                         }
1541                         binder_inner_proc_unlock(target_thread->proc);
1542                         binder_thread_dec_tmpref(target_thread);
1543                         binder_free_transaction(t);
1544                         return;
1545                 }
1546                 __release(&target_thread->proc->inner_lock);
1547                 next = t->from_parent;
1548
1549                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1550                              "send failed reply for transaction %d, target dead\n",
1551                              t->debug_id);
1552
1553                 binder_free_transaction(t);
1554                 if (next == NULL) {
1555                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1556                                      "reply failed, no target thread at root\n");
1557                         return;
1558                 }
1559                 t = next;
1560                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1561                              "reply failed, no target thread -- retry %d\n",
1562                               t->debug_id);
1563         }
1564 }
1565
1566 /**
1567  * binder_cleanup_transaction() - cleans up undelivered transaction
1568  * @t:          transaction that needs to be cleaned up
1569  * @reason:     reason the transaction wasn't delivered
1570  * @error_code: error to return to caller (if synchronous call)
1571  */
1572 static void binder_cleanup_transaction(struct binder_transaction *t,
1573                                        const char *reason,
1574                                        uint32_t error_code)
1575 {
1576         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1577                 binder_send_failed_reply(t, error_code);
1578         } else {
1579                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1580                         "undelivered transaction %d, %s\n",
1581                         t->debug_id, reason);
1582                 binder_free_transaction(t);
1583         }
1584 }
1585
1586 /**
1587  * binder_get_object() - gets object and checks for valid metadata
1588  * @proc:       binder_proc owning the buffer
1589  * @buffer:     binder_buffer that we're parsing.
1590  * @offset:     offset in the @buffer at which to validate an object.
1591  * @object:     struct binder_object to read into
1592  *
1593  * Return:      If there's a valid metadata object at @offset in @buffer, the
1594  *              size of that object. Otherwise, it returns zero. The object
1595  *              is read into the struct binder_object pointed to by @object.
1596  */
1597 static size_t binder_get_object(struct binder_proc *proc,
1598                                 struct binder_buffer *buffer,
1599                                 unsigned long offset,
1600                                 struct binder_object *object)
1601 {
1602         size_t read_size;
1603         struct binder_object_header *hdr;
1604         size_t object_size = 0;
1605
1606         read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1607         if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
1608             binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1609                                           offset, read_size))
1610                 return 0;
1611
1612         /* Ok, now see if we read a complete object. */
1613         hdr = &object->hdr;
1614         switch (hdr->type) {
1615         case BINDER_TYPE_BINDER:
1616         case BINDER_TYPE_WEAK_BINDER:
1617         case BINDER_TYPE_HANDLE:
1618         case BINDER_TYPE_WEAK_HANDLE:
1619                 object_size = sizeof(struct flat_binder_object);
1620                 break;
1621         case BINDER_TYPE_FD:
1622                 object_size = sizeof(struct binder_fd_object);
1623                 break;
1624         case BINDER_TYPE_PTR:
1625                 object_size = sizeof(struct binder_buffer_object);
1626                 break;
1627         case BINDER_TYPE_FDA:
1628                 object_size = sizeof(struct binder_fd_array_object);
1629                 break;
1630         default:
1631                 return 0;
1632         }
1633         if (offset <= buffer->data_size - object_size &&
1634             buffer->data_size >= object_size)
1635                 return object_size;
1636         else
1637                 return 0;
1638 }
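/*
 * Rough layout sketch, inferred from how the offsets are consumed below
 * rather than a normative description: a transaction buffer carries the
 * flat data first, then a pointer-aligned array of binder_size_t offsets,
 * each pointing at an object header inside the data area.
 *
 *	|<------ data_size ------>|pad|<---- offsets_size ---->|
 *	| ... objA ....... objB ..|   | off(objA) | off(objB)  |
 *
 * The release and translate paths compute the start of the offsets array
 * as ALIGN(buffer->data_size, sizeof(void *)), read each entry with
 * binder_alloc_copy_from_buffer() and hand it to binder_get_object().
 */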
1639
1640 /**
1641  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1642  * @proc:       binder_proc owning the buffer
1643  * @b:          binder_buffer containing the object
1644  * @object:     struct binder_object to read into
1645  * @index:      index in offset array at which the binder_buffer_object is
1646  *              located
1647  * @start_offset: points to the start of the offset array
1648  * @object_offsetp: offset of @object read from @b
1649  * @num_valid:  the number of valid offsets in the offset array
1650  *
1651  * Return:      If @index is within the valid range of the offset array
1652  *              described by @start_offset and @num_valid, and if there's a valid
1653  *              binder_buffer_object at the offset found in index @index
1654  *              of the offset array, that object is returned. Otherwise,
1655  *              %NULL is returned.
1656  *              Note that the offset found in index @index itself is not
1657  *              verified; this function assumes that @num_valid elements
1658  *              from @start_offset were previously verified to have valid offsets.
1659  *              If @object_offsetp is non-NULL, then the offset within
1660  *              @b is written to it.
1661  */
1662 static struct binder_buffer_object *binder_validate_ptr(
1663                                                 struct binder_proc *proc,
1664                                                 struct binder_buffer *b,
1665                                                 struct binder_object *object,
1666                                                 binder_size_t index,
1667                                                 binder_size_t start_offset,
1668                                                 binder_size_t *object_offsetp,
1669                                                 binder_size_t num_valid)
1670 {
1671         size_t object_size;
1672         binder_size_t object_offset;
1673         unsigned long buffer_offset;
1674
1675         if (index >= num_valid)
1676                 return NULL;
1677
1678         buffer_offset = start_offset + sizeof(binder_size_t) * index;
1679         if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1680                                           b, buffer_offset,
1681                                           sizeof(object_offset)))
1682                 return NULL;
1683         object_size = binder_get_object(proc, b, object_offset, object);
1684         if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1685                 return NULL;
1686         if (object_offsetp)
1687                 *object_offsetp = object_offset;
1688
1689         return &object->bbo;
1690 }
1691
1692 /**
1693  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1694  * @proc:               binder_proc owning the buffer
1695  * @b:                  transaction buffer
1696  * @objects_start_offset: offset to start of objects buffer
1697  * @buffer_obj_offset:  offset to binder_buffer_object in which to fix up
1698  * @fixup_offset:       start offset in @b to fix up
1699  * @last_obj_offset:    offset to last binder_buffer_object that we fixed
1700  * @last_min_offset:    minimum fixup offset in object at @last_obj_offset
1701  *
1702  * Return:              %true if a fixup in buffer @b at offset @fixup_offset
1703  *                      is allowed.
1704  *
1705  * For safety reasons, we only allow fixups inside a buffer to happen
1706  * at increasing offsets; additionally, we only allow fixup on the last
1707  * buffer object that was verified, or one of its parents.
1708  *
1709  * Example of what is allowed:
1710  *
1711  * A
1712  *   B (parent = A, offset = 0)
1713  *   C (parent = A, offset = 16)
1714  *     D (parent = C, offset = 0)
1715  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1716  *
1717  * Examples of what is not allowed:
1718  *
1719  * Decreasing offsets within the same parent:
1720  * A
1721  *   C (parent = A, offset = 16)
1722  *   B (parent = A, offset = 0) // decreasing offset within A
1723  *
1724  * Referring to a parent that wasn't the last object or any of its parents:
1725  * A
1726  *   B (parent = A, offset = 0)
1727  *   C (parent = A, offset = 0)
1728  *   C (parent = A, offset = 16)
1729  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1730  */
1731 static bool binder_validate_fixup(struct binder_proc *proc,
1732                                   struct binder_buffer *b,
1733                                   binder_size_t objects_start_offset,
1734                                   binder_size_t buffer_obj_offset,
1735                                   binder_size_t fixup_offset,
1736                                   binder_size_t last_obj_offset,
1737                                   binder_size_t last_min_offset)
1738 {
1739         if (!last_obj_offset) {
1740                 /* Nothing to fix up */
1741                 return false;
1742         }
1743
1744         while (last_obj_offset != buffer_obj_offset) {
1745                 unsigned long buffer_offset;
1746                 struct binder_object last_object;
1747                 struct binder_buffer_object *last_bbo;
1748                 size_t object_size = binder_get_object(proc, b, last_obj_offset,
1749                                                        &last_object);
1750                 if (object_size != sizeof(*last_bbo))
1751                         return false;
1752
1753                 last_bbo = &last_object.bbo;
1754                 /*
1755                  * Safe to retrieve the parent of last_obj, since it
1756                  * was already previously verified by the driver.
1757                  */
1758                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1759                         return false;
1760                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1761                 buffer_offset = objects_start_offset +
1762                         sizeof(binder_size_t) * last_bbo->parent;
1763                 if (binder_alloc_copy_from_buffer(&proc->alloc,
1764                                                   &last_obj_offset,
1765                                                   b, buffer_offset,
1766                                                   sizeof(last_obj_offset)))
1767                         return false;
1768         }
1769         return (fixup_offset >= last_min_offset);
1770 }
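/*
 * Illustrative sketch, not driver code: how a caller is expected to feed
 * binder_validate_fixup() while walking the offsets array.  The actual
 * bookkeeping lives later in binder_transaction() and in
 * binder_fixup_parent(); the variable names below only make the
 * "increasing offsets" rule concrete.
 *
 *	last_fixup_obj_off = 0;		// no buffer object accepted yet
 *	last_fixup_min_off = 0;
 *	for each entry in the offsets array {
 *		// for a BINDER_TYPE_PTR object that has a parent:
 *		if (!binder_validate_fixup(proc, b, off_start_offset,
 *					   parent_offset, bp->parent_offset,
 *					   last_fixup_obj_off,
 *					   last_fixup_min_off))
 *			reject the transaction;
 *		// once the object is accepted, remember where it was:
 *		last_fixup_obj_off = object_offset;
 *		last_fixup_min_off = 0;
 *	}
 */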
1771
1772 /**
1773  * struct binder_task_work_cb - for deferred close
1774  *
1775  * @twork:                callback_head for task work
1776  * @file:                 file to put once the task work runs
1777  *
1778  * Structure to pass task work to be handled after
1779  * returning from binder_ioctl() via task_work_add().
1780  */
1781 struct binder_task_work_cb {
1782         struct callback_head twork;
1783         struct file *file;
1784 };
1785
1786 /**
1787  * binder_do_fd_close() - task_work callback to close a file descriptor
1788  * @twork:      callback head for task work
1789  *
1790  * It is not safe to call ksys_close() during the binder_ioctl()
1791  * function if there is a chance that binder's own file descriptor
1792  * might be closed. This is to meet the requirements for using
1793  * fdget() (see comments for __fget_light()). Therefore use
1794  * task_work_add() to schedule the close operation once we have
1795  * returned from binder_ioctl(). This function is the callback
1796  * for that mechanism and does the final fput() on the file
1797  * backing the descriptor being closed.
1798  */
1799 static void binder_do_fd_close(struct callback_head *twork)
1800 {
1801         struct binder_task_work_cb *twcb = container_of(twork,
1802                         struct binder_task_work_cb, twork);
1803
1804         fput(twcb->file);
1805         kfree(twcb);
1806 }
1807
1808 /**
1809  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1810  * @fd:         file-descriptor to close
1811  *
1812  * See comments in binder_do_fd_close(). This function is used to schedule
1813  * a file-descriptor to be closed after returning from binder_ioctl().
1814  */
1815 static void binder_deferred_fd_close(int fd)
1816 {
1817         struct binder_task_work_cb *twcb;
1818
1819         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1820         if (!twcb)
1821                 return;
1822         init_task_work(&twcb->twork, binder_do_fd_close);
1823         __close_fd_get_file(fd, &twcb->file);
1824         if (twcb->file) {
1825                 filp_close(twcb->file, current->files);
1826                 task_work_add(current, &twcb->twork, TWA_RESUME);
1827         } else {
1828                 kfree(twcb);
1829         }
1830 }
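/*
 * Timing sketch, informational only, for the deferred close path above:
 *
 *	binder_ioctl()
 *	  -> binder_deferred_fd_close(fd)
 *	       __close_fd_get_file(fd, &twcb->file)	// detach fd, get file
 *	       filp_close(twcb->file, ...)		// close it now
 *	       task_work_add(..., TWA_RESUME)		// defer the last fput
 *	  ... binder_ioctl() returns ...
 *	task work runs on the way back to user space
 *	  -> binder_do_fd_close()
 *	       fput(twcb->file)				// final reference drop
 */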
1831
1832 static void binder_transaction_buffer_release(struct binder_proc *proc,
1833                                               struct binder_buffer *buffer,
1834                                               binder_size_t failed_at,
1835                                               bool is_failure)
1836 {
1837         int debug_id = buffer->debug_id;
1838         binder_size_t off_start_offset, buffer_offset, off_end_offset;
1839
1840         binder_debug(BINDER_DEBUG_TRANSACTION,
1841                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1842                      proc->pid, buffer->debug_id,
1843                      buffer->data_size, buffer->offsets_size,
1844                      (unsigned long long)failed_at);
1845
1846         if (buffer->target_node)
1847                 binder_dec_node(buffer->target_node, 1, 0);
1848
1849         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1850         off_end_offset = is_failure ? failed_at :
1851                                 off_start_offset + buffer->offsets_size;
1852         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1853              buffer_offset += sizeof(binder_size_t)) {
1854                 struct binder_object_header *hdr;
1855                 size_t object_size = 0;
1856                 struct binder_object object;
1857                 binder_size_t object_offset;
1858
1859                 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1860                                                    buffer, buffer_offset,
1861                                                    sizeof(object_offset)))
1862                         object_size = binder_get_object(proc, buffer,
1863                                                         object_offset, &object);
1864                 if (object_size == 0) {
1865                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1866                                debug_id, (u64)object_offset, buffer->data_size);
1867                         continue;
1868                 }
1869                 hdr = &object.hdr;
1870                 switch (hdr->type) {
1871                 case BINDER_TYPE_BINDER:
1872                 case BINDER_TYPE_WEAK_BINDER: {
1873                         struct flat_binder_object *fp;
1874                         struct binder_node *node;
1875
1876                         fp = to_flat_binder_object(hdr);
1877                         node = binder_get_node(proc, fp->binder);
1878                         if (node == NULL) {
1879                                 pr_err("transaction release %d bad node %016llx\n",
1880                                        debug_id, (u64)fp->binder);
1881                                 break;
1882                         }
1883                         binder_debug(BINDER_DEBUG_TRANSACTION,
1884                                      "        node %d u%016llx\n",
1885                                      node->debug_id, (u64)node->ptr);
1886                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1887                                         0);
1888                         binder_put_node(node);
1889                 } break;
1890                 case BINDER_TYPE_HANDLE:
1891                 case BINDER_TYPE_WEAK_HANDLE: {
1892                         struct flat_binder_object *fp;
1893                         struct binder_ref_data rdata;
1894                         int ret;
1895
1896                         fp = to_flat_binder_object(hdr);
1897                         ret = binder_dec_ref_for_handle(proc, fp->handle,
1898                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1899
1900                         if (ret) {
1901                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
1902                                  debug_id, fp->handle, ret);
1903                                 break;
1904                         }
1905                         binder_debug(BINDER_DEBUG_TRANSACTION,
1906                                      "        ref %d desc %d\n",
1907                                      rdata.debug_id, rdata.desc);
1908                 } break;
1909
1910                 case BINDER_TYPE_FD: {
1911                         /*
1912                          * No need to close the file here since user-space
1913                          * closes it for successfully delivered
1914                          * transactions. For transactions that weren't
1915                          * delivered, the new fd was never allocated so
1916                          * there is no need to close and the fput on the
1917                          * file is done when the transaction is torn
1918                          * down.
1919                          */
1920                 } break;
1921                 case BINDER_TYPE_PTR:
1922                         /*
1923                          * Nothing to do here, this will get cleaned up when the
1924                          * transaction buffer gets freed
1925                          */
1926                         break;
1927                 case BINDER_TYPE_FDA: {
1928                         struct binder_fd_array_object *fda;
1929                         struct binder_buffer_object *parent;
1930                         struct binder_object ptr_object;
1931                         binder_size_t fda_offset;
1932                         size_t fd_index;
1933                         binder_size_t fd_buf_size;
1934                         binder_size_t num_valid;
1935
1936                         if (proc->tsk != current->group_leader) {
1937                                 /*
1938                                  * Nothing to do if running in sender context
1939                                  * The fd fixups have not been applied so no
1940                                  * fds need to be closed.
1941                                  */
1942                                 continue;
1943                         }
1944
1945                         num_valid = (buffer_offset - off_start_offset) /
1946                                                 sizeof(binder_size_t);
1947                         fda = to_binder_fd_array_object(hdr);
1948                         parent = binder_validate_ptr(proc, buffer, &ptr_object,
1949                                                      fda->parent,
1950                                                      off_start_offset,
1951                                                      NULL,
1952                                                      num_valid);
1953                         if (!parent) {
1954                                 pr_err("transaction release %d bad parent offset\n",
1955                                        debug_id);
1956                                 continue;
1957                         }
1958                         fd_buf_size = sizeof(u32) * fda->num_fds;
1959                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1960                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
1961                                        debug_id, (u64)fda->num_fds);
1962                                 continue;
1963                         }
1964                         if (fd_buf_size > parent->length ||
1965                             fda->parent_offset > parent->length - fd_buf_size) {
1966                                 /* No space for all file descriptors here. */
1967                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1968                                        debug_id, (u64)fda->num_fds);
1969                                 continue;
1970                         }
1971                         /*
1972                          * the source data for binder_buffer_object is visible
1973                          * to user-space and the @buffer element is the user
1974                          * pointer to the buffer_object containing the fd_array.
1975                          * Convert the address to an offset relative to
1976                          * the base of the transaction buffer.
1977                          */
1978                         fda_offset =
1979                             (parent->buffer - (uintptr_t)buffer->user_data) +
1980                             fda->parent_offset;
1981                         for (fd_index = 0; fd_index < fda->num_fds;
1982                              fd_index++) {
1983                                 u32 fd;
1984                                 int err;
1985                                 binder_size_t offset = fda_offset +
1986                                         fd_index * sizeof(fd);
1987
1988                                 err = binder_alloc_copy_from_buffer(
1989                                                 &proc->alloc, &fd, buffer,
1990                                                 offset, sizeof(fd));
1991                                 WARN_ON(err);
1992                                 if (!err)
1993                                         binder_deferred_fd_close(fd);
1994                         }
1995                 } break;
1996                 default:
1997                         pr_err("transaction release %d bad object type %x\n",
1998                                 debug_id, hdr->type);
1999                         break;
2000                 }
2001         }
2002 }
2003
2004 static int binder_translate_binder(struct flat_binder_object *fp,
2005                                    struct binder_transaction *t,
2006                                    struct binder_thread *thread)
2007 {
2008         struct binder_node *node;
2009         struct binder_proc *proc = thread->proc;
2010         struct binder_proc *target_proc = t->to_proc;
2011         struct binder_ref_data rdata;
2012         int ret = 0;
2013
2014         node = binder_get_node(proc, fp->binder);
2015         if (!node) {
2016                 node = binder_new_node(proc, fp);
2017                 if (!node)
2018                         return -ENOMEM;
2019         }
2020         if (fp->cookie != node->cookie) {
2021                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2022                                   proc->pid, thread->pid, (u64)fp->binder,
2023                                   node->debug_id, (u64)fp->cookie,
2024                                   (u64)node->cookie);
2025                 ret = -EINVAL;
2026                 goto done;
2027         }
2028         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2029                 ret = -EPERM;
2030                 goto done;
2031         }
2032
2033         ret = binder_inc_ref_for_node(target_proc, node,
2034                         fp->hdr.type == BINDER_TYPE_BINDER,
2035                         &thread->todo, &rdata);
2036         if (ret)
2037                 goto done;
2038
2039         if (fp->hdr.type == BINDER_TYPE_BINDER)
2040                 fp->hdr.type = BINDER_TYPE_HANDLE;
2041         else
2042                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2043         fp->binder = 0;
2044         fp->handle = rdata.desc;
2045         fp->cookie = 0;
2046
2047         trace_binder_transaction_node_to_ref(t, node, &rdata);
2048         binder_debug(BINDER_DEBUG_TRANSACTION,
2049                      "        node %d u%016llx -> ref %d desc %d\n",
2050                      node->debug_id, (u64)node->ptr,
2051                      rdata.debug_id, rdata.desc);
2052 done:
2053         binder_put_node(node);
2054         return ret;
2055 }
2056
2057 static int binder_translate_handle(struct flat_binder_object *fp,
2058                                    struct binder_transaction *t,
2059                                    struct binder_thread *thread)
2060 {
2061         struct binder_proc *proc = thread->proc;
2062         struct binder_proc *target_proc = t->to_proc;
2063         struct binder_node *node;
2064         struct binder_ref_data src_rdata;
2065         int ret = 0;
2066
2067         node = binder_get_node_from_ref(proc, fp->handle,
2068                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2069         if (!node) {
2070                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2071                                   proc->pid, thread->pid, fp->handle);
2072                 return -EINVAL;
2073         }
2074         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2075                 ret = -EPERM;
2076                 goto done;
2077         }
2078
2079         binder_node_lock(node);
2080         if (node->proc == target_proc) {
2081                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2082                         fp->hdr.type = BINDER_TYPE_BINDER;
2083                 else
2084                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2085                 fp->binder = node->ptr;
2086                 fp->cookie = node->cookie;
2087                 if (node->proc)
2088                         binder_inner_proc_lock(node->proc);
2089                 else
2090                         __acquire(&node->proc->inner_lock);
2091                 binder_inc_node_nilocked(node,
2092                                          fp->hdr.type == BINDER_TYPE_BINDER,
2093                                          0, NULL);
2094                 if (node->proc)
2095                         binder_inner_proc_unlock(node->proc);
2096                 else
2097                         __release(&node->proc->inner_lock);
2098                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2099                 binder_debug(BINDER_DEBUG_TRANSACTION,
2100                              "        ref %d desc %d -> node %d u%016llx\n",
2101                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2102                              (u64)node->ptr);
2103                 binder_node_unlock(node);
2104         } else {
2105                 struct binder_ref_data dest_rdata;
2106
2107                 binder_node_unlock(node);
2108                 ret = binder_inc_ref_for_node(target_proc, node,
2109                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2110                                 NULL, &dest_rdata);
2111                 if (ret)
2112                         goto done;
2113
2114                 fp->binder = 0;
2115                 fp->handle = dest_rdata.desc;
2116                 fp->cookie = 0;
2117                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2118                                                     &dest_rdata);
2119                 binder_debug(BINDER_DEBUG_TRANSACTION,
2120                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2121                              src_rdata.debug_id, src_rdata.desc,
2122                              dest_rdata.debug_id, dest_rdata.desc,
2123                              node->debug_id);
2124         }
2125 done:
2126         binder_put_node(node);
2127         return ret;
2128 }
2129
2130 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2131                                struct binder_transaction *t,
2132                                struct binder_thread *thread,
2133                                struct binder_transaction *in_reply_to)
2134 {
2135         struct binder_proc *proc = thread->proc;
2136         struct binder_proc *target_proc = t->to_proc;
2137         struct binder_txn_fd_fixup *fixup;
2138         struct file *file;
2139         int ret = 0;
2140         bool target_allows_fd;
2141
2142         if (in_reply_to)
2143                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2144         else
2145                 target_allows_fd = t->buffer->target_node->accept_fds;
2146         if (!target_allows_fd) {
2147                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2148                                   proc->pid, thread->pid,
2149                                   in_reply_to ? "reply" : "transaction",
2150                                   fd);
2151                 ret = -EPERM;
2152                 goto err_fd_not_accepted;
2153         }
2154
2155         file = fget(fd);
2156         if (!file) {
2157                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2158                                   proc->pid, thread->pid, fd);
2159                 ret = -EBADF;
2160                 goto err_fget;
2161         }
2162         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2163         if (ret < 0) {
2164                 ret = -EPERM;
2165                 goto err_security;
2166         }
2167
2168         /*
2169          * Add fixup record for this transaction. The allocation
2170          * of the fd in the target needs to be done from a
2171          * target thread.
2172          */
2173         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2174         if (!fixup) {
2175                 ret = -ENOMEM;
2176                 goto err_alloc;
2177         }
2178         fixup->file = file;
2179         fixup->offset = fd_offset;
2180         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2181         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2182
2183         return ret;
2184
2185 err_alloc:
2186 err_security:
2187         fput(file);
2188 err_fget:
2189 err_fd_not_accepted:
2190         return ret;
2191 }
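/*
 * Lifecycle sketch, informational only, for the fixup record created
 * above.  The target-side application step happens outside this excerpt
 * and is only described, not shown:
 *
 *	binder_translate_fd()
 *	  fixup->file   = fget(fd)	// sender's file, reference held
 *	  fixup->offset = fd_offset	// where the new fd value is patched
 *	  list_add_tail(&fixup->fixup_entry, &t->fd_fixups)
 *
 *	later, either:
 *	  - a target thread allocates the new fd and patches it in at
 *	    fixup->offset when the transaction is read, or
 *	  - the transaction is torn down undelivered and
 *	    binder_free_txn_fixups() does the fput() and frees the record.
 */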
2192
2193 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2194                                      struct binder_buffer_object *parent,
2195                                      struct binder_transaction *t,
2196                                      struct binder_thread *thread,
2197                                      struct binder_transaction *in_reply_to)
2198 {
2199         binder_size_t fdi, fd_buf_size;
2200         binder_size_t fda_offset;
2201         struct binder_proc *proc = thread->proc;
2202         struct binder_proc *target_proc = t->to_proc;
2203
2204         fd_buf_size = sizeof(u32) * fda->num_fds;
2205         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2206                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2207                                   proc->pid, thread->pid, (u64)fda->num_fds);
2208                 return -EINVAL;
2209         }
2210         if (fd_buf_size > parent->length ||
2211             fda->parent_offset > parent->length - fd_buf_size) {
2212                 /* No space for all file descriptors here. */
2213                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2214                                   proc->pid, thread->pid, (u64)fda->num_fds);
2215                 return -EINVAL;
2216         }
2217         /*
2218          * the source data for binder_buffer_object is visible
2219          * to user-space and the @buffer element is the user
2220          * pointer to the buffer_object containing the fd_array.
2221          * Convert the address to an offset relative to
2222          * the base of the transaction buffer.
2223          */
2224         fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2225                 fda->parent_offset;
2226         if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2227                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2228                                   proc->pid, thread->pid);
2229                 return -EINVAL;
2230         }
2231         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2232                 u32 fd;
2233                 int ret;
2234                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2235
2236                 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2237                                                     &fd, t->buffer,
2238                                                     offset, sizeof(fd));
2239                 if (!ret)
2240                         ret = binder_translate_fd(fd, offset, t, thread,
2241                                                   in_reply_to);
2242                 if (ret < 0)
2243                         return ret;
2244         }
2245         return 0;
2246 }
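/*
 * Worked example with made-up numbers for the fda_offset computation
 * above.  Assume the transaction buffer starts at user address 0x4000
 * (t->buffer->user_data), the parent buffer object was copied to user
 * address 0x4100 (parent->buffer) and the fd array starts 0x20 bytes
 * into that parent (fda->parent_offset):
 *
 *	fda_offset = (0x4100 - 0x4000) + 0x20 = 0x120
 *
 * i.e. the fds sit 0x120 bytes from the start of the transaction buffer,
 * which is exactly the offset binder_alloc_copy_from_buffer() needs.
 */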
2247
2248 static int binder_fixup_parent(struct binder_transaction *t,
2249                                struct binder_thread *thread,
2250                                struct binder_buffer_object *bp,
2251                                binder_size_t off_start_offset,
2252                                binder_size_t num_valid,
2253                                binder_size_t last_fixup_obj_off,
2254                                binder_size_t last_fixup_min_off)
2255 {
2256         struct binder_buffer_object *parent;
2257         struct binder_buffer *b = t->buffer;
2258         struct binder_proc *proc = thread->proc;
2259         struct binder_proc *target_proc = t->to_proc;
2260         struct binder_object object;
2261         binder_size_t buffer_offset;
2262         binder_size_t parent_offset;
2263
2264         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2265                 return 0;
2266
2267         parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2268                                      off_start_offset, &parent_offset,
2269                                      num_valid);
2270         if (!parent) {
2271                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2272                                   proc->pid, thread->pid);
2273                 return -EINVAL;
2274         }
2275
2276         if (!binder_validate_fixup(target_proc, b, off_start_offset,
2277                                    parent_offset, bp->parent_offset,
2278                                    last_fixup_obj_off,
2279                                    last_fixup_min_off)) {
2280                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2281                                   proc->pid, thread->pid);
2282                 return -EINVAL;
2283         }
2284
2285         if (parent->length < sizeof(binder_uintptr_t) ||
2286             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2287                 /* No space for a pointer here! */
2288                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2289                                   proc->pid, thread->pid);
2290                 return -EINVAL;
2291         }
2292         buffer_offset = bp->parent_offset +
2293                         (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2294         if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2295                                         &bp->buffer, sizeof(bp->buffer))) {
2296                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2297                                   proc->pid, thread->pid);
2298                 return -EINVAL;
2299         }
2300
2301         return 0;
2302 }
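/*
 * Illustrative sketch, not driver code, of what the final copy in
 * binder_fixup_parent() achieves: the parent object's payload in the
 * target buffer ends up holding the target-side address of its child.
 *
 *	parent payload (at parent->buffer in the target)
 *	+---------+------------------+---------+
 *	|   ...   |   pointer slot   |   ...   |
 *	+---------+------------------+---------+
 *	          ^ bp->parent_offset bytes in
 *
 *	bp->buffer (the child's new address) is written into that slot via
 *	binder_alloc_copy_to_buffer().
 */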
2303
2304 /**
2305  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2306  * @t:          transaction to send
2307  * @proc:       process to send the transaction to
2308  * @thread:     thread in @proc to send the transaction to (may be NULL)
2309  *
2310  * This function queues a transaction to the specified process. It will try
2311  * to find a thread in the target process to handle the transaction and
2312  * wake it up. If no thread is found, the work is queued to the proc
2313  * waitqueue.
2314  *
2315  * If the @thread parameter is not NULL, the transaction is always queued
2316  * to the todo list of that specific thread.
2317  *
2318  * Return:      true if the transaction was successfully queued
2319  *              false if the target process or thread is dead
2320  */
2321 static bool binder_proc_transaction(struct binder_transaction *t,
2322                                     struct binder_proc *proc,
2323                                     struct binder_thread *thread)
2324 {
2325         struct binder_node *node = t->buffer->target_node;
2326         bool oneway = !!(t->flags & TF_ONE_WAY);
2327         bool pending_async = false;
2328
2329         BUG_ON(!node);
2330         binder_node_lock(node);
2331         if (oneway) {
2332                 BUG_ON(thread);
2333                 if (node->has_async_transaction)
2334                         pending_async = true;
2335                 else
2336                         node->has_async_transaction = true;
2337         }
2338
2339         binder_inner_proc_lock(proc);
2340
2341         if (proc->is_dead || (thread && thread->is_dead)) {
2342                 binder_inner_proc_unlock(proc);
2343                 binder_node_unlock(node);
2344                 return false;
2345         }
2346
2347         if (!thread && !pending_async)
2348                 thread = binder_select_thread_ilocked(proc);
2349
2350         if (thread)
2351                 binder_enqueue_thread_work_ilocked(thread, &t->work);
2352         else if (!pending_async)
2353                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2354         else
2355                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2356
2357         if (!pending_async)
2358                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2359
2360         binder_inner_proc_unlock(proc);
2361         binder_node_unlock(node);
2362
2363         return true;
2364 }
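/*
 * Queueing summary, informational only, for binder_proc_transaction()
 * above:
 *
 *	oneway and node->has_async_transaction	-> node->async_todo,
 *						   no wakeup (pending_async)
 *	@thread passed in			-> thread->todo  + wakeup
 *	otherwise, a waiting thread is found	-> thread->todo  + wakeup
 *	no thread available			-> proc->todo    + wakeup
 */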
2365
2366 /**
2367  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2368  * @node:         struct binder_node for which to get refs
2369  * @procp:        returns @node->proc if valid
2370  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2371  *
2372  * User-space normally keeps the node alive when creating a transaction
2373  * since it has a reference to the target. The local strong ref keeps it
2374  * alive if the sending process dies before the target process processes
2375  * the transaction. If the source process is malicious or has a reference
2376  * counting bug, relying on the local strong ref can fail.
2377  *
2378  * Since user-space can cause the local strong ref to go away, we also take
2379  * a tmpref on the node to ensure it survives while we are constructing
2380  * the transaction. We also need a tmpref on the proc while we are
2381  * constructing the transaction, so we take that here as well.
2382  *
2383  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2384  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2385  * target proc has died, @error is set to BR_DEAD_REPLY.
2386  */
2387 static struct binder_node *binder_get_node_refs_for_txn(
2388                 struct binder_node *node,
2389                 struct binder_proc **procp,
2390                 uint32_t *error)
2391 {
2392         struct binder_node *target_node = NULL;
2393
2394         binder_node_inner_lock(node);
2395         if (node->proc) {
2396                 target_node = node;
2397                 binder_inc_node_nilocked(node, 1, 0, NULL);
2398                 binder_inc_node_tmpref_ilocked(node);
2399                 node->proc->tmp_ref++;
2400                 *procp = node->proc;
2401         } else
2402                 *error = BR_DEAD_REPLY;
2403         binder_node_inner_unlock(node);
2404
2405         return target_node;
2406 }
2407
2408 static void binder_transaction(struct binder_proc *proc,
2409                                struct binder_thread *thread,
2410                                struct binder_transaction_data *tr, int reply,
2411                                binder_size_t extra_buffers_size)
2412 {
2413         int ret;
2414         struct binder_transaction *t;
2415         struct binder_work *w;
2416         struct binder_work *tcomplete;
2417         binder_size_t buffer_offset = 0;
2418         binder_size_t off_start_offset, off_end_offset;
2419         binder_size_t off_min;
2420         binder_size_t sg_buf_offset, sg_buf_end_offset;
2421         struct binder_proc *target_proc = NULL;
2422         struct binder_thread *target_thread = NULL;
2423         struct binder_node *target_node = NULL;
2424         struct binder_transaction *in_reply_to = NULL;
2425         struct binder_transaction_log_entry *e;
2426         uint32_t return_error = 0;
2427         uint32_t return_error_param = 0;
2428         uint32_t return_error_line = 0;
2429         binder_size_t last_fixup_obj_off = 0;
2430         binder_size_t last_fixup_min_off = 0;
2431         struct binder_context *context = proc->context;
2432         int t_debug_id = atomic_inc_return(&binder_last_id);
2433         char *secctx = NULL;
2434         u32 secctx_sz = 0;
2435
2436         e = binder_transaction_log_add(&binder_transaction_log);
2437         e->debug_id = t_debug_id;
2438         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2439         e->from_proc = proc->pid;
2440         e->from_thread = thread->pid;
2441         e->target_handle = tr->target.handle;
2442         e->data_size = tr->data_size;
2443         e->offsets_size = tr->offsets_size;
2444         strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2445
2446         if (reply) {
2447                 binder_inner_proc_lock(proc);
2448                 in_reply_to = thread->transaction_stack;
2449                 if (in_reply_to == NULL) {
2450                         binder_inner_proc_unlock(proc);
2451                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2452                                           proc->pid, thread->pid);
2453                         return_error = BR_FAILED_REPLY;
2454                         return_error_param = -EPROTO;
2455                         return_error_line = __LINE__;
2456                         goto err_empty_call_stack;
2457                 }
2458                 if (in_reply_to->to_thread != thread) {
2459                         spin_lock(&in_reply_to->lock);
2460                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2461                                 proc->pid, thread->pid, in_reply_to->debug_id,
2462                                 in_reply_to->to_proc ?
2463                                 in_reply_to->to_proc->pid : 0,
2464                                 in_reply_to->to_thread ?
2465                                 in_reply_to->to_thread->pid : 0);
2466                         spin_unlock(&in_reply_to->lock);
2467                         binder_inner_proc_unlock(proc);
2468                         return_error = BR_FAILED_REPLY;
2469                         return_error_param = -EPROTO;
2470                         return_error_line = __LINE__;
2471                         in_reply_to = NULL;
2472                         goto err_bad_call_stack;
2473                 }
2474                 thread->transaction_stack = in_reply_to->to_parent;
2475                 binder_inner_proc_unlock(proc);
2476                 binder_set_nice(in_reply_to->saved_priority);
2477                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2478                 if (target_thread == NULL) {
2479                         /* annotation for sparse */
2480                         __release(&target_thread->proc->inner_lock);
2481                         return_error = BR_DEAD_REPLY;
2482                         return_error_line = __LINE__;
2483                         goto err_dead_binder;
2484                 }
2485                 if (target_thread->transaction_stack != in_reply_to) {
2486                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2487                                 proc->pid, thread->pid,
2488                                 target_thread->transaction_stack ?
2489                                 target_thread->transaction_stack->debug_id : 0,
2490                                 in_reply_to->debug_id);
2491                         binder_inner_proc_unlock(target_thread->proc);
2492                         return_error = BR_FAILED_REPLY;
2493                         return_error_param = -EPROTO;
2494                         return_error_line = __LINE__;
2495                         in_reply_to = NULL;
2496                         target_thread = NULL;
2497                         goto err_dead_binder;
2498                 }
2499                 target_proc = target_thread->proc;
2500                 target_proc->tmp_ref++;
2501                 binder_inner_proc_unlock(target_thread->proc);
2502         } else {
2503                 if (tr->target.handle) {
2504                         struct binder_ref *ref;
2505
2506                         /*
2507                          * There must already be a strong ref
2508                          * on this node. If so, do a strong
2509                          * increment on the node to ensure it
2510                          * stays alive until the transaction is
2511                          * done.
2512                          */
2513                         binder_proc_lock(proc);
2514                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2515                                                      true);
2516                         if (ref) {
2517                                 target_node = binder_get_node_refs_for_txn(
2518                                                 ref->node, &target_proc,
2519                                                 &return_error);
2520                         } else {
2521                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2522                                                   proc->pid, thread->pid);
2523                                 return_error = BR_FAILED_REPLY;
2524                         }
2525                         binder_proc_unlock(proc);
2526                 } else {
2527                         mutex_lock(&context->context_mgr_node_lock);
2528                         target_node = context->binder_context_mgr_node;
2529                         if (target_node)
2530                                 target_node = binder_get_node_refs_for_txn(
2531                                                 target_node, &target_proc,
2532                                                 &return_error);
2533                         else
2534                                 return_error = BR_DEAD_REPLY;
2535                         mutex_unlock(&context->context_mgr_node_lock);
2536                         if (target_node && target_proc->pid == proc->pid) {
2537                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2538                                                   proc->pid, thread->pid);
2539                                 return_error = BR_FAILED_REPLY;
2540                                 return_error_param = -EINVAL;
2541                                 return_error_line = __LINE__;
2542                                 goto err_invalid_target_handle;
2543                         }
2544                 }
2545                 if (!target_node) {
2546                         /*
2547                          * return_error is set above
2548                          */
2549                         return_error_param = -EINVAL;
2550                         return_error_line = __LINE__;
2551                         goto err_dead_binder;
2552                 }
2553                 e->to_node = target_node->debug_id;
2554                 if (WARN_ON(proc == target_proc)) {
2555                         return_error = BR_FAILED_REPLY;
2556                         return_error_param = -EINVAL;
2557                         return_error_line = __LINE__;
2558                         goto err_invalid_target_handle;
2559                 }
2560                 if (security_binder_transaction(proc->tsk,
2561                                                 target_proc->tsk) < 0) {
2562                         return_error = BR_FAILED_REPLY;
2563                         return_error_param = -EPERM;
2564                         return_error_line = __LINE__;
2565                         goto err_invalid_target_handle;
2566                 }
2567                 binder_inner_proc_lock(proc);
2568
2569                 w = list_first_entry_or_null(&thread->todo,
2570                                              struct binder_work, entry);
2571                 if (!(tr->flags & TF_ONE_WAY) && w &&
2572                     w->type == BINDER_WORK_TRANSACTION) {
2573                         /*
2574                          * Do not allow new outgoing transaction from a
2575                          * thread that has a transaction at the head of
2576                          * its todo list. Only need to check the head
2577                          * because binder_select_thread_ilocked picks a
2578                          * thread from proc->waiting_threads to enqueue
2579                          * the transaction, and nothing is queued to the
2580                          * todo list while the thread is on waiting_threads.
2581                          */
2582                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2583                                           proc->pid, thread->pid);
2584                         binder_inner_proc_unlock(proc);
2585                         return_error = BR_FAILED_REPLY;
2586                         return_error_param = -EPROTO;
2587                         return_error_line = __LINE__;
2588                         goto err_bad_todo_list;
2589                 }
2590
2591                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2592                         struct binder_transaction *tmp;
2593
2594                         tmp = thread->transaction_stack;
2595                         if (tmp->to_thread != thread) {
2596                                 spin_lock(&tmp->lock);
2597                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2598                                         proc->pid, thread->pid, tmp->debug_id,
2599                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2600                                         tmp->to_thread ?
2601                                         tmp->to_thread->pid : 0);
2602                                 spin_unlock(&tmp->lock);
2603                                 binder_inner_proc_unlock(proc);
2604                                 return_error = BR_FAILED_REPLY;
2605                                 return_error_param = -EPROTO;
2606                                 return_error_line = __LINE__;
2607                                 goto err_bad_call_stack;
2608                         }
2609                         while (tmp) {
2610                                 struct binder_thread *from;
2611
2612                                 spin_lock(&tmp->lock);
2613                                 from = tmp->from;
2614                                 if (from && from->proc == target_proc) {
2615                                         atomic_inc(&from->tmp_ref);
2616                                         target_thread = from;
2617                                         spin_unlock(&tmp->lock);
2618                                         break;
2619                                 }
2620                                 spin_unlock(&tmp->lock);
2621                                 tmp = tmp->from_parent;
2622                         }
2623                 }
2624                 binder_inner_proc_unlock(proc);
2625         }
2626         if (target_thread)
2627                 e->to_thread = target_thread->pid;
2628         e->to_proc = target_proc->pid;
2629
2630         /* TODO: reuse incoming transaction for reply */
2631         t = kzalloc(sizeof(*t), GFP_KERNEL);
2632         if (t == NULL) {
2633                 return_error = BR_FAILED_REPLY;
2634                 return_error_param = -ENOMEM;
2635                 return_error_line = __LINE__;
2636                 goto err_alloc_t_failed;
2637         }
2638         INIT_LIST_HEAD(&t->fd_fixups);
2639         binder_stats_created(BINDER_STAT_TRANSACTION);
2640         spin_lock_init(&t->lock);
2641
2642         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2643         if (tcomplete == NULL) {
2644                 return_error = BR_FAILED_REPLY;
2645                 return_error_param = -ENOMEM;
2646                 return_error_line = __LINE__;
2647                 goto err_alloc_tcomplete_failed;
2648         }
2649         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2650
2651         t->debug_id = t_debug_id;
2652
2653         if (reply)
2654                 binder_debug(BINDER_DEBUG_TRANSACTION,
2655                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2656                              proc->pid, thread->pid, t->debug_id,
2657                              target_proc->pid, target_thread->pid,
2658                              (u64)tr->data.ptr.buffer,
2659                              (u64)tr->data.ptr.offsets,
2660                              (u64)tr->data_size, (u64)tr->offsets_size,
2661                              (u64)extra_buffers_size);
2662         else
2663                 binder_debug(BINDER_DEBUG_TRANSACTION,
2664                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2665                              proc->pid, thread->pid, t->debug_id,
2666                              target_proc->pid, target_node->debug_id,
2667                              (u64)tr->data.ptr.buffer,
2668                              (u64)tr->data.ptr.offsets,
2669                              (u64)tr->data_size, (u64)tr->offsets_size,
2670                              (u64)extra_buffers_size);
2671
2672         if (!reply && !(tr->flags & TF_ONE_WAY))
2673                 t->from = thread;
2674         else
2675                 t->from = NULL;
2676         t->sender_euid = task_euid(proc->tsk);
2677         t->to_proc = target_proc;
2678         t->to_thread = target_thread;
2679         t->code = tr->code;
2680         t->flags = tr->flags;
2681         t->priority = task_nice(current);
2682
2683         if (target_node && target_node->txn_security_ctx) {
2684                 u32 secid;
2685                 size_t added_size;
2686
2687                 security_task_getsecid(proc->tsk, &secid);
2688                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2689                 if (ret) {
2690                         return_error = BR_FAILED_REPLY;
2691                         return_error_param = ret;
2692                         return_error_line = __LINE__;
2693                         goto err_get_secctx_failed;
2694                 }
2695                 added_size = ALIGN(secctx_sz, sizeof(u64));
2696                 extra_buffers_size += added_size;
2697                 if (extra_buffers_size < added_size) {
2698                         /* integer overflow of extra_buffers_size */
2699                         return_error = BR_FAILED_REPLY;
2700                         return_error_param = -EINVAL;
2701                         return_error_line = __LINE__;
2702                         goto err_bad_extra_size;
2703                 }
2704         }
2705
2706         trace_binder_transaction(reply, t, target_node);
2707
2708         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2709                 tr->offsets_size, extra_buffers_size,
2710                 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2711         if (IS_ERR(t->buffer)) {
2712                 /*
2713                  * -ESRCH indicates the VMA was cleared. The target is dying.
2714                  */
2715                 return_error_param = PTR_ERR(t->buffer);
2716                 return_error = return_error_param == -ESRCH ?
2717                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2718                 return_error_line = __LINE__;
2719                 t->buffer = NULL;
2720                 goto err_binder_alloc_buf_failed;
2721         }
2722         if (secctx) {
2723                 int err;
2724                 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2725                                     ALIGN(tr->offsets_size, sizeof(void *)) +
2726                                     ALIGN(extra_buffers_size, sizeof(void *)) -
2727                                     ALIGN(secctx_sz, sizeof(u64));
2728
2729                 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2730                 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2731                                                   t->buffer, buf_offset,
2732                                                   secctx, secctx_sz);
2733                 if (err) {
2734                         t->security_ctx = 0;
2735                         WARN_ON(1);
2736                 }
2737                 security_release_secctx(secctx, secctx_sz);
2738                 secctx = NULL;
2739         }
2740         t->buffer->debug_id = t->debug_id;
2741         t->buffer->transaction = t;
2742         t->buffer->target_node = target_node;
2743         trace_binder_transaction_alloc_buf(t->buffer);
2744
2745         if (binder_alloc_copy_user_to_buffer(
2746                                 &target_proc->alloc,
2747                                 t->buffer, 0,
2748                                 (const void __user *)
2749                                         (uintptr_t)tr->data.ptr.buffer,
2750                                 tr->data_size)) {
2751                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2752                                 proc->pid, thread->pid);
2753                 return_error = BR_FAILED_REPLY;
2754                 return_error_param = -EFAULT;
2755                 return_error_line = __LINE__;
2756                 goto err_copy_data_failed;
2757         }
2758         if (binder_alloc_copy_user_to_buffer(
2759                                 &target_proc->alloc,
2760                                 t->buffer,
2761                                 ALIGN(tr->data_size, sizeof(void *)),
2762                                 (const void __user *)
2763                                         (uintptr_t)tr->data.ptr.offsets,
2764                                 tr->offsets_size)) {
2765                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2766                                 proc->pid, thread->pid);
2767                 return_error = BR_FAILED_REPLY;
2768                 return_error_param = -EFAULT;
2769                 return_error_line = __LINE__;
2770                 goto err_copy_data_failed;
2771         }
2772         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2773                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2774                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2775                 return_error = BR_FAILED_REPLY;
2776                 return_error_param = -EINVAL;
2777                 return_error_line = __LINE__;
2778                 goto err_bad_offset;
2779         }
2780         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2781                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2782                                   proc->pid, thread->pid,
2783                                   (u64)extra_buffers_size);
2784                 return_error = BR_FAILED_REPLY;
2785                 return_error_param = -EINVAL;
2786                 return_error_line = __LINE__;
2787                 goto err_bad_offset;
2788         }
2789         off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2790         buffer_offset = off_start_offset;
2791         off_end_offset = off_start_offset + tr->offsets_size;
2792         sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2793         sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2794                 ALIGN(secctx_sz, sizeof(u64));
2795         off_min = 0;
2796         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2797              buffer_offset += sizeof(binder_size_t)) {
2798                 struct binder_object_header *hdr;
2799                 size_t object_size;
2800                 struct binder_object object;
2801                 binder_size_t object_offset;
2802
2803                 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2804                                                   &object_offset,
2805                                                   t->buffer,
2806                                                   buffer_offset,
2807                                                   sizeof(object_offset))) {
2808                         return_error = BR_FAILED_REPLY;
2809                         return_error_param = -EINVAL;
2810                         return_error_line = __LINE__;
2811                         goto err_bad_offset;
2812                 }
2813                 object_size = binder_get_object(target_proc, t->buffer,
2814                                                 object_offset, &object);
2815                 if (object_size == 0 || object_offset < off_min) {
2816                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2817                                           proc->pid, thread->pid,
2818                                           (u64)object_offset,
2819                                           (u64)off_min,
2820                                           (u64)t->buffer->data_size);
2821                         return_error = BR_FAILED_REPLY;
2822                         return_error_param = -EINVAL;
2823                         return_error_line = __LINE__;
2824                         goto err_bad_offset;
2825                 }
2826
2827                 hdr = &object.hdr;
2828                 off_min = object_offset + object_size;
2829                 switch (hdr->type) {
2830                 case BINDER_TYPE_BINDER:
2831                 case BINDER_TYPE_WEAK_BINDER: {
2832                         struct flat_binder_object *fp;
2833
2834                         fp = to_flat_binder_object(hdr);
2835                         ret = binder_translate_binder(fp, t, thread);
2836
2837                         if (ret < 0 ||
2838                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2839                                                         t->buffer,
2840                                                         object_offset,
2841                                                         fp, sizeof(*fp))) {
2842                                 return_error = BR_FAILED_REPLY;
2843                                 return_error_param = ret;
2844                                 return_error_line = __LINE__;
2845                                 goto err_translate_failed;
2846                         }
2847                 } break;
2848                 case BINDER_TYPE_HANDLE:
2849                 case BINDER_TYPE_WEAK_HANDLE: {
2850                         struct flat_binder_object *fp;
2851
2852                         fp = to_flat_binder_object(hdr);
2853                         ret = binder_translate_handle(fp, t, thread);
2854                         if (ret < 0 ||
2855                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2856                                                         t->buffer,
2857                                                         object_offset,
2858                                                         fp, sizeof(*fp))) {
2859                                 return_error = BR_FAILED_REPLY;
2860                                 return_error_param = ret;
2861                                 return_error_line = __LINE__;
2862                                 goto err_translate_failed;
2863                         }
2864                 } break;
2865
2866                 case BINDER_TYPE_FD: {
2867                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2868                         binder_size_t fd_offset = object_offset +
2869                                 (uintptr_t)&fp->fd - (uintptr_t)fp;
2870                         int ret = binder_translate_fd(fp->fd, fd_offset, t,
2871                                                       thread, in_reply_to);
2872
2873                         fp->pad_binder = 0;
2874                         if (ret < 0 ||
2875                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2876                                                         t->buffer,
2877                                                         object_offset,
2878                                                         fp, sizeof(*fp))) {
2879                                 return_error = BR_FAILED_REPLY;
2880                                 return_error_param = ret;
2881                                 return_error_line = __LINE__;
2882                                 goto err_translate_failed;
2883                         }
2884                 } break;
2885                 case BINDER_TYPE_FDA: {
2886                         struct binder_object ptr_object;
2887                         binder_size_t parent_offset;
2888                         struct binder_fd_array_object *fda =
2889                                 to_binder_fd_array_object(hdr);
2890                         size_t num_valid = (buffer_offset - off_start_offset) /
2891                                                 sizeof(binder_size_t);
2892                         struct binder_buffer_object *parent =
2893                                 binder_validate_ptr(target_proc, t->buffer,
2894                                                     &ptr_object, fda->parent,
2895                                                     off_start_offset,
2896                                                     &parent_offset,
2897                                                     num_valid);
2898                         if (!parent) {
2899                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2900                                                   proc->pid, thread->pid);
2901                                 return_error = BR_FAILED_REPLY;
2902                                 return_error_param = -EINVAL;
2903                                 return_error_line = __LINE__;
2904                                 goto err_bad_parent;
2905                         }
2906                         if (!binder_validate_fixup(target_proc, t->buffer,
2907                                                    off_start_offset,
2908                                                    parent_offset,
2909                                                    fda->parent_offset,
2910                                                    last_fixup_obj_off,
2911                                                    last_fixup_min_off)) {
2912                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2913                                                   proc->pid, thread->pid);
2914                                 return_error = BR_FAILED_REPLY;
2915                                 return_error_param = -EINVAL;
2916                                 return_error_line = __LINE__;
2917                                 goto err_bad_parent;
2918                         }
2919                         ret = binder_translate_fd_array(fda, parent, t, thread,
2920                                                         in_reply_to);
2921                         if (ret < 0) {
2922                                 return_error = BR_FAILED_REPLY;
2923                                 return_error_param = ret;
2924                                 return_error_line = __LINE__;
2925                                 goto err_translate_failed;
2926                         }
2927                         last_fixup_obj_off = parent_offset;
2928                         last_fixup_min_off =
2929                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
2930                 } break;
2931                 case BINDER_TYPE_PTR: {
2932                         struct binder_buffer_object *bp =
2933                                 to_binder_buffer_object(hdr);
2934                         size_t buf_left = sg_buf_end_offset - sg_buf_offset;
2935                         size_t num_valid;
2936
2937                         if (bp->length > buf_left) {
2938                                 binder_user_error("%d:%d got transaction with too large buffer\n",
2939                                                   proc->pid, thread->pid);
2940                                 return_error = BR_FAILED_REPLY;
2941                                 return_error_param = -EINVAL;
2942                                 return_error_line = __LINE__;
2943                                 goto err_bad_offset;
2944                         }
2945                         if (binder_alloc_copy_user_to_buffer(
2946                                                 &target_proc->alloc,
2947                                                 t->buffer,
2948                                                 sg_buf_offset,
2949                                                 (const void __user *)
2950                                                         (uintptr_t)bp->buffer,
2951                                                 bp->length)) {
2952                                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2953                                                   proc->pid, thread->pid);
2954                                 return_error_param = -EFAULT;
2955                                 return_error = BR_FAILED_REPLY;
2956                                 return_error_line = __LINE__;
2957                                 goto err_copy_data_failed;
2958                         }
2959                         /* Fix up buffer pointer to target proc address space */
2960                         bp->buffer = (uintptr_t)
2961                                 t->buffer->user_data + sg_buf_offset;
2962                         sg_buf_offset += ALIGN(bp->length, sizeof(u64));
2963
2964                         num_valid = (buffer_offset - off_start_offset) /
2965                                         sizeof(binder_size_t);
2966                         ret = binder_fixup_parent(t, thread, bp,
2967                                                   off_start_offset,
2968                                                   num_valid,
2969                                                   last_fixup_obj_off,
2970                                                   last_fixup_min_off);
2971                         if (ret < 0 ||
2972                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2973                                                         t->buffer,
2974                                                         object_offset,
2975                                                         bp, sizeof(*bp))) {
2976                                 return_error = BR_FAILED_REPLY;
2977                                 return_error_param = ret;
2978                                 return_error_line = __LINE__;
2979                                 goto err_translate_failed;
2980                         }
2981                         last_fixup_obj_off = object_offset;
2982                         last_fixup_min_off = 0;
2983                 } break;
2984                 default:
2985                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2986                                 proc->pid, thread->pid, hdr->type);
2987                         return_error = BR_FAILED_REPLY;
2988                         return_error_param = -EINVAL;
2989                         return_error_line = __LINE__;
2990                         goto err_bad_object_type;
2991                 }
2992         }
2993         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2994         t->work.type = BINDER_WORK_TRANSACTION;
2995
2996         if (reply) {
2997                 binder_enqueue_thread_work(thread, tcomplete);
2998                 binder_inner_proc_lock(target_proc);
2999                 if (target_thread->is_dead) {
3000                         binder_inner_proc_unlock(target_proc);
3001                         goto err_dead_proc_or_thread;
3002                 }
3003                 BUG_ON(t->buffer->async_transaction != 0);
3004                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3005                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3006                 binder_inner_proc_unlock(target_proc);
3007                 wake_up_interruptible_sync(&target_thread->wait);
3008                 binder_free_transaction(in_reply_to);
3009         } else if (!(t->flags & TF_ONE_WAY)) {
3010                 BUG_ON(t->buffer->async_transaction != 0);
3011                 binder_inner_proc_lock(proc);
3012                 /*
3013                  * Defer the TRANSACTION_COMPLETE, so we don't return to
3014                  * userspace immediately; this allows the target process to
3015                  * immediately start processing this transaction, reducing
3016                  * latency. We will then return the TRANSACTION_COMPLETE when
3017                  * the target replies (or there is an error).
3018                  */
3019                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3020                 t->need_reply = 1;
3021                 t->from_parent = thread->transaction_stack;
3022                 thread->transaction_stack = t;
3023                 binder_inner_proc_unlock(proc);
3024                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3025                         binder_inner_proc_lock(proc);
3026                         binder_pop_transaction_ilocked(thread, t);
3027                         binder_inner_proc_unlock(proc);
3028                         goto err_dead_proc_or_thread;
3029                 }
3030         } else {
3031                 BUG_ON(target_node == NULL);
3032                 BUG_ON(t->buffer->async_transaction != 1);
3033                 binder_enqueue_thread_work(thread, tcomplete);
3034                 if (!binder_proc_transaction(t, target_proc, NULL))
3035                         goto err_dead_proc_or_thread;
3036         }
3037         if (target_thread)
3038                 binder_thread_dec_tmpref(target_thread);
3039         binder_proc_dec_tmpref(target_proc);
3040         if (target_node)
3041                 binder_dec_node_tmpref(target_node);
3042         /*
3043          * write barrier to synchronize with initialization
3044          * of log entry
3045          */
3046         smp_wmb();
3047         WRITE_ONCE(e->debug_id_done, t_debug_id);
3048         return;
3049
3050 err_dead_proc_or_thread:
3051         return_error = BR_DEAD_REPLY;
3052         return_error_line = __LINE__;
3053         binder_dequeue_work(proc, tcomplete);
3054 err_translate_failed:
3055 err_bad_object_type:
3056 err_bad_offset:
3057 err_bad_parent:
3058 err_copy_data_failed:
3059         binder_free_txn_fixups(t);
3060         trace_binder_transaction_failed_buffer_release(t->buffer);
3061         binder_transaction_buffer_release(target_proc, t->buffer,
3062                                           buffer_offset, true);
3063         if (target_node)
3064                 binder_dec_node_tmpref(target_node);
3065         target_node = NULL;
3066         t->buffer->transaction = NULL;
3067         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3068 err_binder_alloc_buf_failed:
3069 err_bad_extra_size:
3070         if (secctx)
3071                 security_release_secctx(secctx, secctx_sz);
3072 err_get_secctx_failed:
3073         kfree(tcomplete);
3074         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3075 err_alloc_tcomplete_failed:
3076         kfree(t);
3077         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3078 err_alloc_t_failed:
3079 err_bad_todo_list:
3080 err_bad_call_stack:
3081 err_empty_call_stack:
3082 err_dead_binder:
3083 err_invalid_target_handle:
3084         if (target_thread)
3085                 binder_thread_dec_tmpref(target_thread);
3086         if (target_proc)
3087                 binder_proc_dec_tmpref(target_proc);
3088         if (target_node) {
3089                 binder_dec_node(target_node, 1, 0);
3090                 binder_dec_node_tmpref(target_node);
3091         }
3092
3093         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3094                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3095                      proc->pid, thread->pid, return_error, return_error_param,
3096                      (u64)tr->data_size, (u64)tr->offsets_size,
3097                      return_error_line);
3098
3099         {
3100                 struct binder_transaction_log_entry *fe;
3101
3102                 e->return_error = return_error;
3103                 e->return_error_param = return_error_param;
3104                 e->return_error_line = return_error_line;
3105                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3106                 *fe = *e;
3107                 /*
3108                  * write barrier to synchronize with initialization
3109                  * of log entry
3110                  */
3111                 smp_wmb();
3112                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3113                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3114         }
3115
3116         BUG_ON(thread->return_error.cmd != BR_OK);
3117         if (in_reply_to) {
3118                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3119                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3120                 binder_send_failed_reply(in_reply_to, return_error);
3121         } else {
3122                 thread->return_error.cmd = return_error;
3123                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3124         }
3125 }
3126
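/*
 * For reference, a minimal userspace-side sketch of the request that
 * binder_transaction() above consumes (illustrative only: it assumes the
 * uapi <linux/android/binder.h> definitions, an open binder fd, a target
 * "handle" obtained elsewhere, caller-prepared "payload" and "offsets"
 * arrays, and a caller-defined method code "MY_CODE"):
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data tr;
 *	} __attribute__((packed)) writebuf = {
 *		.cmd = BC_TRANSACTION,
 *		.tr = {
 *			.target.handle = handle,
 *			.code = MY_CODE,
 *			.flags = 0,
 *			.data_size = sizeof(payload),
 *			.offsets_size = sizeof(offsets),
 *			.data.ptr.buffer = (binder_uintptr_t)payload,
 *			.data.ptr.offsets = (binder_uintptr_t)offsets,
 *		},
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(writebuf),
 *		.write_buffer = (binder_uintptr_t)&writebuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * Setting TF_ONE_WAY in .flags makes this a fire-and-forget call with no
 * reply. binder_thread_write() below strips the BC_TRANSACTION command
 * word and passes the binder_transaction_data to binder_transaction().
 */
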
3127 /**
3128  * binder_free_buf() - free the specified buffer
3129  * @proc:       binder proc that owns buffer
3130  * @buffer:     buffer to be freed
3131  *
3132  * If the buffer is for an async transaction, enqueue the next async
3133  * transaction from the node.
3134  *
3135  * Clean up the buffer and free it.
3136  */
3137 static void
3138 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3139 {
3140         binder_inner_proc_lock(proc);
3141         if (buffer->transaction) {
3142                 buffer->transaction->buffer = NULL;
3143                 buffer->transaction = NULL;
3144         }
3145         binder_inner_proc_unlock(proc);
3146         if (buffer->async_transaction && buffer->target_node) {
3147                 struct binder_node *buf_node;
3148                 struct binder_work *w;
3149
3150                 buf_node = buffer->target_node;
3151                 binder_node_inner_lock(buf_node);
3152                 BUG_ON(!buf_node->has_async_transaction);
3153                 BUG_ON(buf_node->proc != proc);
3154                 w = binder_dequeue_work_head_ilocked(
3155                                 &buf_node->async_todo);
3156                 if (!w) {
3157                         buf_node->has_async_transaction = false;
3158                 } else {
3159                         binder_enqueue_work_ilocked(
3160                                         w, &proc->todo);
3161                         binder_wakeup_proc_ilocked(proc);
3162                 }
3163                 binder_node_inner_unlock(buf_node);
3164         }
3165         trace_binder_transaction_buffer_release(buffer);
3166         binder_transaction_buffer_release(proc, buffer, 0, false);
3167         binder_alloc_free_buf(&proc->alloc, buffer);
3168 }
3169
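/*
 * Userspace owns the lifetime of a received transaction buffer: once a
 * BR_TRANSACTION or BR_REPLY has been consumed, the recipient hands the
 * buffer back with BC_FREE_BUFFER, which lands in binder_free_buf()
 * above. A minimal sketch (illustrative only, assuming "tr" is the
 * struct binder_transaction_data just parsed from the read buffer):
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) writebuf = {
 *		.cmd = BC_FREE_BUFFER,
 *		.buffer = tr.data.ptr.buffer,
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(writebuf),
 *		.write_buffer = (binder_uintptr_t)&writebuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */
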
3170 static int binder_thread_write(struct binder_proc *proc,
3171                         struct binder_thread *thread,
3172                         binder_uintptr_t binder_buffer, size_t size,
3173                         binder_size_t *consumed)
3174 {
3175         uint32_t cmd;
3176         struct binder_context *context = proc->context;
3177         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3178         void __user *ptr = buffer + *consumed;
3179         void __user *end = buffer + size;
3180
3181         while (ptr < end && thread->return_error.cmd == BR_OK) {
3182                 int ret;
3183
3184                 if (get_user(cmd, (uint32_t __user *)ptr))
3185                         return -EFAULT;
3186                 ptr += sizeof(uint32_t);
3187                 trace_binder_command(cmd);
3188                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3189                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3190                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3191                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3192                 }
3193                 switch (cmd) {
3194                 case BC_INCREFS:
3195                 case BC_ACQUIRE:
3196                 case BC_RELEASE:
3197                 case BC_DECREFS: {
3198                         uint32_t target;
3199                         const char *debug_string;
3200                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3201                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3202                         struct binder_ref_data rdata;
3203
3204                         if (get_user(target, (uint32_t __user *)ptr))
3205                                 return -EFAULT;
3206
3207                         ptr += sizeof(uint32_t);
3208                         ret = -1;
3209                         if (increment && !target) {
3210                                 struct binder_node *ctx_mgr_node;
3211
3212                                 mutex_lock(&context->context_mgr_node_lock);
3213                                 ctx_mgr_node = context->binder_context_mgr_node;
3214                                 if (ctx_mgr_node) {
3215                                         if (ctx_mgr_node->proc == proc) {
3216                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3217                                                                   proc->pid, thread->pid);
3218                                                 mutex_unlock(&context->context_mgr_node_lock);
3219                                                 return -EINVAL;
3220                                         }
3221                                         ret = binder_inc_ref_for_node(
3222                                                         proc, ctx_mgr_node,
3223                                                         strong, NULL, &rdata);
3224                                 }
3225                                 mutex_unlock(&context->context_mgr_node_lock);
3226                         }
3227                         if (ret)
3228                                 ret = binder_update_ref_for_handle(
3229                                                 proc, target, increment, strong,
3230                                                 &rdata);
3231                         if (!ret && rdata.desc != target) {
3232                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3233                                         proc->pid, thread->pid,
3234                                         target, rdata.desc);
3235                         }
3236                         switch (cmd) {
3237                         case BC_INCREFS:
3238                                 debug_string = "IncRefs";
3239                                 break;
3240                         case BC_ACQUIRE:
3241                                 debug_string = "Acquire";
3242                                 break;
3243                         case BC_RELEASE:
3244                                 debug_string = "Release";
3245                                 break;
3246                         case BC_DECREFS:
3247                         default:
3248                                 debug_string = "DecRefs";
3249                                 break;
3250                         }
3251                         if (ret) {
3252                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3253                                         proc->pid, thread->pid, debug_string,
3254                                         strong, target, ret);
3255                                 break;
3256                         }
3257                         binder_debug(BINDER_DEBUG_USER_REFS,
3258                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3259                                      proc->pid, thread->pid, debug_string,
3260                                      rdata.debug_id, rdata.desc, rdata.strong,
3261                                      rdata.weak);
3262                         break;
3263                 }
3264                 case BC_INCREFS_DONE:
3265                 case BC_ACQUIRE_DONE: {
3266                         binder_uintptr_t node_ptr;
3267                         binder_uintptr_t cookie;
3268                         struct binder_node *node;
3269                         bool free_node;
3270
3271                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3272                                 return -EFAULT;
3273                         ptr += sizeof(binder_uintptr_t);
3274                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3275                                 return -EFAULT;
3276                         ptr += sizeof(binder_uintptr_t);
3277                         node = binder_get_node(proc, node_ptr);
3278                         if (node == NULL) {
3279                                 binder_user_error("%d:%d %s u%016llx no match\n",
3280                                         proc->pid, thread->pid,
3281                                         cmd == BC_INCREFS_DONE ?
3282                                         "BC_INCREFS_DONE" :
3283                                         "BC_ACQUIRE_DONE",
3284                                         (u64)node_ptr);
3285                                 break;
3286                         }
3287                         if (cookie != node->cookie) {
3288                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3289                                         proc->pid, thread->pid,
3290                                         cmd == BC_INCREFS_DONE ?
3291                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3292                                         (u64)node_ptr, node->debug_id,
3293                                         (u64)cookie, (u64)node->cookie);
3294                                 binder_put_node(node);
3295                                 break;
3296                         }
3297                         binder_node_inner_lock(node);
3298                         if (cmd == BC_ACQUIRE_DONE) {
3299                                 if (node->pending_strong_ref == 0) {
3300                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3301                                                 proc->pid, thread->pid,
3302                                                 node->debug_id);
3303                                         binder_node_inner_unlock(node);
3304                                         binder_put_node(node);
3305                                         break;
3306                                 }
3307                                 node->pending_strong_ref = 0;
3308                         } else {
3309                                 if (node->pending_weak_ref == 0) {
3310                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3311                                                 proc->pid, thread->pid,
3312                                                 node->debug_id);
3313                                         binder_node_inner_unlock(node);
3314                                         binder_put_node(node);
3315                                         break;
3316                                 }
3317                                 node->pending_weak_ref = 0;
3318                         }
3319                         free_node = binder_dec_node_nilocked(node,
3320                                         cmd == BC_ACQUIRE_DONE, 0);
3321                         WARN_ON(free_node);
3322                         binder_debug(BINDER_DEBUG_USER_REFS,
3323                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3324                                      proc->pid, thread->pid,
3325                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3326                                      node->debug_id, node->local_strong_refs,
3327                                      node->local_weak_refs, node->tmp_refs);
3328                         binder_node_inner_unlock(node);
3329                         binder_put_node(node);
3330                         break;
3331                 }
3332                 case BC_ATTEMPT_ACQUIRE:
3333                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3334                         return -EINVAL;
3335                 case BC_ACQUIRE_RESULT:
3336                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3337                         return -EINVAL;
3338
3339                 case BC_FREE_BUFFER: {
3340                         binder_uintptr_t data_ptr;
3341                         struct binder_buffer *buffer;
3342
3343                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3344                                 return -EFAULT;
3345                         ptr += sizeof(binder_uintptr_t);
3346
3347                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3348                                                               data_ptr);
3349                         if (IS_ERR_OR_NULL(buffer)) {
3350                                 if (PTR_ERR(buffer) == -EPERM) {
3351                                         binder_user_error(
3352                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3353                                                 proc->pid, thread->pid,
3354                                                 (u64)data_ptr);
3355                                 } else {
3356                                         binder_user_error(
3357                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3358                                                 proc->pid, thread->pid,
3359                                                 (u64)data_ptr);
3360                                 }
3361                                 break;
3362                         }
3363                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3364                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3365                                      proc->pid, thread->pid, (u64)data_ptr,
3366                                      buffer->debug_id,
3367                                      buffer->transaction ? "active" : "finished");
3368                         binder_free_buf(proc, buffer);
3369                         break;
3370                 }
3371
3372                 case BC_TRANSACTION_SG:
3373                 case BC_REPLY_SG: {
3374                         struct binder_transaction_data_sg tr;
3375
3376                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3377                                 return -EFAULT;
3378                         ptr += sizeof(tr);
3379                         binder_transaction(proc, thread, &tr.transaction_data,
3380                                            cmd == BC_REPLY_SG, tr.buffers_size);
3381                         break;
3382                 }
3383                 case BC_TRANSACTION:
3384                 case BC_REPLY: {
3385                         struct binder_transaction_data tr;
3386
3387                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3388                                 return -EFAULT;
3389                         ptr += sizeof(tr);
3390                         binder_transaction(proc, thread, &tr,
3391                                            cmd == BC_REPLY, 0);
3392                         break;
3393                 }
3394
3395                 case BC_REGISTER_LOOPER:
3396                         binder_debug(BINDER_DEBUG_THREADS,
3397                                      "%d:%d BC_REGISTER_LOOPER\n",
3398                                      proc->pid, thread->pid);
3399                         binder_inner_proc_lock(proc);
3400                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3401                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3402                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3403                                         proc->pid, thread->pid);
3404                         } else if (proc->requested_threads == 0) {
3405                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3406                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3407                                         proc->pid, thread->pid);
3408                         } else {
3409                                 proc->requested_threads--;
3410                                 proc->requested_threads_started++;
3411                         }
3412                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3413                         binder_inner_proc_unlock(proc);
3414                         break;
3415                 case BC_ENTER_LOOPER:
3416                         binder_debug(BINDER_DEBUG_THREADS,
3417                                      "%d:%d BC_ENTER_LOOPER\n",
3418                                      proc->pid, thread->pid);
3419                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3420                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3421                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3422                                         proc->pid, thread->pid);
3423                         }
3424                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3425                         break;
3426                 case BC_EXIT_LOOPER:
3427                         binder_debug(BINDER_DEBUG_THREADS,
3428                                      "%d:%d BC_EXIT_LOOPER\n",
3429                                      proc->pid, thread->pid);
3430                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3431                         break;
3432
3433                 case BC_REQUEST_DEATH_NOTIFICATION:
3434                 case BC_CLEAR_DEATH_NOTIFICATION: {
3435                         uint32_t target;
3436                         binder_uintptr_t cookie;
3437                         struct binder_ref *ref;
3438                         struct binder_ref_death *death = NULL;
3439
3440                         if (get_user(target, (uint32_t __user *)ptr))
3441                                 return -EFAULT;
3442                         ptr += sizeof(uint32_t);
3443                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3444                                 return -EFAULT;
3445                         ptr += sizeof(binder_uintptr_t);
3446                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3447                                 /*
3448                                  * Allocate memory for death notification
3449                                  * before taking the lock
3450                                  */
3451                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3452                                 if (death == NULL) {
3453                                         WARN_ON(thread->return_error.cmd !=
3454                                                 BR_OK);
3455                                         thread->return_error.cmd = BR_ERROR;
3456                                         binder_enqueue_thread_work(
3457                                                 thread,
3458                                                 &thread->return_error.work);
3459                                         binder_debug(
3460                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3461                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3462                                                 proc->pid, thread->pid);
3463                                         break;
3464                                 }
3465                         }
3466                         binder_proc_lock(proc);
3467                         ref = binder_get_ref_olocked(proc, target, false);
3468                         if (ref == NULL) {
3469                                 binder_user_error("%d:%d %s invalid ref %d\n",
3470                                         proc->pid, thread->pid,
3471                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3472                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3473                                         "BC_CLEAR_DEATH_NOTIFICATION",
3474                                         target);
3475                                 binder_proc_unlock(proc);
3476                                 kfree(death);
3477                                 break;
3478                         }
3479
3480                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3481                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3482                                      proc->pid, thread->pid,
3483                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3484                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3485                                      "BC_CLEAR_DEATH_NOTIFICATION",
3486                                      (u64)cookie, ref->data.debug_id,
3487                                      ref->data.desc, ref->data.strong,
3488                                      ref->data.weak, ref->node->debug_id);
3489
3490                         binder_node_lock(ref->node);
3491                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3492                                 if (ref->death) {
3493                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3494                                                 proc->pid, thread->pid);
3495                                         binder_node_unlock(ref->node);
3496                                         binder_proc_unlock(proc);
3497                                         kfree(death);
3498                                         break;
3499                                 }
3500                                 binder_stats_created(BINDER_STAT_DEATH);
3501                                 INIT_LIST_HEAD(&death->work.entry);
3502                                 death->cookie = cookie;
3503                                 ref->death = death;
3504                                 if (ref->node->proc == NULL) {
3505                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3506
3507                                         binder_inner_proc_lock(proc);
3508                                         binder_enqueue_work_ilocked(
3509                                                 &ref->death->work, &proc->todo);
3510                                         binder_wakeup_proc_ilocked(proc);
3511                                         binder_inner_proc_unlock(proc);
3512                                 }
3513                         } else {
3514                                 if (ref->death == NULL) {
3515                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3516                                                 proc->pid, thread->pid);
3517                                         binder_node_unlock(ref->node);
3518                                         binder_proc_unlock(proc);
3519                                         break;
3520                                 }
3521                                 death = ref->death;
3522                                 if (death->cookie != cookie) {
3523                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3524                                                 proc->pid, thread->pid,
3525                                                 (u64)death->cookie,
3526                                                 (u64)cookie);
3527                                         binder_node_unlock(ref->node);
3528                                         binder_proc_unlock(proc);
3529                                         break;
3530                                 }
3531                                 ref->death = NULL;
3532                                 binder_inner_proc_lock(proc);
3533                                 if (list_empty(&death->work.entry)) {
3534                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3535                                         if (thread->looper &
3536                                             (BINDER_LOOPER_STATE_REGISTERED |
3537                                              BINDER_LOOPER_STATE_ENTERED))
3538                                                 binder_enqueue_thread_work_ilocked(
3539                                                                 thread,
3540                                                                 &death->work);
3541                                         else {
3542                                                 binder_enqueue_work_ilocked(
3543                                                                 &death->work,
3544                                                                 &proc->todo);
3545                                                 binder_wakeup_proc_ilocked(
3546                                                                 proc);
3547                                         }
3548                                 } else {
3549                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3550                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3551                                 }
3552                                 binder_inner_proc_unlock(proc);
3553                         }
3554                         binder_node_unlock(ref->node);
3555                         binder_proc_unlock(proc);
3556                 } break;
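                /*
                 * Taken together with the BC_DEAD_BINDER_DONE case below, the
                 * death-notification handshake seen from userspace is roughly
                 * (illustrative only; "handle" and "cookie" are caller-chosen
                 * values written through BINDER_WRITE_READ as elsewhere):
                 *
                 *   1. BC_REQUEST_DEATH_NOTIFICATION(handle, cookie)
                 *   2. the remote node dies
                 *   3. the kernel queues BR_DEAD_BINDER(cookie) for this proc
                 *   4. BC_DEAD_BINDER_DONE(cookie) acknowledges the delivery
                 *   5. BC_CLEAR_DEATH_NOTIFICATION(handle, cookie) once the
                 *      notification is no longer wanted
                 */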
3557                 case BC_DEAD_BINDER_DONE: {
3558                         struct binder_work *w;
3559                         binder_uintptr_t cookie;
3560                         struct binder_ref_death *death = NULL;
3561
3562                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3563                                 return -EFAULT;
3564
3565                         ptr += sizeof(cookie);
3566                         binder_inner_proc_lock(proc);
3567                         list_for_each_entry(w, &proc->delivered_death,
3568                                             entry) {
3569                                 struct binder_ref_death *tmp_death =
3570                                         container_of(w,
3571                                                      struct binder_ref_death,
3572                                                      work);
3573
3574                                 if (tmp_death->cookie == cookie) {
3575                                         death = tmp_death;
3576                                         break;
3577                                 }
3578                         }
3579                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3580                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3581                                      proc->pid, thread->pid, (u64)cookie,
3582                                      death);
3583                         if (death == NULL) {
3584                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3585                                         proc->pid, thread->pid, (u64)cookie);
3586                                 binder_inner_proc_unlock(proc);
3587                                 break;
3588                         }
3589                         binder_dequeue_work_ilocked(&death->work);
3590                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3591                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3592                                 if (thread->looper &
3593                                         (BINDER_LOOPER_STATE_REGISTERED |
3594                                          BINDER_LOOPER_STATE_ENTERED))
3595                                         binder_enqueue_thread_work_ilocked(
3596                                                 thread, &death->work);
3597                                 else {
3598                                         binder_enqueue_work_ilocked(
3599                                                         &death->work,
3600                                                         &proc->todo);
3601                                         binder_wakeup_proc_ilocked(proc);
3602                                 }
3603                         }
3604                         binder_inner_proc_unlock(proc);
3605                 } break;
3606
3607                 default:
3608                         pr_err("%d:%d unknown command %d\n",
3609                                proc->pid, thread->pid, cmd);
3610                         return -EINVAL;
3611                 }
3612                 *consumed = ptr - buffer;
3613         }
3614         return 0;
3615 }
3616
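/*
 * The write buffer consumed by binder_thread_write() above is a packed
 * stream of 32-bit BC_* command words, each followed immediately by that
 * command's payload. A sketch of batching two commands in a single
 * BINDER_WRITE_READ call (illustrative only; "handle" is a placeholder
 * reference descriptor obtained elsewhere):
 *
 *	struct {
 *		uint32_t cmd0;		// BC_ENTER_LOOPER (no payload)
 *		uint32_t cmd1;		// BC_INCREFS
 *		uint32_t handle;	// BC_INCREFS payload
 *	} __attribute__((packed)) writebuf = {
 *		BC_ENTER_LOOPER, BC_INCREFS, handle
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(writebuf),
 *		.write_buffer = (binder_uintptr_t)&writebuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, bwr.write_consumed reports how far into the stream the
 * kernel got (the *consumed update at the end of the loop above).
 */
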
3617 static void binder_stat_br(struct binder_proc *proc,
3618                            struct binder_thread *thread, uint32_t cmd)
3619 {
3620         trace_binder_return(cmd);
3621         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3622                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3623                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3624                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3625         }
3626 }
3627
3628 static int binder_put_node_cmd(struct binder_proc *proc,
3629                                struct binder_thread *thread,
3630                                void __user **ptrp,
3631                                binder_uintptr_t node_ptr,
3632                                binder_uintptr_t node_cookie,
3633                                int node_debug_id,
3634                                uint32_t cmd, const char *cmd_name)
3635 {
3636         void __user *ptr = *ptrp;
3637
3638         if (put_user(cmd, (uint32_t __user *)ptr))
3639                 return -EFAULT;
3640         ptr += sizeof(uint32_t);
3641
3642         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3643                 return -EFAULT;
3644         ptr += sizeof(binder_uintptr_t);
3645
3646         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3647                 return -EFAULT;
3648         ptr += sizeof(binder_uintptr_t);
3649
3650         binder_stat_br(proc, thread, cmd);
3651         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3652                      proc->pid, thread->pid, cmd_name, node_debug_id,
3653                      (u64)node_ptr, (u64)node_cookie);
3654
3655         *ptrp = ptr;
3656         return 0;
3657 }
3658
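/*
 * Sleep until this thread has work to do (or, if do_proc_work is true,
 * until the process has work).  While waiting for process-wide work the
 * thread is parked on proc->waiting_threads so binder_wakeup_proc_ilocked()
 * can select it.  Returns 0 when work is available or -ERESTARTSYS if the
 * wait was interrupted by a signal.
 */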
3659 static int binder_wait_for_work(struct binder_thread *thread,
3660                                 bool do_proc_work)
3661 {
3662         DEFINE_WAIT(wait);
3663         struct binder_proc *proc = thread->proc;
3664         int ret = 0;
3665
3666         freezer_do_not_count();
3667         binder_inner_proc_lock(proc);
3668         for (;;) {
3669                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3670                 if (binder_has_work_ilocked(thread, do_proc_work))
3671                         break;
3672                 if (do_proc_work)
3673                         list_add(&thread->waiting_thread_node,
3674                                  &proc->waiting_threads);
3675                 binder_inner_proc_unlock(proc);
3676                 schedule();
3677                 binder_inner_proc_lock(proc);
3678                 list_del_init(&thread->waiting_thread_node);
3679                 if (signal_pending(current)) {
3680                         ret = -ERESTARTSYS;
3681                         break;
3682                 }
3683         }
3684         finish_wait(&thread->wait, &wait);
3685         binder_inner_proc_unlock(proc);
3686         freezer_count();
3687
3688         return ret;
3689 }
3690
3691 /**
3692  * binder_apply_fd_fixups() - finish fd translation
3693  * @proc:         binder_proc associated with @t->buffer
3694  * @t:  binder transaction with list of fd fixups
3695  *
3696  * Now that we are in the context of the transaction target
3697  * process, we can allocate and install fds. Process the
3698  * list of fds to translate and fix up the buffer with the
3699  * new fds.
3700  *
3701  * If we fail to allocate an fd, then free the resources by
3702  * fput'ing files that have not been processed and closing
3703  * (via binder_deferred_fd_close()) any fds that have already been allocated.
3704  */
3705 static int binder_apply_fd_fixups(struct binder_proc *proc,
3706                                   struct binder_transaction *t)
3707 {
3708         struct binder_txn_fd_fixup *fixup, *tmp;
3709         int ret = 0;
3710
3711         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3712                 int fd = get_unused_fd_flags(O_CLOEXEC);
3713
3714                 if (fd < 0) {
3715                         binder_debug(BINDER_DEBUG_TRANSACTION,
3716                                      "failed fd fixup txn %d fd %d\n",
3717                                      t->debug_id, fd);
3718                         ret = -ENOMEM;
3719                         break;
3720                 }
3721                 binder_debug(BINDER_DEBUG_TRANSACTION,
3722                              "fd fixup txn %d fd %d\n",
3723                              t->debug_id, fd);
3724                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3725                 fd_install(fd, fixup->file);
3726                 fixup->file = NULL;
3727                 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3728                                                 fixup->offset, &fd,
3729                                                 sizeof(u32))) {
3730                         ret = -EINVAL;
3731                         break;
3732                 }
3733         }
3734         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3735                 if (fixup->file) {
3736                         fput(fixup->file);
3737                 } else if (ret) {
3738                         u32 fd;
3739                         int err;
3740
3741                         err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3742                                                             t->buffer,
3743                                                             fixup->offset,
3744                                                             sizeof(fd));
3745                         WARN_ON(err);
3746                         if (!err)
3747                                 binder_deferred_fd_close(fd);
3748                 }
3749                 list_del(&fixup->fixup_entry);
3750                 kfree(fixup);
3751         }
3752
3753         return ret;
3754 }
3755
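/*
 * Fill the userspace read buffer with BR_* return commands and any pending
 * transaction data for this thread (or for the process as a whole when the
 * thread is available for proc work).  Blocks until work arrives unless
 * non_block is set.  *consumed is updated to the number of bytes written.
 */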
3756 static int binder_thread_read(struct binder_proc *proc,
3757                               struct binder_thread *thread,
3758                               binder_uintptr_t binder_buffer, size_t size,
3759                               binder_size_t *consumed, int non_block)
3760 {
3761         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3762         void __user *ptr = buffer + *consumed;
3763         void __user *end = buffer + size;
3764
3765         int ret = 0;
3766         int wait_for_proc_work;
3767
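        /*
         * Prime a fresh read buffer with BR_NOOP, a no-op return command
         * that userspace simply skips; this guarantees the buffer always
         * starts with a valid command word.
         */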
3768         if (*consumed == 0) {
3769                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3770                         return -EFAULT;
3771                 ptr += sizeof(uint32_t);
3772         }
3773
3774 retry:
3775         binder_inner_proc_lock(proc);
3776         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3777         binder_inner_proc_unlock(proc);
3778
3779         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3780
3781         trace_binder_wait_for_work(wait_for_proc_work,
3782                                    !!thread->transaction_stack,
3783                                    !binder_worklist_empty(proc, &thread->todo));
3784         if (wait_for_proc_work) {
3785                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3786                                         BINDER_LOOPER_STATE_ENTERED))) {
3787                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3788                                 proc->pid, thread->pid, thread->looper);
3789                         wait_event_interruptible(binder_user_error_wait,
3790                                                  binder_stop_on_user_error < 2);
3791                 }
3792                 binder_set_nice(proc->default_priority);
3793         }
3794
3795         if (non_block) {
3796                 if (!binder_has_work(thread, wait_for_proc_work))
3797                         ret = -EAGAIN;
3798         } else {
3799                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3800         }
3801
3802         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3803
3804         if (ret)
3805                 return ret;
3806
3807         while (1) {
3808                 uint32_t cmd;
3809                 struct binder_transaction_data_secctx tr;
3810                 struct binder_transaction_data *trd = &tr.transaction_data;
3811                 struct binder_work *w = NULL;
3812                 struct list_head *list = NULL;
3813                 struct binder_transaction *t = NULL;
3814                 struct binder_thread *t_from;
3815                 size_t trsize = sizeof(*trd);
3816
3817                 binder_inner_proc_lock(proc);
3818                 if (!binder_worklist_empty_ilocked(&thread->todo))
3819                         list = &thread->todo;
3820                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3821                            wait_for_proc_work)
3822                         list = &proc->todo;
3823                 else {
3824                         binder_inner_proc_unlock(proc);
3825
3826                         /* no data added (only the initial 4-byte BR_NOOP) */
3827                         if (ptr - buffer == 4 && !thread->looper_need_return)
3828                                 goto retry;
3829                         break;
3830                 }
3831
3832                 if (end - ptr < sizeof(tr) + 4) {
3833                         binder_inner_proc_unlock(proc);
3834                         break;
3835                 }
3836                 w = binder_dequeue_work_head_ilocked(list);
3837                 if (binder_worklist_empty_ilocked(&thread->todo))
3838                         thread->process_todo = false;
3839
3840                 switch (w->type) {
3841                 case BINDER_WORK_TRANSACTION: {
3842                         binder_inner_proc_unlock(proc);
3843                         t = container_of(w, struct binder_transaction, work);
3844                 } break;
3845                 case BINDER_WORK_RETURN_ERROR: {
3846                         struct binder_error *e = container_of(
3847                                         w, struct binder_error, work);
3848
3849                         WARN_ON(e->cmd == BR_OK);
3850                         binder_inner_proc_unlock(proc);
3851                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3852                                 return -EFAULT;
3853                         cmd = e->cmd;
3854                         e->cmd = BR_OK;
3855                         ptr += sizeof(uint32_t);
3856
3857                         binder_stat_br(proc, thread, cmd);
3858                 } break;
3859                 case BINDER_WORK_TRANSACTION_COMPLETE: {
3860                         binder_inner_proc_unlock(proc);
3861                         cmd = BR_TRANSACTION_COMPLETE;
3862                         kfree(w);
3863                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3864                         if (put_user(cmd, (uint32_t __user *)ptr))
3865                                 return -EFAULT;
3866                         ptr += sizeof(uint32_t);
3867
3868                         binder_stat_br(proc, thread, cmd);
3869                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3870                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3871                                      proc->pid, thread->pid);
3872                 } break;
3873                 case BINDER_WORK_NODE: {
3874                         struct binder_node *node = container_of(w, struct binder_node, work);
3875                         int strong, weak;
3876                         binder_uintptr_t node_ptr = node->ptr;
3877                         binder_uintptr_t node_cookie = node->cookie;
3878                         int node_debug_id = node->debug_id;
3879                         int has_weak_ref;
3880                         int has_strong_ref;
3881                         void __user *orig_ptr = ptr;
3882
3883                         BUG_ON(proc != node->proc);
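                        /*
                         * Work out whether userspace still needs strong/weak
                         * references on this node: strong if any strong refs
                         * remain, weak if anything at all still points at it.
                         * The BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS
                         * commands below bring userspace in line with that.
                         */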
3884                         strong = node->internal_strong_refs ||
3885                                         node->local_strong_refs;
3886                         weak = !hlist_empty(&node->refs) ||
3887                                         node->local_weak_refs ||
3888                                         node->tmp_refs || strong;
3889                         has_strong_ref = node->has_strong_ref;
3890                         has_weak_ref = node->has_weak_ref;
3891
3892                         if (weak && !has_weak_ref) {
3893                                 node->has_weak_ref = 1;
3894                                 node->pending_weak_ref = 1;
3895                                 node->local_weak_refs++;
3896                         }
3897                         if (strong && !has_strong_ref) {
3898                                 node->has_strong_ref = 1;
3899                                 node->pending_strong_ref = 1;
3900                                 node->local_strong_refs++;
3901                         }
3902                         if (!strong && has_strong_ref)
3903                                 node->has_strong_ref = 0;
3904                         if (!weak && has_weak_ref)
3905                                 node->has_weak_ref = 0;
3906                         if (!weak && !strong) {
3907                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3908                                              "%d:%d node %d u%016llx c%016llx deleted\n",
3909                                              proc->pid, thread->pid,
3910                                              node_debug_id,
3911                                              (u64)node_ptr,
3912                                              (u64)node_cookie);
3913                                 rb_erase(&node->rb_node, &proc->nodes);
3914                                 binder_inner_proc_unlock(proc);
3915                                 binder_node_lock(node);
3916                                 /*
3917                                  * Acquire the node lock before freeing the
3918                                  * node to serialize with other threads that
3919                                  * may have been holding the node lock while
3920                                  * decrementing this node (avoids race where
3921                                  * this thread frees while the other thread
3922                                  * is unlocking the node after the final
3923                                  * decrement)
3924                                  */
3925                                 binder_node_unlock(node);
3926                                 binder_free_node(node);
3927                         } else
3928                                 binder_inner_proc_unlock(proc);
3929
3930                         if (weak && !has_weak_ref)
3931                                 ret = binder_put_node_cmd(
3932                                                 proc, thread, &ptr, node_ptr,
3933                                                 node_cookie, node_debug_id,
3934                                                 BR_INCREFS, "BR_INCREFS");
3935                         if (!ret && strong && !has_strong_ref)
3936                                 ret = binder_put_node_cmd(
3937                                                 proc, thread, &ptr, node_ptr,
3938                                                 node_cookie, node_debug_id,
3939                                                 BR_ACQUIRE, "BR_ACQUIRE");
3940                         if (!ret && !strong && has_strong_ref)
3941                                 ret = binder_put_node_cmd(
3942                                                 proc, thread, &ptr, node_ptr,
3943                                                 node_cookie, node_debug_id,
3944                                                 BR_RELEASE, "BR_RELEASE");
3945                         if (!ret && !weak && has_weak_ref)
3946                                 ret = binder_put_node_cmd(
3947                                                 proc, thread, &ptr, node_ptr,
3948                                                 node_cookie, node_debug_id,
3949                                                 BR_DECREFS, "BR_DECREFS");
3950                         if (orig_ptr == ptr)
3951                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3952                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
3953                                              proc->pid, thread->pid,
3954                                              node_debug_id,
3955                                              (u64)node_ptr,
3956                                              (u64)node_cookie);
3957                         if (ret)
3958                                 return ret;
3959                 } break;
3960                 case BINDER_WORK_DEAD_BINDER:
3961                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3962                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3963                         struct binder_ref_death *death;
3964                         uint32_t cmd;
3965                         binder_uintptr_t cookie;
3966
3967                         death = container_of(w, struct binder_ref_death, work);
3968                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3969                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3970                         else
3971                                 cmd = BR_DEAD_BINDER;
3972                         cookie = death->cookie;
3973
3974                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3975                                      "%d:%d %s %016llx\n",
3976                                       proc->pid, thread->pid,
3977                                       cmd == BR_DEAD_BINDER ?
3978                                       "BR_DEAD_BINDER" :
3979                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3980                                       (u64)cookie);
3981                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3982                                 binder_inner_proc_unlock(proc);
3983                                 kfree(death);
3984                                 binder_stats_deleted(BINDER_STAT_DEATH);
3985                         } else {
3986                                 binder_enqueue_work_ilocked(
3987                                                 w, &proc->delivered_death);
3988                                 binder_inner_proc_unlock(proc);
3989                         }
3990                         if (put_user(cmd, (uint32_t __user *)ptr))
3991                                 return -EFAULT;
3992                         ptr += sizeof(uint32_t);
3993                         if (put_user(cookie,
3994                                      (binder_uintptr_t __user *)ptr))
3995                                 return -EFAULT;
3996                         ptr += sizeof(binder_uintptr_t);
3997                         binder_stat_br(proc, thread, cmd);
3998                         if (cmd == BR_DEAD_BINDER)
3999                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4000                 } break;
4001                 default:
4002                         binder_inner_proc_unlock(proc);
4003                         pr_err("%d:%d: bad work type %d\n",
4004                                proc->pid, thread->pid, w->type);
4005                         break;
4006                 }
4007
4008                 if (!t)
4009                         continue;
4010
4011                 BUG_ON(t->buffer == NULL);
4012                 if (t->buffer->target_node) {
4013                         struct binder_node *target_node = t->buffer->target_node;
4014
4015                         trd->target.ptr = target_node->ptr;
4016                         trd->cookie = target_node->cookie;
4017                         t->saved_priority = task_nice(current);
4018                         if (t->priority < target_node->min_priority &&
4019                             !(t->flags & TF_ONE_WAY))
4020                                 binder_set_nice(t->priority);
4021                         else if (!(t->flags & TF_ONE_WAY) ||
4022                                  t->saved_priority > target_node->min_priority)
4023                                 binder_set_nice(target_node->min_priority);
4024                         cmd = BR_TRANSACTION;
4025                 } else {
4026                         trd->target.ptr = 0;
4027                         trd->cookie = 0;
4028                         cmd = BR_REPLY;
4029                 }
4030                 trd->code = t->code;
4031                 trd->flags = t->flags;
4032                 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4033
4034                 t_from = binder_get_txn_from(t);
4035                 if (t_from) {
4036                         struct task_struct *sender = t_from->proc->tsk;
4037
4038                         trd->sender_pid =
4039                                 task_tgid_nr_ns(sender,
4040                                                 task_active_pid_ns(current));
4041                 } else {
4042                         trd->sender_pid = 0;
4043                 }
4044
4045                 ret = binder_apply_fd_fixups(proc, t);
4046                 if (ret) {
4047                         struct binder_buffer *buffer = t->buffer;
4048                         bool oneway = !!(t->flags & TF_ONE_WAY);
4049                         int tid = t->debug_id;
4050
4051                         if (t_from)
4052                                 binder_thread_dec_tmpref(t_from);
4053                         buffer->transaction = NULL;
4054                         binder_cleanup_transaction(t, "fd fixups failed",
4055                                                    BR_FAILED_REPLY);
4056                         binder_free_buf(proc, buffer);
4057                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4058                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4059                                      proc->pid, thread->pid,
4060                                      oneway ? "async " :
4061                                         (cmd == BR_REPLY ? "reply " : ""),
4062                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4063                         if (cmd == BR_REPLY) {
4064                                 cmd = BR_FAILED_REPLY;
4065                                 if (put_user(cmd, (uint32_t __user *)ptr))
4066                                         return -EFAULT;
4067                                 ptr += sizeof(uint32_t);
4068                                 binder_stat_br(proc, thread, cmd);
4069                                 break;
4070                         }
4071                         continue;
4072                 }
4073                 trd->data_size = t->buffer->data_size;
4074                 trd->offsets_size = t->buffer->offsets_size;
4075                 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4076                 trd->data.ptr.offsets = trd->data.ptr.buffer +
4077                                         ALIGN(t->buffer->data_size,
4078                                             sizeof(void *));
4079
4080                 tr.secctx = t->security_ctx;
4081                 if (t->security_ctx) {
4082                         cmd = BR_TRANSACTION_SEC_CTX;
4083                         trsize = sizeof(tr);
4084                 }
4085                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4086                         if (t_from)
4087                                 binder_thread_dec_tmpref(t_from);
4088
4089                         binder_cleanup_transaction(t, "put_user failed",
4090                                                    BR_FAILED_REPLY);
4091
4092                         return -EFAULT;
4093                 }
4094                 ptr += sizeof(uint32_t);
4095                 if (copy_to_user(ptr, &tr, trsize)) {
4096                         if (t_from)
4097                                 binder_thread_dec_tmpref(t_from);
4098
4099                         binder_cleanup_transaction(t, "copy_to_user failed",
4100                                                    BR_FAILED_REPLY);
4101
4102                         return -EFAULT;
4103                 }
4104                 ptr += trsize;
4105
4106                 trace_binder_transaction_received(t);
4107                 binder_stat_br(proc, thread, cmd);
4108                 binder_debug(BINDER_DEBUG_TRANSACTION,
4109                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4110                              proc->pid, thread->pid,
4111                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4112                                 (cmd == BR_TRANSACTION_SEC_CTX) ?
4113                                      "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4114                              t->debug_id, t_from ? t_from->proc->pid : 0,
4115                              t_from ? t_from->pid : 0, cmd,
4116                              t->buffer->data_size, t->buffer->offsets_size,
4117                              (u64)trd->data.ptr.buffer,
4118                              (u64)trd->data.ptr.offsets);
4119
4120                 if (t_from)
4121                         binder_thread_dec_tmpref(t_from);
4122                 t->buffer->allow_user_free = 1;
4123                 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4124                         binder_inner_proc_lock(thread->proc);
4125                         t->to_parent = thread->transaction_stack;
4126                         t->to_thread = thread;
4127                         thread->transaction_stack = t;
4128                         binder_inner_proc_unlock(thread->proc);
4129                 } else {
4130                         binder_free_transaction(t);
4131                 }
4132                 break;
4133         }
4134
4135 done:
4136
4137         *consumed = ptr - buffer;
4138         binder_inner_proc_lock(proc);
4139         if (proc->requested_threads == 0 &&
4140             list_empty(&thread->proc->waiting_threads) &&
4141             proc->requested_threads_started < proc->max_threads &&
4142             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4143              BINDER_LOOPER_STATE_ENTERED))
4144              /* the user-space code fails to spawn a new thread if we leave this out */) {
4145                 proc->requested_threads++;
4146                 binder_inner_proc_unlock(proc);
4147                 binder_debug(BINDER_DEBUG_THREADS,
4148                              "%d:%d BR_SPAWN_LOOPER\n",
4149                              proc->pid, thread->pid);
4150                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4151                         return -EFAULT;
4152                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4153         } else
4154                 binder_inner_proc_unlock(proc);
4155         return 0;
4156 }
4157
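/*
 * Drain and free every work item still queued on @list, typically when a
 * thread or process is being torn down.  Undelivered transactions are
 * cleaned up with BR_DEAD_REPLY so their senders do not hang.
 */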
4158 static void binder_release_work(struct binder_proc *proc,
4159                                 struct list_head *list)
4160 {
4161         struct binder_work *w;
4162         enum binder_work_type wtype;
4163
4164         while (1) {
4165                 binder_inner_proc_lock(proc);
4166                 w = binder_dequeue_work_head_ilocked(list);
4167                 wtype = w ? w->type : 0;
4168                 binder_inner_proc_unlock(proc);
4169                 if (!w)
4170                         return;
4171
4172                 switch (wtype) {
4173                 case BINDER_WORK_TRANSACTION: {
4174                         struct binder_transaction *t;
4175
4176                         t = container_of(w, struct binder_transaction, work);
4177
4178                         binder_cleanup_transaction(t, "process died.",
4179                                                    BR_DEAD_REPLY);
4180                 } break;
4181                 case BINDER_WORK_RETURN_ERROR: {
4182                         struct binder_error *e = container_of(
4183                                         w, struct binder_error, work);
4184
4185                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4186                                 "undelivered TRANSACTION_ERROR: %u\n",
4187                                 e->cmd);
4188                 } break;
4189                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4190                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4191                                 "undelivered TRANSACTION_COMPLETE\n");
4192                         kfree(w);
4193                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4194                 } break;
4195                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4196                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4197                         struct binder_ref_death *death;
4198
4199                         death = container_of(w, struct binder_ref_death, work);
4200                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4201                                 "undelivered death notification, %016llx\n",
4202                                 (u64)death->cookie);
4203                         kfree(death);
4204                         binder_stats_deleted(BINDER_STAT_DEATH);
4205                 } break;
4206                 case BINDER_WORK_NODE:
4207                         break;
4208                 default:
4209                         pr_err("unexpected work type, %d, not freed\n",
4210                                wtype);
4211                         break;
4212                 }
4213         }
4214
4215 }
4216
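/*
 * Look up the binder_thread for current in proc->threads (an rb-tree keyed
 * by pid).  If it is not found and @new_thread was supplied, initialize and
 * insert @new_thread instead; binder_get_thread() below handles the
 * allocate-then-retry dance without holding the inner lock across kzalloc().
 */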
4217 static struct binder_thread *binder_get_thread_ilocked(
4218                 struct binder_proc *proc, struct binder_thread *new_thread)
4219 {
4220         struct binder_thread *thread = NULL;
4221         struct rb_node *parent = NULL;
4222         struct rb_node **p = &proc->threads.rb_node;
4223
4224         while (*p) {
4225                 parent = *p;
4226                 thread = rb_entry(parent, struct binder_thread, rb_node);
4227
4228                 if (current->pid < thread->pid)
4229                         p = &(*p)->rb_left;
4230                 else if (current->pid > thread->pid)
4231                         p = &(*p)->rb_right;
4232                 else
4233                         return thread;
4234         }
4235         if (!new_thread)
4236                 return NULL;
4237         thread = new_thread;
4238         binder_stats_created(BINDER_STAT_THREAD);
4239         thread->proc = proc;
4240         thread->pid = current->pid;
4241         atomic_set(&thread->tmp_ref, 0);
4242         init_waitqueue_head(&thread->wait);
4243         INIT_LIST_HEAD(&thread->todo);
4244         rb_link_node(&thread->rb_node, parent, p);
4245         rb_insert_color(&thread->rb_node, &proc->threads);
4246         thread->looper_need_return = true;
4247         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4248         thread->return_error.cmd = BR_OK;
4249         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4250         thread->reply_error.cmd = BR_OK;
4251         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4252         return thread;
4253 }
4254
4255 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4256 {
4257         struct binder_thread *thread;
4258         struct binder_thread *new_thread;
4259
4260         binder_inner_proc_lock(proc);
4261         thread = binder_get_thread_ilocked(proc, NULL);
4262         binder_inner_proc_unlock(proc);
4263         if (!thread) {
4264                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4265                 if (new_thread == NULL)
4266                         return NULL;
4267                 binder_inner_proc_lock(proc);
4268                 thread = binder_get_thread_ilocked(proc, new_thread);
4269                 binder_inner_proc_unlock(proc);
4270                 if (thread != new_thread)
4271                         kfree(new_thread);
4272         }
4273         return thread;
4274 }
4275
4276 static void binder_free_proc(struct binder_proc *proc)
4277 {
4278         struct binder_device *device;
4279
4280         BUG_ON(!list_empty(&proc->todo));
4281         BUG_ON(!list_empty(&proc->delivered_death));
4282         device = container_of(proc->context, struct binder_device, context);
4283         if (refcount_dec_and_test(&device->ref)) {
4284                 kfree(proc->context->name);
4285                 kfree(device);
4286         }
4287         binder_alloc_deferred_release(&proc->alloc);
4288         put_task_struct(proc->tsk);
4289         binder_stats_deleted(BINDER_STAT_PROC);
4290         kfree(proc);
4291 }
4292
4293 static void binder_free_thread(struct binder_thread *thread)
4294 {
4295         BUG_ON(!list_empty(&thread->todo));
4296         binder_stats_deleted(BINDER_STAT_THREAD);
4297         binder_proc_dec_tmpref(thread->proc);
4298         kfree(thread);
4299 }
4300
4301 static int binder_thread_release(struct binder_proc *proc,
4302                                  struct binder_thread *thread)
4303 {
4304         struct binder_transaction *t;
4305         struct binder_transaction *send_reply = NULL;
4306         int active_transactions = 0;
4307         struct binder_transaction *last_t = NULL;
4308
4309         binder_inner_proc_lock(thread->proc);
4310         /*
4311          * take a ref on the proc so it survives
4312          * after we remove this thread from proc->threads.
4313          * The corresponding dec is when we actually
4314          * free the thread in binder_free_thread()
4315          */
4316         proc->tmp_ref++;
4317         /*
4318          * take a ref on this thread to ensure it
4319          * survives while we are releasing it
4320          */
4321         atomic_inc(&thread->tmp_ref);
4322         rb_erase(&thread->rb_node, &proc->threads);
4323         t = thread->transaction_stack;
4324         if (t) {
4325                 spin_lock(&t->lock);
4326                 if (t->to_thread == thread)
4327                         send_reply = t;
4328         } else {
4329                 __acquire(&t->lock);
4330         }
4331         thread->is_dead = true;
4332
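        /*
         * Walk the transaction stack and detach the dying thread from every
         * transaction on it; the topmost incoming transaction (if any) gets
         * a BR_DEAD_REPLY via binder_send_failed_reply() further down.
         */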
4333         while (t) {
4334                 last_t = t;
4335                 active_transactions++;
4336                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4337                              "release %d:%d transaction %d %s, still active\n",
4338                               proc->pid, thread->pid,
4339                              t->debug_id,
4340                              (t->to_thread == thread) ? "in" : "out");
4341
4342                 if (t->to_thread == thread) {
4343                         t->to_proc = NULL;
4344                         t->to_thread = NULL;
4345                         if (t->buffer) {
4346                                 t->buffer->transaction = NULL;
4347                                 t->buffer = NULL;
4348                         }
4349                         t = t->to_parent;
4350                 } else if (t->from == thread) {
4351                         t->from = NULL;
4352                         t = t->from_parent;
4353                 } else
4354                         BUG();
4355                 spin_unlock(&last_t->lock);
4356                 if (t)
4357                         spin_lock(&t->lock);
4358                 else
4359                         __acquire(&t->lock);
4360         }
4361         /* annotation for sparse, lock not acquired in last iteration above */
4362         __release(&t->lock);
4363
4364         /*
4365          * If this thread used poll, make sure we remove the waitqueue
4366          * from any epoll data structures holding it with POLLFREE.
4367          * waitqueue_active() is safe to use here because we're holding
4368          * the inner lock.
4369          */
4370         if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4371             waitqueue_active(&thread->wait)) {
4372                 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4373         }
4374
4375         binder_inner_proc_unlock(thread->proc);
4376
4377         /*
4378          * This is needed to avoid races between wake_up_poll() above and
4379          * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4380          * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4381          * lock, so we can be sure it's done after calling synchronize_rcu().
4382          */
4383         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4384                 synchronize_rcu();
4385
4386         if (send_reply)
4387                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4388         binder_release_work(proc, &thread->todo);
4389         binder_thread_dec_tmpref(thread);
4390         return active_transactions;
4391 }
4392
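/*
 * poll() support: mark the thread as a poller (so binder_thread_release()
 * knows to wake epoll waiters with POLLFREE and synchronize_rcu()) and
 * report EPOLLIN when the thread already has work pending.
 */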
4393 static __poll_t binder_poll(struct file *filp,
4394                                 struct poll_table_struct *wait)
4395 {
4396         struct binder_proc *proc = filp->private_data;
4397         struct binder_thread *thread = NULL;
4398         bool wait_for_proc_work;
4399
4400         thread = binder_get_thread(proc);
4401         if (!thread)
4402                 return POLLERR;
4403
4404         binder_inner_proc_lock(thread->proc);
4405         thread->looper |= BINDER_LOOPER_STATE_POLL;
4406         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4407
4408         binder_inner_proc_unlock(thread->proc);
4409
4410         poll_wait(filp, &thread->wait, wait);
4411
4412         if (binder_has_work(thread, wait_for_proc_work))
4413                 return EPOLLIN;
4414
4415         return 0;
4416 }
4417
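/*
 * Handler for the BINDER_WRITE_READ ioctl: copy in the binder_write_read
 * descriptor, consume BC_* commands from the write buffer, then fill the
 * read buffer with BR_* returns, and copy the updated consumed counts back.
 * Roughly speaking, userspace (e.g. libbinder) loops on this ioctl after
 * open() and mmap() of the binder device.
 */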
4418 static int binder_ioctl_write_read(struct file *filp,
4419                                 unsigned int cmd, unsigned long arg,
4420                                 struct binder_thread *thread)
4421 {
4422         int ret = 0;
4423         struct binder_proc *proc = filp->private_data;
4424         unsigned int size = _IOC_SIZE(cmd);
4425         void __user *ubuf = (void __user *)arg;
4426         struct binder_write_read bwr;
4427
4428         if (size != sizeof(struct binder_write_read)) {
4429                 ret = -EINVAL;
4430                 goto out;
4431         }
4432         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4433                 ret = -EFAULT;
4434                 goto out;
4435         }
4436         binder_debug(BINDER_DEBUG_READ_WRITE,
4437                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4438                      proc->pid, thread->pid,
4439                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4440                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4441
4442         if (bwr.write_size > 0) {
4443                 ret = binder_thread_write(proc, thread,
4444                                           bwr.write_buffer,
4445                                           bwr.write_size,
4446                                           &bwr.write_consumed);
4447                 trace_binder_write_done(ret);
4448                 if (ret < 0) {
4449                         bwr.read_consumed = 0;
4450                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4451                                 ret = -EFAULT;
4452                         goto out;
4453                 }
4454         }
4455         if (bwr.read_size > 0) {
4456                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4457                                          bwr.read_size,
4458                                          &bwr.read_consumed,
4459                                          filp->f_flags & O_NONBLOCK);
4460                 trace_binder_read_done(ret);
4461                 binder_inner_proc_lock(proc);
4462                 if (!binder_worklist_empty_ilocked(&proc->todo))
4463                         binder_wakeup_proc_ilocked(proc);
4464                 binder_inner_proc_unlock(proc);
4465                 if (ret < 0) {
4466                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4467                                 ret = -EFAULT;
4468                         goto out;
4469                 }
4470         }
4471         binder_debug(BINDER_DEBUG_READ_WRITE,
4472                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4473                      proc->pid, thread->pid,
4474                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4475                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4476         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4477                 ret = -EFAULT;
4478                 goto out;
4479         }
4480 out:
4481         return ret;
4482 }
4483
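/*
 * Register the caller as the context manager (the service manager) for this
 * binder context.  Only one manager may exist per context, the caller must
 * pass the security_binder_set_context_mgr() check, and, once a manager uid
 * is recorded, later callers must match it.
 */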
4484 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4485                                     struct flat_binder_object *fbo)
4486 {
4487         int ret = 0;
4488         struct binder_proc *proc = filp->private_data;
4489         struct binder_context *context = proc->context;
4490         struct binder_node *new_node;
4491         kuid_t curr_euid = current_euid();
4492
4493         mutex_lock(&context->context_mgr_node_lock);
4494         if (context->binder_context_mgr_node) {
4495                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4496                 ret = -EBUSY;
4497                 goto out;
4498         }
4499         ret = security_binder_set_context_mgr(proc->tsk);
4500         if (ret < 0)
4501                 goto out;
4502         if (uid_valid(context->binder_context_mgr_uid)) {
4503                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4504                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4505                                from_kuid(&init_user_ns, curr_euid),
4506                                from_kuid(&init_user_ns,
4507                                          context->binder_context_mgr_uid));
4508                         ret = -EPERM;
4509                         goto out;
4510                 }
4511         } else {
4512                 context->binder_context_mgr_uid = curr_euid;
4513         }
4514         new_node = binder_new_node(proc, fbo);
4515         if (!new_node) {
4516                 ret = -ENOMEM;
4517                 goto out;
4518         }
4519         binder_node_lock(new_node);
4520         new_node->local_weak_refs++;
4521         new_node->local_strong_refs++;
4522         new_node->has_strong_ref = 1;
4523         new_node->has_weak_ref = 1;
4524         context->binder_context_mgr_node = new_node;
4525         binder_node_unlock(new_node);
4526         binder_put_node(new_node);
4527 out:
4528         mutex_unlock(&context->context_mgr_node_lock);
4529         return ret;
4530 }
4531
4532 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4533                 struct binder_node_info_for_ref *info)
4534 {
4535         struct binder_node *node;
4536         struct binder_context *context = proc->context;
4537         __u32 handle = info->handle;
4538
4539         if (info->strong_count || info->weak_count || info->reserved1 ||
4540             info->reserved2 || info->reserved3) {
4541                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4542                                   proc->pid);
4543                 return -EINVAL;
4544         }
4545
4546         /* This ioctl may only be used by the context manager */
4547         mutex_lock(&context->context_mgr_node_lock);
4548         if (!context->binder_context_mgr_node ||
4549                 context->binder_context_mgr_node->proc != proc) {
4550                 mutex_unlock(&context->context_mgr_node_lock);
4551                 return -EPERM;
4552         }
4553         mutex_unlock(&context->context_mgr_node_lock);
4554
4555         node = binder_get_node_from_ref(proc, handle, true, NULL);
4556         if (!node)
4557                 return -EINVAL;
4558
4559         info->strong_count = node->local_strong_refs +
4560                 node->internal_strong_refs;
4561         info->weak_count = node->local_weak_refs;
4562
4563         binder_put_node(node);
4564
4565         return 0;
4566 }
4567
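/*
 * Return debug info for the first node in proc->nodes whose ptr is greater
 * than info->ptr, letting userspace iterate over a process's nodes one
 * BINDER_GET_NODE_DEBUG_INFO call at a time.
 */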
4568 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4569                                 struct binder_node_debug_info *info)
4570 {
4571         struct rb_node *n;
4572         binder_uintptr_t ptr = info->ptr;
4573
4574         memset(info, 0, sizeof(*info));
4575
4576         binder_inner_proc_lock(proc);
4577         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4578                 struct binder_node *node = rb_entry(n, struct binder_node,
4579                                                     rb_node);
4580                 if (node->ptr > ptr) {
4581                         info->ptr = node->ptr;
4582                         info->cookie = node->cookie;
4583                         info->has_strong_ref = node->has_strong_ref;
4584                         info->has_weak_ref = node->has_weak_ref;
4585                         break;
4586                 }
4587         }
4588         binder_inner_proc_unlock(proc);
4589
4590         return 0;
4591 }
4592
4593 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4594 {
4595         int ret;
4596         struct binder_proc *proc = filp->private_data;
4597         struct binder_thread *thread;
4598         unsigned int size = _IOC_SIZE(cmd);
4599         void __user *ubuf = (void __user *)arg;
4600
4601         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4602                         proc->pid, current->pid, cmd, arg);*/
4603
4604         binder_selftest_alloc(&proc->alloc);
4605
4606         trace_binder_ioctl(cmd, arg);
4607
4608         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4609         if (ret)
4610                 goto err_unlocked;
4611
4612         thread = binder_get_thread(proc);
4613         if (thread == NULL) {
4614                 ret = -ENOMEM;
4615                 goto err;
4616         }
4617
4618         switch (cmd) {
4619         case BINDER_WRITE_READ:
4620                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4621                 if (ret)
4622                         goto err;
4623                 break;
4624         case BINDER_SET_MAX_THREADS: {
4625                 int max_threads;
4626
4627                 if (copy_from_user(&max_threads, ubuf,
4628                                    sizeof(max_threads))) {
4629                         ret = -EINVAL;
4630                         goto err;
4631                 }
4632                 binder_inner_proc_lock(proc);
4633                 proc->max_threads = max_threads;
4634                 binder_inner_proc_unlock(proc);
4635                 break;
4636         }
4637         case BINDER_SET_CONTEXT_MGR_EXT: {
4638                 struct flat_binder_object fbo;
4639
4640                 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4641                         ret = -EINVAL;
4642                         goto err;
4643                 }
4644                 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4645                 if (ret)
4646                         goto err;
4647                 break;
4648         }
4649         case BINDER_SET_CONTEXT_MGR:
4650                 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4651                 if (ret)
4652                         goto err;
4653                 break;
4654         case BINDER_THREAD_EXIT:
4655                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4656                              proc->pid, thread->pid);
4657                 binder_thread_release(proc, thread);
4658                 thread = NULL;
4659                 break;
4660         case BINDER_VERSION: {
4661                 struct binder_version __user *ver = ubuf;
4662
4663                 if (size != sizeof(struct binder_version)) {
4664                         ret = -EINVAL;
4665                         goto err;
4666                 }
4667                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4668                              &ver->protocol_version)) {
4669                         ret = -EINVAL;
4670                         goto err;
4671                 }
4672                 break;
4673         }
4674         case BINDER_GET_NODE_INFO_FOR_REF: {
4675                 struct binder_node_info_for_ref info;
4676
4677                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4678                         ret = -EFAULT;
4679                         goto err;
4680                 }
4681
4682                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4683                 if (ret < 0)
4684                         goto err;
4685
4686                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4687                         ret = -EFAULT;
4688                         goto err;
4689                 }
4690
4691                 break;
4692         }
4693         case BINDER_GET_NODE_DEBUG_INFO: {
4694                 struct binder_node_debug_info info;
4695
4696                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4697                         ret = -EFAULT;
4698                         goto err;
4699                 }
4700
4701                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4702                 if (ret < 0)
4703                         goto err;
4704
4705                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4706                         ret = -EFAULT;
4707                         goto err;
4708                 }
4709                 break;
4710         }
4711         default:
4712                 ret = -EINVAL;
4713                 goto err;
4714         }
4715         ret = 0;
4716 err:
4717         if (thread)
4718                 thread->looper_need_return = false;
4719         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4720         if (ret && ret != -ERESTARTSYS)
4721                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4722 err_unlocked:
4723         trace_binder_ioctl_done(ret);
4724         return ret;
4725 }
4726
4727 static void binder_vma_open(struct vm_area_struct *vma)
4728 {
4729         struct binder_proc *proc = vma->vm_private_data;
4730
4731         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4732                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4733                      proc->pid, vma->vm_start, vma->vm_end,
4734                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4735                      (unsigned long)pgprot_val(vma->vm_page_prot));
4736 }
4737
4738 static void binder_vma_close(struct vm_area_struct *vma)
4739 {
4740         struct binder_proc *proc = vma->vm_private_data;
4741
4742         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4743                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4744                      proc->pid, vma->vm_start, vma->vm_end,
4745                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4746                      (unsigned long)pgprot_val(vma->vm_page_prot));
4747         binder_alloc_vma_close(&proc->alloc);
4748 }
4749
4750 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4751 {
4752         return VM_FAULT_SIGBUS;
4753 }
4754
4755 static const struct vm_operations_struct binder_vm_ops = {
4756         .open = binder_vma_open,
4757         .close = binder_vma_close,
4758         .fault = binder_vm_fault,
4759 };
4760
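/*
 * Map the binder buffer space into the caller.  The mapping must come from
 * the process that opened the device, is never writable from userspace
 * (FORBIDDEN_MMAP_FLAGS, ~VM_MAYWRITE), and the real work is delegated to
 * binder_alloc_mmap_handler().
 */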
4761 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4762 {
4763         struct binder_proc *proc = filp->private_data;
4764
4765         if (proc->tsk != current->group_leader)
4766                 return -EINVAL;
4767
4768         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4769                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4770                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4771                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4772                      (unsigned long)pgprot_val(vma->vm_page_prot));
4773
4774         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4775                 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
4776                        proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
4777                 return -EPERM;
4778         }
4779         vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4780         vma->vm_flags &= ~VM_MAYWRITE;
4781
4782         vma->vm_ops = &binder_vm_ops;
4783         vma->vm_private_data = proc;
4784
4785         return binder_alloc_mmap_handler(&proc->alloc, vma);
4786 }
4787
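/*
 * Allocate and initialize the per-process binder_proc.  The backing
 * binder_device comes either from the binderfs inode's i_private or from
 * the miscdevice, and a debugfs/binderfs proc entry is created only for the
 * first open of a given pid.
 */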
4788 static int binder_open(struct inode *nodp, struct file *filp)
4789 {
4790         struct binder_proc *proc, *itr;
4791         struct binder_device *binder_dev;
4792         struct binderfs_info *info;
4793         struct dentry *binder_binderfs_dir_entry_proc = NULL;
4794         bool existing_pid = false;
4795
4796         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
4797                      current->group_leader->pid, current->pid);
4798
4799         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4800         if (proc == NULL)
4801                 return -ENOMEM;
4802         spin_lock_init(&proc->inner_lock);
4803         spin_lock_init(&proc->outer_lock);
4804         get_task_struct(current->group_leader);
4805         proc->tsk = current->group_leader;
4806         INIT_LIST_HEAD(&proc->todo);
4807         proc->default_priority = task_nice(current);
4808         /* binderfs stashes devices in i_private */
4809         if (is_binderfs_device(nodp)) {
4810                 binder_dev = nodp->i_private;
4811                 info = nodp->i_sb->s_fs_info;
4812                 binder_binderfs_dir_entry_proc = info->proc_log_dir;
4813         } else {
4814                 binder_dev = container_of(filp->private_data,
4815                                           struct binder_device, miscdev);
4816         }
4817         refcount_inc(&binder_dev->ref);
4818         proc->context = &binder_dev->context;
4819         binder_alloc_init(&proc->alloc);
4820
4821         binder_stats_created(BINDER_STAT_PROC);
4822         proc->pid = current->group_leader->pid;
4823         INIT_LIST_HEAD(&proc->delivered_death);
4824         INIT_LIST_HEAD(&proc->waiting_threads);
4825         filp->private_data = proc;
4826
4827         mutex_lock(&binder_procs_lock);
4828         hlist_for_each_entry(itr, &binder_procs, proc_node) {
4829                 if (itr->pid == proc->pid) {
4830                         existing_pid = true;
4831                         break;
4832                 }
4833         }
4834         hlist_add_head(&proc->proc_node, &binder_procs);
4835         mutex_unlock(&binder_procs_lock);
4836
4837         if (binder_debugfs_dir_entry_proc && !existing_pid) {
4838                 char strbuf[11];
4839
4840                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4841                 /*
4842                  * proc debug entries are shared between contexts.
4843                  * Only create it for the first PID to avoid debugfs log spamming.
4844                  * The printing code will anyway print all contexts for a given
4845                  * PID, so this is not a problem.
4846                  */
4847                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
4848                         binder_debugfs_dir_entry_proc,
4849                         (void *)(unsigned long)proc->pid,
4850                         &proc_fops);
4851         }
4852
4853         if (binder_binderfs_dir_entry_proc && !existing_pid) {
4854                 char strbuf[11];
4855                 struct dentry *binderfs_entry;
4856
4857                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4858                 /*
4859                  * Similar to debugfs, the process-specific log file is shared
4860                  * between contexts. Only create it for the first PID.
4861                  * This is OK since, as with debugfs, the log file will contain
4862                  * information on all contexts of a given PID.
4863                  */
4864                 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
4865                         strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
4866                 if (!IS_ERR(binderfs_entry)) {
4867                         proc->binderfs_entry = binderfs_entry;
4868                 } else {
4869                         int error;
4870
4871                         error = PTR_ERR(binderfs_entry);
4872                         pr_warn("Unable to create file %s in binderfs (error %d)\n",
4873                                 strbuf, error);
4874                 }
4875         }
4876
4877         return 0;
4878 }
4879
4880 static int binder_flush(struct file *filp, fl_owner_t id)
4881 {
4882         struct binder_proc *proc = filp->private_data;
4883
4884         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4885
4886         return 0;
4887 }
4888
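     /*
      * Force every looper thread of the process back to userspace: mark
      * each thread with looper_need_return and wake up the threads that
      * are currently waiting for work.
      */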
4889 static void binder_deferred_flush(struct binder_proc *proc)
4890 {
4891         struct rb_node *n;
4892         int wake_count = 0;
4893
4894         binder_inner_proc_lock(proc);
4895         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4896                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4897
4898                 thread->looper_need_return = true;
4899                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4900                         wake_up_interruptible(&thread->wait);
4901                         wake_count++;
4902                 }
4903         }
4904         binder_inner_proc_unlock(proc);
4905
4906         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4907                      "binder_flush: %d woke %d threads\n", proc->pid,
4908                      wake_count);
4909 }
4910
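     /*
      * Last fput() of the binder fd: drop the debugfs/binderfs entries
      * immediately and defer the heavyweight teardown to
      * binder_deferred_release() via the deferred work mechanism.
      */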
4911 static int binder_release(struct inode *nodp, struct file *filp)
4912 {
4913         struct binder_proc *proc = filp->private_data;
4914
4915         debugfs_remove(proc->debugfs_entry);
4916
4917         if (proc->binderfs_entry) {
4918                 binderfs_remove_file(proc->binderfs_entry);
4919                 proc->binderfs_entry = NULL;
4920         }
4921
4922         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4923
4924         return 0;
4925 }
4926
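     /*
      * Release a node whose owning process is going away: flush pending
      * async work, then either free the node (if only the caller's
      * temporary ref remains) or move it to binder_dead_nodes and queue
      * BINDER_WORK_DEAD_BINDER on every ref that requested a death
      * notification. Returns the updated incoming reference count.
      */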
4927 static int binder_node_release(struct binder_node *node, int refs)
4928 {
4929         struct binder_ref *ref;
4930         int death = 0;
4931         struct binder_proc *proc = node->proc;
4932
4933         binder_release_work(proc, &node->async_todo);
4934
4935         binder_node_lock(node);
4936         binder_inner_proc_lock(proc);
4937         binder_dequeue_work_ilocked(&node->work);
4938         /*
4939          * The caller must have taken a temporary ref on the node.
4940          */
4941         BUG_ON(!node->tmp_refs);
4942         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4943                 binder_inner_proc_unlock(proc);
4944                 binder_node_unlock(node);
4945                 binder_free_node(node);
4946
4947                 return refs;
4948         }
4949
4950         node->proc = NULL;
4951         node->local_strong_refs = 0;
4952         node->local_weak_refs = 0;
4953         binder_inner_proc_unlock(proc);
4954
4955         spin_lock(&binder_dead_nodes_lock);
4956         hlist_add_head(&node->dead_node, &binder_dead_nodes);
4957         spin_unlock(&binder_dead_nodes_lock);
4958
4959         hlist_for_each_entry(ref, &node->refs, node_entry) {
4960                 refs++;
4961                 /*
4962                  * Need the node lock to synchronize
4963                  * with new notification requests and the
4964                  * inner lock to synchronize with queued
4965                  * death notifications.
4966                  */
4967                 binder_inner_proc_lock(ref->proc);
4968                 if (!ref->death) {
4969                         binder_inner_proc_unlock(ref->proc);
4970                         continue;
4971                 }
4972
4973                 death++;
4974
4975                 BUG_ON(!list_empty(&ref->death->work.entry));
4976                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4977                 binder_enqueue_work_ilocked(&ref->death->work,
4978                                             &ref->proc->todo);
4979                 binder_wakeup_proc_ilocked(ref->proc);
4980                 binder_inner_proc_unlock(ref->proc);
4981         }
4982
4983         binder_debug(BINDER_DEBUG_DEAD_BINDER,
4984                      "node %d now dead, refs %d, death %d\n",
4985                      node->debug_id, refs, death);
4986         binder_node_unlock(node);
4987         binder_put_node(node);
4988
4989         return refs;
4990 }
4991
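     /*
      * Tear down a dying process: unhash it from binder_procs, clear the
      * context manager node if it belonged to this process, then release
      * every thread, node and outgoing ref, dropping the proc locks
      * around each individual release. A temporary proc ref keeps the
      * binder_proc alive until the final binder_proc_dec_tmpref().
      */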
4992 static void binder_deferred_release(struct binder_proc *proc)
4993 {
4994         struct binder_context *context = proc->context;
4995         struct rb_node *n;
4996         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4997
4998         mutex_lock(&binder_procs_lock);
4999         hlist_del(&proc->proc_node);
5000         mutex_unlock(&binder_procs_lock);
5001
5002         mutex_lock(&context->context_mgr_node_lock);
5003         if (context->binder_context_mgr_node &&
5004             context->binder_context_mgr_node->proc == proc) {
5005                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5006                              "%s: %d context_mgr_node gone\n",
5007                              __func__, proc->pid);
5008                 context->binder_context_mgr_node = NULL;
5009         }
5010         mutex_unlock(&context->context_mgr_node_lock);
5011         binder_inner_proc_lock(proc);
5012         /*
5013          * Make sure the proc stays alive after we
5014          * remove all the threads.
5015          */
5016         proc->tmp_ref++;
5017
5018         proc->is_dead = true;
5019         threads = 0;
5020         active_transactions = 0;
5021         while ((n = rb_first(&proc->threads))) {
5022                 struct binder_thread *thread;
5023
5024                 thread = rb_entry(n, struct binder_thread, rb_node);
5025                 binder_inner_proc_unlock(proc);
5026                 threads++;
5027                 active_transactions += binder_thread_release(proc, thread);
5028                 binder_inner_proc_lock(proc);
5029         }
5030
5031         nodes = 0;
5032         incoming_refs = 0;
5033         while ((n = rb_first(&proc->nodes))) {
5034                 struct binder_node *node;
5035
5036                 node = rb_entry(n, struct binder_node, rb_node);
5037                 nodes++;
5038                 /*
5039                  * Take a temporary ref on the node before
5040                  * calling binder_node_release(), which will either
5041                  * kfree() the node or call binder_put_node().
5042                  */
5043                 binder_inc_node_tmpref_ilocked(node);
5044                 rb_erase(&node->rb_node, &proc->nodes);
5045                 binder_inner_proc_unlock(proc);
5046                 incoming_refs = binder_node_release(node, incoming_refs);
5047                 binder_inner_proc_lock(proc);
5048         }
5049         binder_inner_proc_unlock(proc);
5050
5051         outgoing_refs = 0;
5052         binder_proc_lock(proc);
5053         while ((n = rb_first(&proc->refs_by_desc))) {
5054                 struct binder_ref *ref;
5055
5056                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5057                 outgoing_refs++;
5058                 binder_cleanup_ref_olocked(ref);
5059                 binder_proc_unlock(proc);
5060                 binder_free_ref(ref);
5061                 binder_proc_lock(proc);
5062         }
5063         binder_proc_unlock(proc);
5064
5065         binder_release_work(proc, &proc->todo);
5066         binder_release_work(proc, &proc->delivered_death);
5067
5068         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5069                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5070                      __func__, proc->pid, threads, nodes, incoming_refs,
5071                      outgoing_refs, active_transactions);
5072
5073         binder_proc_dec_tmpref(proc);
5074 }
5075
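     /*
      * Workqueue function for the deferred work mechanism: drain
      * binder_deferred_list one binder_proc at a time and handle the
      * BINDER_DEFERRED_FLUSH / BINDER_DEFERRED_RELEASE bits recorded by
      * binder_defer_work().
      */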
5076 static void binder_deferred_func(struct work_struct *work)
5077 {
5078         struct binder_proc *proc;
5079
5080         int defer;
5081
5082         do {
5083                 mutex_lock(&binder_deferred_lock);
5084                 if (!hlist_empty(&binder_deferred_list)) {
5085                         proc = hlist_entry(binder_deferred_list.first,
5086                                         struct binder_proc, deferred_work_node);
5087                         hlist_del_init(&proc->deferred_work_node);
5088                         defer = proc->deferred_work;
5089                         proc->deferred_work = 0;
5090                 } else {
5091                         proc = NULL;
5092                         defer = 0;
5093                 }
5094                 mutex_unlock(&binder_deferred_lock);
5095
5096                 if (defer & BINDER_DEFERRED_FLUSH)
5097                         binder_deferred_flush(proc);
5098
5099                 if (defer & BINDER_DEFERRED_RELEASE)
5100                         binder_deferred_release(proc); /* frees proc */
5101         } while (proc);
5102 }
5103 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5104
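     /*
      * Record deferred work bits for @proc and, if it is not already
      * queued, add it to binder_deferred_list and schedule the shared
      * work item so binder_deferred_func() will process it.
      */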
5105 static void
5106 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5107 {
5108         mutex_lock(&binder_deferred_lock);
5109         proc->deferred_work |= defer;
5110         if (hlist_unhashed(&proc->deferred_work_node)) {
5111                 hlist_add_head(&proc->deferred_work_node,
5112                                 &binder_deferred_list);
5113                 schedule_work(&binder_deferred_work);
5114         }
5115         mutex_unlock(&binder_deferred_lock);
5116 }
5117
5118 static void print_binder_transaction_ilocked(struct seq_file *m,
5119                                              struct binder_proc *proc,
5120                                              const char *prefix,
5121                                              struct binder_transaction *t)
5122 {
5123         struct binder_proc *to_proc;
5124         struct binder_buffer *buffer = t->buffer;
5125
5126         spin_lock(&t->lock);
5127         to_proc = t->to_proc;
5128         seq_printf(m,
5129                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5130                    prefix, t->debug_id, t,
5131                    t->from ? t->from->proc->pid : 0,
5132                    t->from ? t->from->pid : 0,
5133                    to_proc ? to_proc->pid : 0,
5134                    t->to_thread ? t->to_thread->pid : 0,
5135                    t->code, t->flags, t->priority, t->need_reply);
5136         spin_unlock(&t->lock);
5137
5138         if (proc != to_proc) {
5139                 /*
5140                  * We can only safely dereference the buffer if we are
5141                  * holding the correct proc inner lock for this node.
5142                  */
5143                 seq_puts(m, "\n");
5144                 return;
5145         }
5146
5147         if (buffer == NULL) {
5148                 seq_puts(m, " buffer free\n");
5149                 return;
5150         }
5151         if (buffer->target_node)
5152                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5153         seq_printf(m, " size %zd:%zd data %pK\n",
5154                    buffer->data_size, buffer->offsets_size,
5155                    buffer->user_data);
5156 }
5157
5158 static void print_binder_work_ilocked(struct seq_file *m,
5159                                      struct binder_proc *proc,
5160                                      const char *prefix,
5161                                      const char *transaction_prefix,
5162                                      struct binder_work *w)
5163 {
5164         struct binder_node *node;
5165         struct binder_transaction *t;
5166
5167         switch (w->type) {
5168         case BINDER_WORK_TRANSACTION:
5169                 t = container_of(w, struct binder_transaction, work);
5170                 print_binder_transaction_ilocked(
5171                                 m, proc, transaction_prefix, t);
5172                 break;
5173         case BINDER_WORK_RETURN_ERROR: {
5174                 struct binder_error *e = container_of(
5175                                 w, struct binder_error, work);
5176
5177                 seq_printf(m, "%stransaction error: %u\n",
5178                            prefix, e->cmd);
5179         } break;
5180         case BINDER_WORK_TRANSACTION_COMPLETE:
5181                 seq_printf(m, "%stransaction complete\n", prefix);
5182                 break;
5183         case BINDER_WORK_NODE:
5184                 node = container_of(w, struct binder_node, work);
5185                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5186                            prefix, node->debug_id,
5187                            (u64)node->ptr, (u64)node->cookie);
5188                 break;
5189         case BINDER_WORK_DEAD_BINDER:
5190                 seq_printf(m, "%shas dead binder\n", prefix);
5191                 break;
5192         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5193                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5194                 break;
5195         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5196                 seq_printf(m, "%shas cleared death notification\n", prefix);
5197                 break;
5198         default:
5199                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5200                 break;
5201         }
5202 }
5203
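     /*
      * Dump one thread: its transaction stack and pending todo work. If
      * print_always is not set and nothing was printed beyond the header
      * line, rewind m->count so an idle thread produces no output.
      */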
5204 static void print_binder_thread_ilocked(struct seq_file *m,
5205                                         struct binder_thread *thread,
5206                                         int print_always)
5207 {
5208         struct binder_transaction *t;
5209         struct binder_work *w;
5210         size_t start_pos = m->count;
5211         size_t header_pos;
5212
5213         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5214                         thread->pid, thread->looper,
5215                         thread->looper_need_return,
5216                         atomic_read(&thread->tmp_ref));
5217         header_pos = m->count;
5218         t = thread->transaction_stack;
5219         while (t) {
5220                 if (t->from == thread) {
5221                         print_binder_transaction_ilocked(m, thread->proc,
5222                                         "    outgoing transaction", t);
5223                         t = t->from_parent;
5224                 } else if (t->to_thread == thread) {
5225                         print_binder_transaction_ilocked(m, thread->proc,
5226                                                  "    incoming transaction", t);
5227                         t = t->to_parent;
5228                 } else {
5229                         print_binder_transaction_ilocked(m, thread->proc,
5230                                         "    bad transaction", t);
5231                         t = NULL;
5232                 }
5233         }
5234         list_for_each_entry(w, &thread->todo, entry) {
5235                 print_binder_work_ilocked(m, thread->proc, "    ",
5236                                           "    pending transaction", w);
5237         }
5238         if (!print_always && m->count == header_pos)
5239                 m->count = start_pos;
5240 }
5241
5242 static void print_binder_node_nilocked(struct seq_file *m,
5243                                        struct binder_node *node)
5244 {
5245         struct binder_ref *ref;
5246         struct binder_work *w;
5247         int count;
5248
5249         count = 0;
5250         hlist_for_each_entry(ref, &node->refs, node_entry)
5251                 count++;
5252
5253         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5254                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5255                    node->has_strong_ref, node->has_weak_ref,
5256                    node->local_strong_refs, node->local_weak_refs,
5257                    node->internal_strong_refs, count, node->tmp_refs);
5258         if (count) {
5259                 seq_puts(m, " proc");
5260                 hlist_for_each_entry(ref, &node->refs, node_entry)
5261                         seq_printf(m, " %d", ref->proc->pid);
5262         }
5263         seq_puts(m, "\n");
5264         if (node->proc) {
5265                 list_for_each_entry(w, &node->async_todo, entry)
5266                         print_binder_work_ilocked(m, node->proc, "    ",
5267                                           "    pending async transaction", w);
5268         }
5269 }
5270
5271 static void print_binder_ref_olocked(struct seq_file *m,
5272                                      struct binder_ref *ref)
5273 {
5274         binder_node_lock(ref->node);
5275         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5276                    ref->data.debug_id, ref->data.desc,
5277                    ref->node->proc ? "" : "dead ",
5278                    ref->node->debug_id, ref->data.strong,
5279                    ref->data.weak, ref->death);
5280         binder_node_unlock(ref->node);
5281 }
5282
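     /*
      * Dump one process: threads, nodes, refs, allocator state and todo
      * work. Temporary node refs keep each node alive while the inner
      * proc lock is dropped to take the node lock; as with
      * print_binder_thread_ilocked(), the output is rewound when nothing
      * beyond the header lines was printed and print_all is not set.
      */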
5283 static void print_binder_proc(struct seq_file *m,
5284                               struct binder_proc *proc, int print_all)
5285 {
5286         struct binder_work *w;
5287         struct rb_node *n;
5288         size_t start_pos = m->count;
5289         size_t header_pos;
5290         struct binder_node *last_node = NULL;
5291
5292         seq_printf(m, "proc %d\n", proc->pid);
5293         seq_printf(m, "context %s\n", proc->context->name);
5294         header_pos = m->count;
5295
5296         binder_inner_proc_lock(proc);
5297         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5298                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5299                                                 rb_node), print_all);
5300
5301         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5302                 struct binder_node *node = rb_entry(n, struct binder_node,
5303                                                     rb_node);
5304                 if (!print_all && !node->has_async_transaction)
5305                         continue;
5306
5307                 /*
5308                  * take a temporary reference on the node so it
5309                  * survives and isn't removed from the tree
5310                  * while we print it.
5311                  */
5312                 binder_inc_node_tmpref_ilocked(node);
5313                 /* Need to drop inner lock to take node lock */
5314                 binder_inner_proc_unlock(proc);
5315                 if (last_node)
5316                         binder_put_node(last_node);
5317                 binder_node_inner_lock(node);
5318                 print_binder_node_nilocked(m, node);
5319                 binder_node_inner_unlock(node);
5320                 last_node = node;
5321                 binder_inner_proc_lock(proc);
5322         }
5323         binder_inner_proc_unlock(proc);
5324         if (last_node)
5325                 binder_put_node(last_node);
5326
5327         if (print_all) {
5328                 binder_proc_lock(proc);
5329                 for (n = rb_first(&proc->refs_by_desc);
5330                      n != NULL;
5331                      n = rb_next(n))
5332                         print_binder_ref_olocked(m, rb_entry(n,
5333                                                             struct binder_ref,
5334                                                             rb_node_desc));
5335                 binder_proc_unlock(proc);
5336         }
5337         binder_alloc_print_allocated(m, &proc->alloc);
5338         binder_inner_proc_lock(proc);
5339         list_for_each_entry(w, &proc->todo, entry)
5340                 print_binder_work_ilocked(m, proc, "  ",
5341                                           "  pending transaction", w);
5342         list_for_each_entry(w, &proc->delivered_death, entry) {
5343                 seq_puts(m, "  has delivered dead binder\n");
5344                 break;
5345         }
5346         binder_inner_proc_unlock(proc);
5347         if (!print_all && m->count == header_pos)
5348                 m->count = start_pos;
5349 }
5350
5351 static const char * const binder_return_strings[] = {
5352         "BR_ERROR",
5353         "BR_OK",
5354         "BR_TRANSACTION",
5355         "BR_REPLY",
5356         "BR_ACQUIRE_RESULT",
5357         "BR_DEAD_REPLY",
5358         "BR_TRANSACTION_COMPLETE",
5359         "BR_INCREFS",
5360         "BR_ACQUIRE",
5361         "BR_RELEASE",
5362         "BR_DECREFS",
5363         "BR_ATTEMPT_ACQUIRE",
5364         "BR_NOOP",
5365         "BR_SPAWN_LOOPER",
5366         "BR_FINISHED",
5367         "BR_DEAD_BINDER",
5368         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5369         "BR_FAILED_REPLY"
5370 };
5371
5372 static const char * const binder_command_strings[] = {
5373         "BC_TRANSACTION",
5374         "BC_REPLY",
5375         "BC_ACQUIRE_RESULT",
5376         "BC_FREE_BUFFER",
5377         "BC_INCREFS",
5378         "BC_ACQUIRE",
5379         "BC_RELEASE",
5380         "BC_DECREFS",
5381         "BC_INCREFS_DONE",
5382         "BC_ACQUIRE_DONE",
5383         "BC_ATTEMPT_ACQUIRE",
5384         "BC_REGISTER_LOOPER",
5385         "BC_ENTER_LOOPER",
5386         "BC_EXIT_LOOPER",
5387         "BC_REQUEST_DEATH_NOTIFICATION",
5388         "BC_CLEAR_DEATH_NOTIFICATION",
5389         "BC_DEAD_BINDER_DONE",
5390         "BC_TRANSACTION_SG",
5391         "BC_REPLY_SG",
5392 };
5393
5394 static const char * const binder_objstat_strings[] = {
5395         "proc",
5396         "thread",
5397         "node",
5398         "ref",
5399         "death",
5400         "transaction",
5401         "transaction_complete"
5402 };
5403
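     /*
      * Print the non-zero BC_* command, BR_* return and object lifetime
      * counters from @stats; the BUILD_BUG_ON()s keep the string tables
      * above in sync with the counter arrays.
      */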
5404 static void print_binder_stats(struct seq_file *m, const char *prefix,
5405                                struct binder_stats *stats)
5406 {
5407         int i;
5408
5409         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5410                      ARRAY_SIZE(binder_command_strings));
5411         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5412                 int temp = atomic_read(&stats->bc[i]);
5413
5414                 if (temp)
5415                         seq_printf(m, "%s%s: %d\n", prefix,
5416                                    binder_command_strings[i], temp);
5417         }
5418
5419         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5420                      ARRAY_SIZE(binder_return_strings));
5421         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5422                 int temp = atomic_read(&stats->br[i]);
5423
5424                 if (temp)
5425                         seq_printf(m, "%s%s: %d\n", prefix,
5426                                    binder_return_strings[i], temp);
5427         }
5428
5429         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5430                      ARRAY_SIZE(binder_objstat_strings));
5431         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5432                      ARRAY_SIZE(stats->obj_deleted));
5433         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5434                 int created = atomic_read(&stats->obj_created[i]);
5435                 int deleted = atomic_read(&stats->obj_deleted[i]);
5436
5437                 if (created || deleted)
5438                         seq_printf(m, "%s%s: active %d total %d\n",
5439                                 prefix,
5440                                 binder_objstat_strings[i],
5441                                 created - deleted,
5442                                 created);
5443         }
5444 }
5445
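     /*
      * Per-process statistics: thread, node, ref and buffer counts,
      * allocator page usage, pending transactions and the per-proc
      * command/return/object counters.
      */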
5446 static void print_binder_proc_stats(struct seq_file *m,
5447                                     struct binder_proc *proc)
5448 {
5449         struct binder_work *w;
5450         struct binder_thread *thread;
5451         struct rb_node *n;
5452         int count, strong, weak, ready_threads;
5453         size_t free_async_space =
5454                 binder_alloc_get_free_async_space(&proc->alloc);
5455
5456         seq_printf(m, "proc %d\n", proc->pid);
5457         seq_printf(m, "context %s\n", proc->context->name);
5458         count = 0;
5459         ready_threads = 0;
5460         binder_inner_proc_lock(proc);
5461         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5462                 count++;
5463
5464         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5465                 ready_threads++;
5466
5467         seq_printf(m, "  threads: %d\n", count);
5468         seq_printf(m, "  requested threads: %d+%d/%d\n"
5469                         "  ready threads %d\n"
5470                         "  free async space %zd\n", proc->requested_threads,
5471                         proc->requested_threads_started, proc->max_threads,
5472                         ready_threads,
5473                         free_async_space);
5474         count = 0;
5475         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5476                 count++;
5477         binder_inner_proc_unlock(proc);
5478         seq_printf(m, "  nodes: %d\n", count);
5479         count = 0;
5480         strong = 0;
5481         weak = 0;
5482         binder_proc_lock(proc);
5483         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5484                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5485                                                   rb_node_desc);
5486                 count++;
5487                 strong += ref->data.strong;
5488                 weak += ref->data.weak;
5489         }
5490         binder_proc_unlock(proc);
5491         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5492
5493         count = binder_alloc_get_allocated_count(&proc->alloc);
5494         seq_printf(m, "  buffers: %d\n", count);
5495
5496         binder_alloc_print_pages(m, &proc->alloc);
5497
5498         count = 0;
5499         binder_inner_proc_lock(proc);
5500         list_for_each_entry(w, &proc->todo, entry) {
5501                 if (w->type == BINDER_WORK_TRANSACTION)
5502                         count++;
5503         }
5504         binder_inner_proc_unlock(proc);
5505         seq_printf(m, "  pending transactions: %d\n", count);
5506
5507         print_binder_stats(m, "  ", &proc->stats);
5508 }
5509
5510
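     /*
      * Dump all dead nodes followed by the full state of every process.
      * A temporary ref is taken on each dead node so it stays valid while
      * binder_dead_nodes_lock is dropped for printing.
      */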
5511 int binder_state_show(struct seq_file *m, void *unused)
5512 {
5513         struct binder_proc *proc;
5514         struct binder_node *node;
5515         struct binder_node *last_node = NULL;
5516
5517         seq_puts(m, "binder state:\n");
5518
5519         spin_lock(&binder_dead_nodes_lock);
5520         if (!hlist_empty(&binder_dead_nodes))
5521                 seq_puts(m, "dead nodes:\n");
5522         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5523                 /*
5524                  * take a temporary reference on the node so it
5525                  * survives and isn't removed from the list
5526                  * while we print it.
5527                  */
5528                 node->tmp_refs++;
5529                 spin_unlock(&binder_dead_nodes_lock);
5530                 if (last_node)
5531                         binder_put_node(last_node);
5532                 binder_node_lock(node);
5533                 print_binder_node_nilocked(m, node);
5534                 binder_node_unlock(node);
5535                 last_node = node;
5536                 spin_lock(&binder_dead_nodes_lock);
5537         }
5538         spin_unlock(&binder_dead_nodes_lock);
5539         if (last_node)
5540                 binder_put_node(last_node);
5541
5542         mutex_lock(&binder_procs_lock);
5543         hlist_for_each_entry(proc, &binder_procs, proc_node)
5544                 print_binder_proc(m, proc, 1);
5545         mutex_unlock(&binder_procs_lock);
5546
5547         return 0;
5548 }
5549
5550 int binder_stats_show(struct seq_file *m, void *unused)
5551 {
5552         struct binder_proc *proc;
5553
5554         seq_puts(m, "binder stats:\n");
5555
5556         print_binder_stats(m, "", &binder_stats);
5557
5558         mutex_lock(&binder_procs_lock);
5559         hlist_for_each_entry(proc, &binder_procs, proc_node)
5560                 print_binder_proc_stats(m, proc);
5561         mutex_unlock(&binder_procs_lock);
5562
5563         return 0;
5564 }
5565
5566 int binder_transactions_show(struct seq_file *m, void *unused)
5567 {
5568         struct binder_proc *proc;
5569
5570         seq_puts(m, "binder transactions:\n");
5571         mutex_lock(&binder_procs_lock);
5572         hlist_for_each_entry(proc, &binder_procs, proc_node)
5573                 print_binder_proc(m, proc, 0);
5574         mutex_unlock(&binder_procs_lock);
5575
5576         return 0;
5577 }
5578
5579 static int proc_show(struct seq_file *m, void *unused)
5580 {
5581         struct binder_proc *itr;
5582         int pid = (unsigned long)m->private;
5583
5584         mutex_lock(&binder_procs_lock);
5585         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5586                 if (itr->pid == pid) {
5587                         seq_puts(m, "binder proc state:\n");
5588                         print_binder_proc(m, itr, 1);
5589                 }
5590         }
5591         mutex_unlock(&binder_procs_lock);
5592
5593         return 0;
5594 }
5595
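     /*
      * Print one transaction log entry. The entry may be overwritten
      * concurrently by a newer transaction; debug_id_done is sampled
      * before and after printing (with read barriers in between) and the
      * line is marked "(incomplete)" if the entry changed under us.
      */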
5596 static void print_binder_transaction_log_entry(struct seq_file *m,
5597                                         struct binder_transaction_log_entry *e)
5598 {
5599         int debug_id = READ_ONCE(e->debug_id_done);
5600         /*
5601          * read barrier to guarantee that debug_id_done is read before
5602          * we print the log values
5603          */
5604         smp_rmb();
5605         seq_printf(m,
5606                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5607                    e->debug_id, (e->call_type == 2) ? "reply" :
5608                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5609                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5610                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5611                    e->return_error, e->return_error_param,
5612                    e->return_error_line);
5613         /*
5614          * read barrier to guarantee that debug_id_done is read only
5615          * after we are done printing the fields of the entry
5616          */
5617         smp_rmb();
5618         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5619                         "\n" : " (incomplete)\n");
5620 }
5621
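     /*
      * Dump the transaction log ring buffer. log->cur indexes the most
      * recently written entry; once the log has wrapped (log->full), all
      * ARRAY_SIZE(log->entry) slots are printed starting from the oldest,
      * otherwise only the slots written so far are printed.
      */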
5622 int binder_transaction_log_show(struct seq_file *m, void *unused)
5623 {
5624         struct binder_transaction_log *log = m->private;
5625         unsigned int log_cur = atomic_read(&log->cur);
5626         unsigned int count;
5627         unsigned int cur;
5628         int i;
5629
5630         count = log_cur + 1;
5631         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5632                 0 : count % ARRAY_SIZE(log->entry);
5633         if (count > ARRAY_SIZE(log->entry) || log->full)
5634                 count = ARRAY_SIZE(log->entry);
5635         for (i = 0; i < count; i++) {
5636                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5637
5638                 print_binder_transaction_log_entry(m, &log->entry[index]);
5639         }
5640         return 0;
5641 }
5642
5643 const struct file_operations binder_fops = {
5644         .owner = THIS_MODULE,
5645         .poll = binder_poll,
5646         .unlocked_ioctl = binder_ioctl,
5647         .compat_ioctl = compat_ptr_ioctl,
5648         .mmap = binder_mmap,
5649         .open = binder_open,
5650         .flush = binder_flush,
5651         .release = binder_release,
5652 };
5653
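     /*
      * Register one misc character device for a name taken from the
      * binder_devices module parameter and add it to the binder_devices
      * list.
      */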
5654 static int __init init_binder_device(const char *name)
5655 {
5656         int ret;
5657         struct binder_device *binder_device;
5658
5659         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5660         if (!binder_device)
5661                 return -ENOMEM;
5662
5663         binder_device->miscdev.fops = &binder_fops;
5664         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5665         binder_device->miscdev.name = name;
5666
5667         refcount_set(&binder_device->ref, 1);
5668         binder_device->context.binder_context_mgr_uid = INVALID_UID;
5669         binder_device->context.name = name;
5670         mutex_init(&binder_device->context.context_mgr_node_lock);
5671
5672         ret = misc_register(&binder_device->miscdev);
5673         if (ret < 0) {
5674                 kfree(binder_device);
5675                 return ret;
5676         }
5677
5678         hlist_add_head(&binder_device->hlist, &binder_devices);
5679
5680         return ret;
5681 }
5682
5683 static int __init binder_init(void)
5684 {
5685         int ret;
5686         char *device_name, *device_tmp;
5687         struct binder_device *device;
5688         struct hlist_node *tmp;
5689         char *device_names = NULL;
5690
5691         ret = binder_alloc_shrinker_init();
5692         if (ret)
5693                 return ret;
5694
5695         atomic_set(&binder_transaction_log.cur, ~0U);
5696         atomic_set(&binder_transaction_log_failed.cur, ~0U);
5697
5698         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5699         if (binder_debugfs_dir_entry_root)
5700                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5701                                                  binder_debugfs_dir_entry_root);
5702
5703         if (binder_debugfs_dir_entry_root) {
5704                 debugfs_create_file("state",
5705                                     0444,
5706                                     binder_debugfs_dir_entry_root,
5707                                     NULL,
5708                                     &binder_state_fops);
5709                 debugfs_create_file("stats",
5710                                     0444,
5711                                     binder_debugfs_dir_entry_root,
5712                                     NULL,
5713                                     &binder_stats_fops);
5714                 debugfs_create_file("transactions",
5715                                     0444,
5716                                     binder_debugfs_dir_entry_root,
5717                                     NULL,
5718                                     &binder_transactions_fops);
5719                 debugfs_create_file("transaction_log",
5720                                     0444,
5721                                     binder_debugfs_dir_entry_root,
5722                                     &binder_transaction_log,
5723                                     &binder_transaction_log_fops);
5724                 debugfs_create_file("failed_transaction_log",
5725                                     0444,
5726                                     binder_debugfs_dir_entry_root,
5727                                     &binder_transaction_log_failed,
5728                                     &binder_transaction_log_fops);
5729         }
5730
5731         if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
5732             strcmp(binder_devices_param, "") != 0) {
5733                 /*
5734                  * Copy the module parameter string, because we don't want to
5735                  * tokenize it in-place.
5736                  */
5737                 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5738                 if (!device_names) {
5739                         ret = -ENOMEM;
5740                         goto err_alloc_device_names_failed;
5741                 }
5742
5743                 device_tmp = device_names;
5744                 while ((device_name = strsep(&device_tmp, ","))) {
5745                         ret = init_binder_device(device_name);
5746                         if (ret)
5747                                 goto err_init_binder_device_failed;
5748                 }
5749         }
5750
5751         ret = init_binderfs();
5752         if (ret)
5753                 goto err_init_binder_device_failed;
5754
5755         return ret;
5756
5757 err_init_binder_device_failed:
5758         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5759                 misc_deregister(&device->miscdev);
5760                 hlist_del(&device->hlist);
5761                 kfree(device);
5762         }
5763
5764         kfree(device_names);
5765
5766 err_alloc_device_names_failed:
5767         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5768
5769         return ret;
5770 }
5771
5772 device_initcall(binder_init);
5773
5774 #define CREATE_TRACE_POINTS
5775 #include "binder_trace.h"
5776
5777 MODULE_LICENSE("GPL v2");