1 // SPDX-License-Identifier: GPL-2.0-only
2 /* binder.c
3  *
4  * Android IPC Subsystem
5  *
6  * Copyright (C) 2007-2008 Google, Inc.
7  */
8
9 /*
10  * Locking overview
11  *
12  * There are 3 main spinlocks which must be acquired in the
13  * order shown:
14  *
15  * 1) proc->outer_lock : protects binder_ref
16  *    binder_proc_lock() and binder_proc_unlock() are
17  *    used to acq/rel.
18  * 2) node->lock : protects most fields of binder_node.
19  *    binder_node_lock() and binder_node_unlock() are
20  *    used to acq/rel
21  * 3) proc->inner_lock : protects the thread and node lists
22  *    (proc->threads, proc->waiting_threads, proc->nodes)
23  *    and all todo lists associated with the binder_proc
24  *    (proc->todo, thread->todo, proc->delivered_death and
25  *    node->async_todo), as well as thread->transaction_stack
26  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27  *    are used to acq/rel
28  *
29  * Any lock under procA must never be nested under any lock at the same
30  * level or below on procB.
31  *
 32  * Functions that require a lock to be held on entry indicate the
 33  * required lock in the suffix of the function name:
 34  *
 35  * foo_olocked() : requires proc->outer_lock
36  * foo_nlocked() : requires node->lock
37  * foo_ilocked() : requires proc->inner_lock
38  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39  * foo_nilocked(): requires node->lock and proc->inner_lock
40  * ...
41  */
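/*
 * Illustrative sketch (not part of the driver) of the ordering and naming
 * convention above. A hypothetical helper named with the "nilocked" suffix
 * expects node->lock and proc->inner_lock on entry, and a caller that also
 * needs the outer lock takes the locks in the documented 1 -> 2 -> 3 order:
 *
 *	binder_proc_lock(proc);          // 1) proc->outer_lock
 *	binder_node_inner_lock(node);    // 2) node->lock, 3) proc->inner_lock
 *	foo_nilocked(node);              // hypothetical helper
 *	binder_node_inner_unlock(node);
 *	binder_proc_unlock(proc);
 */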
42
43 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45 #include <linux/fdtable.h>
46 #include <linux/file.h>
47 #include <linux/freezer.h>
48 #include <linux/fs.h>
49 #include <linux/list.h>
50 #include <linux/miscdevice.h>
51 #include <linux/module.h>
52 #include <linux/mutex.h>
53 #include <linux/nsproxy.h>
54 #include <linux/poll.h>
55 #include <linux/debugfs.h>
56 #include <linux/rbtree.h>
57 #include <linux/sched/signal.h>
58 #include <linux/sched/mm.h>
59 #include <linux/seq_file.h>
60 #include <linux/string.h>
61 #include <linux/uaccess.h>
62 #include <linux/pid_namespace.h>
63 #include <linux/security.h>
64 #include <linux/spinlock.h>
65 #include <linux/ratelimit.h>
66 #include <linux/syscalls.h>
67 #include <linux/task_work.h>
68 #include <linux/sizes.h>
69
70 #include <uapi/linux/android/binder.h>
71
72 #include <asm/cacheflush.h>
73
74 #include "binder_internal.h"
75 #include "binder_trace.h"
76
77 static HLIST_HEAD(binder_deferred_list);
78 static DEFINE_MUTEX(binder_deferred_lock);
79
80 static HLIST_HEAD(binder_devices);
81 static HLIST_HEAD(binder_procs);
82 static DEFINE_MUTEX(binder_procs_lock);
83
84 static HLIST_HEAD(binder_dead_nodes);
85 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
86
87 static struct dentry *binder_debugfs_dir_entry_root;
88 static struct dentry *binder_debugfs_dir_entry_proc;
89 static atomic_t binder_last_id;
90
91 static int proc_show(struct seq_file *m, void *unused);
92 DEFINE_SHOW_ATTRIBUTE(proc);
93
94 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
95
96 enum {
97         BINDER_DEBUG_USER_ERROR             = 1U << 0,
98         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
99         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
100         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
101         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
102         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
103         BINDER_DEBUG_READ_WRITE             = 1U << 6,
104         BINDER_DEBUG_USER_REFS              = 1U << 7,
105         BINDER_DEBUG_THREADS                = 1U << 8,
106         BINDER_DEBUG_TRANSACTION            = 1U << 9,
107         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
108         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
109         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
110         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
111         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
112 };
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
116
117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
118 module_param_named(devices, binder_devices_param, charp, 0444);
119
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
122
123 static int binder_set_stop_on_user_error(const char *val,
124                                          const struct kernel_param *kp)
125 {
126         int ret;
127
128         ret = param_set_int(val, kp);
129         if (binder_stop_on_user_error < 2)
130                 wake_up(&binder_user_error_wait);
131         return ret;
132 }
133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
134         param_get_int, &binder_stop_on_user_error, 0644);
135
136 #define binder_debug(mask, x...) \
137         do { \
138                 if (binder_debug_mask & mask) \
139                         pr_info_ratelimited(x); \
140         } while (0)
141
142 #define binder_user_error(x...) \
143         do { \
144                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
145                         pr_info_ratelimited(x); \
146                 if (binder_stop_on_user_error) \
147                         binder_stop_on_user_error = 2; \
148         } while (0)
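/*
 * Example usage (an illustrative sketch assuming a caller with a valid
 * @proc); output is gated at runtime by the debug_mask module parameter
 * declared above:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d: example open\n", proc->pid);
 *	binder_user_error("%d: example bad handle\n", proc->pid);
 *
 * Writing to /sys/module/binder/parameters/debug_mask (e.g. 0x7fff to
 * enable all classes) is one way to adjust the mask at runtime.
 */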
149
150 #define to_flat_binder_object(hdr) \
151         container_of(hdr, struct flat_binder_object, hdr)
152
153 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
154
155 #define to_binder_buffer_object(hdr) \
156         container_of(hdr, struct binder_buffer_object, hdr)
157
158 #define to_binder_fd_array_object(hdr) \
159         container_of(hdr, struct binder_fd_array_object, hdr)
160
161 static struct binder_stats binder_stats;
162
163 static inline void binder_stats_deleted(enum binder_stat_types type)
164 {
165         atomic_inc(&binder_stats.obj_deleted[type]);
166 }
167
168 static inline void binder_stats_created(enum binder_stat_types type)
169 {
170         atomic_inc(&binder_stats.obj_created[type]);
171 }
172
173 struct binder_transaction_log binder_transaction_log;
174 struct binder_transaction_log binder_transaction_log_failed;
175
176 static struct binder_transaction_log_entry *binder_transaction_log_add(
177         struct binder_transaction_log *log)
178 {
179         struct binder_transaction_log_entry *e;
180         unsigned int cur = atomic_inc_return(&log->cur);
181
182         if (cur >= ARRAY_SIZE(log->entry))
183                 log->full = true;
184         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
185         WRITE_ONCE(e->debug_id_done, 0);
186         /*
187          * write-barrier to synchronize access to e->debug_id_done.
 188          * We make sure the initialized 0 value is seen before
 189          * the other fields are zeroed by memset().
190          */
191         smp_wmb();
192         memset(e, 0, sizeof(*e));
193         return e;
194 }
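/*
 * Sketch of the reader side that the smp_wmb() above pairs with (an
 * illustration, not verbatim driver code): a consumer reads
 * e->debug_id_done first, issues a read barrier, and only then looks at
 * the other fields, so a mismatch with e->debug_id reveals an entry that
 * was still being updated:
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// ... read or print the entry; it is unreliable if
 *	// debug_id != e->debug_id
 *
 * In this file the debugfs transaction-log printer follows this pattern.
 */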
195
196 enum binder_deferred_state {
197         BINDER_DEFERRED_FLUSH        = 0x01,
198         BINDER_DEFERRED_RELEASE      = 0x02,
199 };
200
201 enum {
202         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
203         BINDER_LOOPER_STATE_ENTERED     = 0x02,
204         BINDER_LOOPER_STATE_EXITED      = 0x04,
205         BINDER_LOOPER_STATE_INVALID     = 0x08,
206         BINDER_LOOPER_STATE_WAITING     = 0x10,
207         BINDER_LOOPER_STATE_POLL        = 0x20,
208 };
209
210 /**
211  * binder_proc_lock() - Acquire outer lock for given binder_proc
212  * @proc:         struct binder_proc to acquire
213  *
214  * Acquires proc->outer_lock. Used to protect binder_ref
215  * structures associated with the given proc.
216  */
217 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
218 static void
219 _binder_proc_lock(struct binder_proc *proc, int line)
220         __acquires(&proc->outer_lock)
221 {
222         binder_debug(BINDER_DEBUG_SPINLOCKS,
223                      "%s: line=%d\n", __func__, line);
224         spin_lock(&proc->outer_lock);
225 }
226
227 /**
228  * binder_proc_unlock() - Release spinlock for given binder_proc
 229  * @proc:         struct binder_proc whose outer_lock is released
230  *
231  * Release lock acquired via binder_proc_lock()
232  */
233 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
234 static void
235 _binder_proc_unlock(struct binder_proc *proc, int line)
236         __releases(&proc->outer_lock)
237 {
238         binder_debug(BINDER_DEBUG_SPINLOCKS,
239                      "%s: line=%d\n", __func__, line);
240         spin_unlock(&proc->outer_lock);
241 }
242
243 /**
244  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
245  * @proc:         struct binder_proc to acquire
246  *
247  * Acquires proc->inner_lock. Used to protect todo lists
248  */
249 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
250 static void
251 _binder_inner_proc_lock(struct binder_proc *proc, int line)
252         __acquires(&proc->inner_lock)
253 {
254         binder_debug(BINDER_DEBUG_SPINLOCKS,
255                      "%s: line=%d\n", __func__, line);
256         spin_lock(&proc->inner_lock);
257 }
258
259 /**
260  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 261  * @proc:         struct binder_proc whose inner_lock is released
262  *
263  * Release lock acquired via binder_inner_proc_lock()
264  */
265 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
266 static void
267 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
268         __releases(&proc->inner_lock)
269 {
270         binder_debug(BINDER_DEBUG_SPINLOCKS,
271                      "%s: line=%d\n", __func__, line);
272         spin_unlock(&proc->inner_lock);
273 }
274
275 /**
276  * binder_node_lock() - Acquire spinlock for given binder_node
277  * @node:         struct binder_node to acquire
278  *
279  * Acquires node->lock. Used to protect binder_node fields
280  */
281 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
282 static void
283 _binder_node_lock(struct binder_node *node, int line)
284         __acquires(&node->lock)
285 {
286         binder_debug(BINDER_DEBUG_SPINLOCKS,
287                      "%s: line=%d\n", __func__, line);
288         spin_lock(&node->lock);
289 }
290
291 /**
 292  * binder_node_unlock() - Release spinlock for given binder_node
 293  * @node:         struct binder_node whose lock is released
294  *
295  * Release lock acquired via binder_node_lock()
296  */
297 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
298 static void
299 _binder_node_unlock(struct binder_node *node, int line)
300         __releases(&node->lock)
301 {
302         binder_debug(BINDER_DEBUG_SPINLOCKS,
303                      "%s: line=%d\n", __func__, line);
304         spin_unlock(&node->lock);
305 }
306
307 /**
308  * binder_node_inner_lock() - Acquire node and inner locks
309  * @node:         struct binder_node to acquire
310  *
 311  * Acquires node->lock. If node->proc is non-NULL, also acquires
 312  * proc->inner_lock. Used to protect binder_node fields
313  */
314 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
315 static void
316 _binder_node_inner_lock(struct binder_node *node, int line)
317         __acquires(&node->lock) __acquires(&node->proc->inner_lock)
318 {
319         binder_debug(BINDER_DEBUG_SPINLOCKS,
320                      "%s: line=%d\n", __func__, line);
321         spin_lock(&node->lock);
322         if (node->proc)
323                 binder_inner_proc_lock(node->proc);
324         else
325                 /* annotation for sparse */
326                 __acquire(&node->proc->inner_lock);
327 }
328
329 /**
 330  * binder_node_inner_unlock() - Release node and inner locks
 331  * @node:         struct binder_node whose locks are released
 332  *
 333  * Release locks acquired via binder_node_inner_lock()
334  */
335 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
336 static void
337 _binder_node_inner_unlock(struct binder_node *node, int line)
338         __releases(&node->lock) __releases(&node->proc->inner_lock)
339 {
340         struct binder_proc *proc = node->proc;
341
342         binder_debug(BINDER_DEBUG_SPINLOCKS,
343                      "%s: line=%d\n", __func__, line);
344         if (proc)
345                 binder_inner_proc_unlock(proc);
346         else
347                 /* annotation for sparse */
348                 __release(&node->proc->inner_lock);
349         spin_unlock(&node->lock);
350 }
351
352 static bool binder_worklist_empty_ilocked(struct list_head *list)
353 {
354         return list_empty(list);
355 }
356
357 /**
358  * binder_worklist_empty() - Check if no items on the work list
359  * @proc:       binder_proc associated with list
360  * @list:       list to check
361  *
362  * Return: true if there are no items on list, else false
363  */
364 static bool binder_worklist_empty(struct binder_proc *proc,
365                                   struct list_head *list)
366 {
367         bool ret;
368
369         binder_inner_proc_lock(proc);
370         ret = binder_worklist_empty_ilocked(list);
371         binder_inner_proc_unlock(proc);
372         return ret;
373 }
374
375 /**
376  * binder_enqueue_work_ilocked() - Add an item to the work list
377  * @work:         struct binder_work to add to list
378  * @target_list:  list to add work to
379  *
380  * Adds the work to the specified list. Asserts that work
381  * is not already on a list.
382  *
383  * Requires the proc->inner_lock to be held.
384  */
385 static void
386 binder_enqueue_work_ilocked(struct binder_work *work,
387                            struct list_head *target_list)
388 {
389         BUG_ON(target_list == NULL);
390         BUG_ON(work->entry.next && !list_empty(&work->entry));
391         list_add_tail(&work->entry, target_list);
392 }
393
394 /**
395  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
396  * @thread:       thread to queue work to
397  * @work:         struct binder_work to add to list
398  *
399  * Adds the work to the todo list of the thread. Doesn't set the process_todo
400  * flag, which means that (if it wasn't already set) the thread will go to
401  * sleep without handling this work when it calls read.
402  *
403  * Requires the proc->inner_lock to be held.
404  */
405 static void
406 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
407                                             struct binder_work *work)
408 {
409         WARN_ON(!list_empty(&thread->waiting_thread_node));
410         binder_enqueue_work_ilocked(work, &thread->todo);
411 }
412
413 /**
414  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
415  * @thread:       thread to queue work to
416  * @work:         struct binder_work to add to list
417  *
418  * Adds the work to the todo list of the thread, and enables processing
419  * of the todo queue.
420  *
421  * Requires the proc->inner_lock to be held.
422  */
423 static void
424 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
425                                    struct binder_work *work)
426 {
427         WARN_ON(!list_empty(&thread->waiting_thread_node));
428         binder_enqueue_work_ilocked(work, &thread->todo);
429         thread->process_todo = true;
430 }
431
432 /**
433  * binder_enqueue_thread_work() - Add an item to the thread work list
434  * @thread:       thread to queue work to
435  * @work:         struct binder_work to add to list
436  *
437  * Adds the work to the todo list of the thread, and enables processing
438  * of the todo queue.
439  */
440 static void
441 binder_enqueue_thread_work(struct binder_thread *thread,
442                            struct binder_work *work)
443 {
444         binder_inner_proc_lock(thread->proc);
445         binder_enqueue_thread_work_ilocked(thread, work);
446         binder_inner_proc_unlock(thread->proc);
447 }
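/*
 * Typical caller pattern (an illustrative sketch; @proc, @thread and @t
 * are assumed to have been looked up already): queue the work under the
 * inner lock and wake a thread so the item is handled promptly:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 *
 * (binder_wakeup_thread_ilocked() is defined later in this file.)
 */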
448
449 static void
450 binder_dequeue_work_ilocked(struct binder_work *work)
451 {
452         list_del_init(&work->entry);
453 }
454
455 /**
456  * binder_dequeue_work() - Removes an item from the work list
457  * @proc:         binder_proc associated with list
458  * @work:         struct binder_work to remove from list
459  *
460  * Removes the specified work item from whatever list it is on.
461  * Can safely be called if work is not on any list.
462  */
463 static void
464 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
465 {
466         binder_inner_proc_lock(proc);
467         binder_dequeue_work_ilocked(work);
468         binder_inner_proc_unlock(proc);
469 }
470
471 static struct binder_work *binder_dequeue_work_head_ilocked(
472                                         struct list_head *list)
473 {
474         struct binder_work *w;
475
476         w = list_first_entry_or_null(list, struct binder_work, entry);
477         if (w)
478                 list_del_init(&w->entry);
479         return w;
480 }
481
482 static void
483 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
484 static void binder_free_thread(struct binder_thread *thread);
485 static void binder_free_proc(struct binder_proc *proc);
486 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
487
488 static bool binder_has_work_ilocked(struct binder_thread *thread,
489                                     bool do_proc_work)
490 {
491         return thread->process_todo ||
492                 thread->looper_need_return ||
493                 (do_proc_work &&
494                  !binder_worklist_empty_ilocked(&thread->proc->todo));
495 }
496
497 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
498 {
499         bool has_work;
500
501         binder_inner_proc_lock(thread->proc);
502         has_work = binder_has_work_ilocked(thread, do_proc_work);
503         binder_inner_proc_unlock(thread->proc);
504
505         return has_work;
506 }
507
508 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
509 {
510         return !thread->transaction_stack &&
511                 binder_worklist_empty_ilocked(&thread->todo) &&
512                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
513                                    BINDER_LOOPER_STATE_REGISTERED));
514 }
515
516 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
517                                                bool sync)
518 {
519         struct rb_node *n;
520         struct binder_thread *thread;
521
522         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
523                 thread = rb_entry(n, struct binder_thread, rb_node);
524                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
525                     binder_available_for_proc_work_ilocked(thread)) {
526                         if (sync)
527                                 wake_up_interruptible_sync(&thread->wait);
528                         else
529                                 wake_up_interruptible(&thread->wait);
530                 }
531         }
532 }
533
534 /**
535  * binder_select_thread_ilocked() - selects a thread for doing proc work.
536  * @proc:       process to select a thread from
537  *
538  * Note that calling this function moves the thread off the waiting_threads
539  * list, so it can only be woken up by the caller of this function, or a
540  * signal. Therefore, callers *should* always wake up the thread this function
541  * returns.
542  *
543  * Return:      If there's a thread currently waiting for process work,
544  *              returns that thread. Otherwise returns NULL.
545  */
546 static struct binder_thread *
547 binder_select_thread_ilocked(struct binder_proc *proc)
548 {
549         struct binder_thread *thread;
550
551         assert_spin_locked(&proc->inner_lock);
552         thread = list_first_entry_or_null(&proc->waiting_threads,
553                                           struct binder_thread,
554                                           waiting_thread_node);
555
556         if (thread)
557                 list_del_init(&thread->waiting_thread_node);
558
559         return thread;
560 }
561
562 /**
563  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
564  * @proc:       process to wake up a thread in
565  * @thread:     specific thread to wake-up (may be NULL)
566  * @sync:       whether to do a synchronous wake-up
567  *
568  * This function wakes up a thread in the @proc process.
569  * The caller may provide a specific thread to wake-up in
570  * the @thread parameter. If @thread is NULL, this function
571  * will wake up threads that have called poll().
572  *
573  * Note that for this function to work as expected, callers
574  * should first call binder_select_thread() to find a thread
575  * to handle the work (if they don't have a thread already),
576  * and pass the result into the @thread parameter.
577  */
578 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
579                                          struct binder_thread *thread,
580                                          bool sync)
581 {
582         assert_spin_locked(&proc->inner_lock);
583
584         if (thread) {
585                 if (sync)
586                         wake_up_interruptible_sync(&thread->wait);
587                 else
588                         wake_up_interruptible(&thread->wait);
589                 return;
590         }
591
592         /* Didn't find a thread waiting for proc work; this can happen
593          * in two scenarios:
594          * 1. All threads are busy handling transactions
595          *    In that case, one of those threads should call back into
596          *    the kernel driver soon and pick up this work.
597          * 2. Threads are using the (e)poll interface, in which case
598          *    they may be blocked on the waitqueue without having been
599          *    added to waiting_threads. For this case, we just iterate
600          *    over all threads not handling transaction work, and
601          *    wake them all up. We wake all because we don't know whether
602          *    a thread that called into (e)poll is handling non-binder
603          *    work currently.
604          */
605         binder_wakeup_poll_threads_ilocked(proc, sync);
606 }
607
608 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
609 {
610         struct binder_thread *thread = binder_select_thread_ilocked(proc);
611
612         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
613 }
614
615 static void binder_set_nice(long nice)
616 {
617         long min_nice;
618
619         if (can_nice(current, nice)) {
620                 set_user_nice(current, nice);
621                 return;
622         }
623         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
624         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
625                      "%d: nice value %ld not allowed use %ld instead\n",
626                       current->pid, nice, min_nice);
627         set_user_nice(current, min_nice);
628         if (min_nice <= MAX_NICE)
629                 return;
630         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
631 }
632
633 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
634                                                    binder_uintptr_t ptr)
635 {
636         struct rb_node *n = proc->nodes.rb_node;
637         struct binder_node *node;
638
639         assert_spin_locked(&proc->inner_lock);
640
641         while (n) {
642                 node = rb_entry(n, struct binder_node, rb_node);
643
644                 if (ptr < node->ptr)
645                         n = n->rb_left;
646                 else if (ptr > node->ptr)
647                         n = n->rb_right;
648                 else {
649                         /*
650                          * take an implicit weak reference
651                          * to ensure node stays alive until
652                          * call to binder_put_node()
653                          */
654                         binder_inc_node_tmpref_ilocked(node);
655                         return node;
656                 }
657         }
658         return NULL;
659 }
660
661 static struct binder_node *binder_get_node(struct binder_proc *proc,
662                                            binder_uintptr_t ptr)
663 {
664         struct binder_node *node;
665
666         binder_inner_proc_lock(proc);
667         node = binder_get_node_ilocked(proc, ptr);
668         binder_inner_proc_unlock(proc);
669         return node;
670 }
671
672 static struct binder_node *binder_init_node_ilocked(
673                                                 struct binder_proc *proc,
674                                                 struct binder_node *new_node,
675                                                 struct flat_binder_object *fp)
676 {
677         struct rb_node **p = &proc->nodes.rb_node;
678         struct rb_node *parent = NULL;
679         struct binder_node *node;
680         binder_uintptr_t ptr = fp ? fp->binder : 0;
681         binder_uintptr_t cookie = fp ? fp->cookie : 0;
682         __u32 flags = fp ? fp->flags : 0;
683
684         assert_spin_locked(&proc->inner_lock);
685
686         while (*p) {
687
688                 parent = *p;
689                 node = rb_entry(parent, struct binder_node, rb_node);
690
691                 if (ptr < node->ptr)
692                         p = &(*p)->rb_left;
693                 else if (ptr > node->ptr)
694                         p = &(*p)->rb_right;
695                 else {
696                         /*
697                          * A matching node is already in
698                          * the rb tree. Abandon the init
699                          * and return it.
700                          */
701                         binder_inc_node_tmpref_ilocked(node);
702                         return node;
703                 }
704         }
705         node = new_node;
706         binder_stats_created(BINDER_STAT_NODE);
707         node->tmp_refs++;
708         rb_link_node(&node->rb_node, parent, p);
709         rb_insert_color(&node->rb_node, &proc->nodes);
710         node->debug_id = atomic_inc_return(&binder_last_id);
711         node->proc = proc;
712         node->ptr = ptr;
713         node->cookie = cookie;
714         node->work.type = BINDER_WORK_NODE;
715         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
716         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
717         node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
718         spin_lock_init(&node->lock);
719         INIT_LIST_HEAD(&node->work.entry);
720         INIT_LIST_HEAD(&node->async_todo);
721         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
722                      "%d:%d node %d u%016llx c%016llx created\n",
723                      proc->pid, current->pid, node->debug_id,
724                      (u64)node->ptr, (u64)node->cookie);
725
726         return node;
727 }
728
729 static struct binder_node *binder_new_node(struct binder_proc *proc,
730                                            struct flat_binder_object *fp)
731 {
732         struct binder_node *node;
733         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
734
735         if (!new_node)
736                 return NULL;
737         binder_inner_proc_lock(proc);
738         node = binder_init_node_ilocked(proc, new_node, fp);
739         binder_inner_proc_unlock(proc);
740         if (node != new_node)
741                 /*
742                  * The node was already added by another thread
743                  */
744                 kfree(new_node);
745
746         return node;
747 }
748
749 static void binder_free_node(struct binder_node *node)
750 {
751         kfree(node);
752         binder_stats_deleted(BINDER_STAT_NODE);
753 }
754
755 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
756                                     int internal,
757                                     struct list_head *target_list)
758 {
759         struct binder_proc *proc = node->proc;
760
761         assert_spin_locked(&node->lock);
762         if (proc)
763                 assert_spin_locked(&proc->inner_lock);
764         if (strong) {
765                 if (internal) {
766                         if (target_list == NULL &&
767                             node->internal_strong_refs == 0 &&
768                             !(node->proc &&
769                               node == node->proc->context->binder_context_mgr_node &&
770                               node->has_strong_ref)) {
771                                 pr_err("invalid inc strong node for %d\n",
772                                         node->debug_id);
773                                 return -EINVAL;
774                         }
775                         node->internal_strong_refs++;
776                 } else
777                         node->local_strong_refs++;
778                 if (!node->has_strong_ref && target_list) {
779                         struct binder_thread *thread = container_of(target_list,
780                                                     struct binder_thread, todo);
781                         binder_dequeue_work_ilocked(&node->work);
782                         BUG_ON(&thread->todo != target_list);
783                         binder_enqueue_deferred_thread_work_ilocked(thread,
784                                                                    &node->work);
785                 }
786         } else {
787                 if (!internal)
788                         node->local_weak_refs++;
789                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
790                         if (target_list == NULL) {
791                                 pr_err("invalid inc weak node for %d\n",
792                                         node->debug_id);
793                                 return -EINVAL;
794                         }
795                         /*
796                          * See comment above
797                          */
798                         binder_enqueue_work_ilocked(&node->work, target_list);
799                 }
800         }
801         return 0;
802 }
803
804 static int binder_inc_node(struct binder_node *node, int strong, int internal,
805                            struct list_head *target_list)
806 {
807         int ret;
808
809         binder_node_inner_lock(node);
810         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
811         binder_node_inner_unlock(node);
812
813         return ret;
814 }
815
816 static bool binder_dec_node_nilocked(struct binder_node *node,
817                                      int strong, int internal)
818 {
819         struct binder_proc *proc = node->proc;
820
821         assert_spin_locked(&node->lock);
822         if (proc)
823                 assert_spin_locked(&proc->inner_lock);
824         if (strong) {
825                 if (internal)
826                         node->internal_strong_refs--;
827                 else
828                         node->local_strong_refs--;
829                 if (node->local_strong_refs || node->internal_strong_refs)
830                         return false;
831         } else {
832                 if (!internal)
833                         node->local_weak_refs--;
834                 if (node->local_weak_refs || node->tmp_refs ||
835                                 !hlist_empty(&node->refs))
836                         return false;
837         }
838
839         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
840                 if (list_empty(&node->work.entry)) {
841                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
842                         binder_wakeup_proc_ilocked(proc);
843                 }
844         } else {
845                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
846                     !node->local_weak_refs && !node->tmp_refs) {
847                         if (proc) {
848                                 binder_dequeue_work_ilocked(&node->work);
849                                 rb_erase(&node->rb_node, &proc->nodes);
850                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
851                                              "refless node %d deleted\n",
852                                              node->debug_id);
853                         } else {
854                                 BUG_ON(!list_empty(&node->work.entry));
855                                 spin_lock(&binder_dead_nodes_lock);
856                                 /*
857                                  * tmp_refs could have changed so
858                                  * check it again
859                                  */
860                                 if (node->tmp_refs) {
861                                         spin_unlock(&binder_dead_nodes_lock);
862                                         return false;
863                                 }
864                                 hlist_del(&node->dead_node);
865                                 spin_unlock(&binder_dead_nodes_lock);
866                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
867                                              "dead node %d deleted\n",
868                                              node->debug_id);
869                         }
870                         return true;
871                 }
872         }
873         return false;
874 }
875
876 static void binder_dec_node(struct binder_node *node, int strong, int internal)
877 {
878         bool free_node;
879
880         binder_node_inner_lock(node);
881         free_node = binder_dec_node_nilocked(node, strong, internal);
882         binder_node_inner_unlock(node);
883         if (free_node)
884                 binder_free_node(node);
885 }
886
887 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
888 {
889         /*
890          * No call to binder_inc_node() is needed since we
891          * don't need to inform userspace of any changes to
892          * tmp_refs
893          */
894         node->tmp_refs++;
895 }
896
897 /**
898  * binder_inc_node_tmpref() - take a temporary reference on node
899  * @node:       node to reference
900  *
901  * Take reference on node to prevent the node from being freed
902  * while referenced only by a local variable. The inner lock is
903  * needed to serialize with the node work on the queue (which
904  * isn't needed after the node is dead). If the node is dead
905  * (node->proc is NULL), use binder_dead_nodes_lock to protect
906  * node->tmp_refs against dead-node-only cases where the node
 907  * lock cannot be acquired (e.g. traversing the dead node list to
908  * print nodes)
909  */
910 static void binder_inc_node_tmpref(struct binder_node *node)
911 {
912         binder_node_lock(node);
913         if (node->proc)
914                 binder_inner_proc_lock(node->proc);
915         else
916                 spin_lock(&binder_dead_nodes_lock);
917         binder_inc_node_tmpref_ilocked(node);
918         if (node->proc)
919                 binder_inner_proc_unlock(node->proc);
920         else
921                 spin_unlock(&binder_dead_nodes_lock);
922         binder_node_unlock(node);
923 }
924
925 /**
926  * binder_dec_node_tmpref() - remove a temporary reference on node
927  * @node:       node to reference
928  *
929  * Release temporary reference on node taken via binder_inc_node_tmpref()
930  */
931 static void binder_dec_node_tmpref(struct binder_node *node)
932 {
933         bool free_node;
934
935         binder_node_inner_lock(node);
936         if (!node->proc)
937                 spin_lock(&binder_dead_nodes_lock);
938         else
939                 __acquire(&binder_dead_nodes_lock);
940         node->tmp_refs--;
941         BUG_ON(node->tmp_refs < 0);
942         if (!node->proc)
943                 spin_unlock(&binder_dead_nodes_lock);
944         else
945                 __release(&binder_dead_nodes_lock);
946         /*
947          * Call binder_dec_node() to check if all refcounts are 0
948          * and cleanup is needed. Calling with strong=0 and internal=1
949          * causes no actual reference to be released in binder_dec_node().
950          * If that changes, a change is needed here too.
951          */
952         free_node = binder_dec_node_nilocked(node, 0, 1);
953         binder_node_inner_unlock(node);
954         if (free_node)
955                 binder_free_node(node);
956 }
957
958 static void binder_put_node(struct binder_node *node)
959 {
960         binder_dec_node_tmpref(node);
961 }
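/*
 * Illustrative use of the tmpref helpers above (a sketch, not verbatim
 * driver code): a node looked up by pointer comes back with an implicit
 * temporary reference that must be dropped when the local variable is
 * done with it:
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		// ... use node; the tmpref keeps it alive ...
 *		binder_put_node(node);
 *	}
 */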
962
963 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
964                                                  u32 desc, bool need_strong_ref)
965 {
966         struct rb_node *n = proc->refs_by_desc.rb_node;
967         struct binder_ref *ref;
968
969         while (n) {
970                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
971
972                 if (desc < ref->data.desc) {
973                         n = n->rb_left;
974                 } else if (desc > ref->data.desc) {
975                         n = n->rb_right;
976                 } else if (need_strong_ref && !ref->data.strong) {
977                         binder_user_error("tried to use weak ref as strong ref\n");
978                         return NULL;
979                 } else {
980                         return ref;
981                 }
982         }
983         return NULL;
984 }
985
986 /**
987  * binder_get_ref_for_node_olocked() - get the ref associated with given node
988  * @proc:       binder_proc that owns the ref
989  * @node:       binder_node of target
990  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
991  *
992  * Look up the ref for the given node and return it if it exists
993  *
994  * If it doesn't exist and the caller provides a newly allocated
995  * ref, initialize the fields of the newly allocated ref and insert
996  * into the given proc rb_trees and node refs list.
997  *
998  * Return:      the ref for node. It is possible that another thread
999  *              allocated/initialized the ref first in which case the
1000  *              returned ref would be different than the passed-in
1001  *              new_ref. new_ref must be kfree'd by the caller in
1002  *              this case.
1003  */
1004 static struct binder_ref *binder_get_ref_for_node_olocked(
1005                                         struct binder_proc *proc,
1006                                         struct binder_node *node,
1007                                         struct binder_ref *new_ref)
1008 {
1009         struct binder_context *context = proc->context;
1010         struct rb_node **p = &proc->refs_by_node.rb_node;
1011         struct rb_node *parent = NULL;
1012         struct binder_ref *ref;
1013         struct rb_node *n;
1014
1015         while (*p) {
1016                 parent = *p;
1017                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1018
1019                 if (node < ref->node)
1020                         p = &(*p)->rb_left;
1021                 else if (node > ref->node)
1022                         p = &(*p)->rb_right;
1023                 else
1024                         return ref;
1025         }
1026         if (!new_ref)
1027                 return NULL;
1028
1029         binder_stats_created(BINDER_STAT_REF);
1030         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1031         new_ref->proc = proc;
1032         new_ref->node = node;
1033         rb_link_node(&new_ref->rb_node_node, parent, p);
1034         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1035
1036         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1037         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1038                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1039                 if (ref->data.desc > new_ref->data.desc)
1040                         break;
1041                 new_ref->data.desc = ref->data.desc + 1;
1042         }
1043
1044         p = &proc->refs_by_desc.rb_node;
1045         while (*p) {
1046                 parent = *p;
1047                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1048
1049                 if (new_ref->data.desc < ref->data.desc)
1050                         p = &(*p)->rb_left;
1051                 else if (new_ref->data.desc > ref->data.desc)
1052                         p = &(*p)->rb_right;
1053                 else
1054                         BUG();
1055         }
1056         rb_link_node(&new_ref->rb_node_desc, parent, p);
1057         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1058
1059         binder_node_lock(node);
1060         hlist_add_head(&new_ref->node_entry, &node->refs);
1061
1062         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063                      "%d new ref %d desc %d for node %d\n",
1064                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1065                       node->debug_id);
1066         binder_node_unlock(node);
1067         return new_ref;
1068 }
1069
1070 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1071 {
1072         bool delete_node = false;
1073
1074         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1075                      "%d delete ref %d desc %d for node %d\n",
1076                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1077                       ref->node->debug_id);
1078
1079         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1080         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1081
1082         binder_node_inner_lock(ref->node);
1083         if (ref->data.strong)
1084                 binder_dec_node_nilocked(ref->node, 1, 1);
1085
1086         hlist_del(&ref->node_entry);
1087         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1088         binder_node_inner_unlock(ref->node);
1089         /*
1090          * Clear ref->node unless we want the caller to free the node
1091          */
1092         if (!delete_node) {
1093                 /*
1094                  * The caller uses ref->node to determine
1095                  * whether the node needs to be freed. Clear
1096                  * it since the node is still alive.
1097                  */
1098                 ref->node = NULL;
1099         }
1100
1101         if (ref->death) {
1102                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1103                              "%d delete ref %d desc %d has death notification\n",
1104                               ref->proc->pid, ref->data.debug_id,
1105                               ref->data.desc);
1106                 binder_dequeue_work(ref->proc, &ref->death->work);
1107                 binder_stats_deleted(BINDER_STAT_DEATH);
1108         }
1109         binder_stats_deleted(BINDER_STAT_REF);
1110 }
1111
1112 /**
1113  * binder_inc_ref_olocked() - increment the ref for given handle
1114  * @ref:         ref to be incremented
1115  * @strong:      if true, strong increment, else weak
1116  * @target_list: list to queue node work on
1117  *
1118  * Increment the ref. @ref->proc->outer_lock must be held on entry
1119  *
1120  * Return: 0, if successful, else errno
1121  */
1122 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1123                                   struct list_head *target_list)
1124 {
1125         int ret;
1126
1127         if (strong) {
1128                 if (ref->data.strong == 0) {
1129                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1130                         if (ret)
1131                                 return ret;
1132                 }
1133                 ref->data.strong++;
1134         } else {
1135                 if (ref->data.weak == 0) {
1136                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1137                         if (ret)
1138                                 return ret;
1139                 }
1140                 ref->data.weak++;
1141         }
1142         return 0;
1143 }
1144
1145 /**
 1146  * binder_dec_ref_olocked() - dec the ref for given handle
 1147  * @ref:        ref to be decremented
 1148  * @strong:     if true, strong decrement, else weak
 1149  *
 1150  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1151  *
1152  * Return: true if ref is cleaned up and ready to be freed
1153  */
1154 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1155 {
1156         if (strong) {
1157                 if (ref->data.strong == 0) {
1158                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1159                                           ref->proc->pid, ref->data.debug_id,
1160                                           ref->data.desc, ref->data.strong,
1161                                           ref->data.weak);
1162                         return false;
1163                 }
1164                 ref->data.strong--;
1165                 if (ref->data.strong == 0)
1166                         binder_dec_node(ref->node, strong, 1);
1167         } else {
1168                 if (ref->data.weak == 0) {
1169                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1170                                           ref->proc->pid, ref->data.debug_id,
1171                                           ref->data.desc, ref->data.strong,
1172                                           ref->data.weak);
1173                         return false;
1174                 }
1175                 ref->data.weak--;
1176         }
1177         if (ref->data.strong == 0 && ref->data.weak == 0) {
1178                 binder_cleanup_ref_olocked(ref);
1179                 return true;
1180         }
1181         return false;
1182 }
1183
1184 /**
1185  * binder_get_node_from_ref() - get the node from the given proc/desc
1186  * @proc:       proc containing the ref
1187  * @desc:       the handle associated with the ref
1188  * @need_strong_ref: if true, only return node if ref is strong
1189  * @rdata:      the id/refcount data for the ref
1190  *
1191  * Given a proc and ref handle, return the associated binder_node
1192  *
1193  * Return: a binder_node or NULL if not found or not strong when strong required
1194  */
1195 static struct binder_node *binder_get_node_from_ref(
1196                 struct binder_proc *proc,
1197                 u32 desc, bool need_strong_ref,
1198                 struct binder_ref_data *rdata)
1199 {
1200         struct binder_node *node;
1201         struct binder_ref *ref;
1202
1203         binder_proc_lock(proc);
1204         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1205         if (!ref)
1206                 goto err_no_ref;
1207         node = ref->node;
1208         /*
1209          * Take an implicit reference on the node to ensure
1210          * it stays alive until the call to binder_put_node()
1211          */
1212         binder_inc_node_tmpref(node);
1213         if (rdata)
1214                 *rdata = ref->data;
1215         binder_proc_unlock(proc);
1216
1217         return node;
1218
1219 err_no_ref:
1220         binder_proc_unlock(proc);
1221         return NULL;
1222 }
1223
1224 /**
1225  * binder_free_ref() - free the binder_ref
1226  * @ref:        ref to free
1227  *
1228  * Free the binder_ref. Free the binder_node indicated by ref->node
1229  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1230  */
1231 static void binder_free_ref(struct binder_ref *ref)
1232 {
1233         if (ref->node)
1234                 binder_free_node(ref->node);
1235         kfree(ref->death);
1236         kfree(ref);
1237 }
1238
1239 /**
1240  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1241  * @proc:       proc containing the ref
1242  * @desc:       the handle associated with the ref
1243  * @increment:  true=inc reference, false=dec reference
1244  * @strong:     true=strong reference, false=weak reference
1245  * @rdata:      the id/refcount data for the ref
1246  *
1247  * Given a proc and ref handle, increment or decrement the ref
1248  * according to "increment" arg.
1249  *
1250  * Return: 0 if successful, else errno
1251  */
1252 static int binder_update_ref_for_handle(struct binder_proc *proc,
1253                 uint32_t desc, bool increment, bool strong,
1254                 struct binder_ref_data *rdata)
1255 {
1256         int ret = 0;
1257         struct binder_ref *ref;
1258         bool delete_ref = false;
1259
1260         binder_proc_lock(proc);
1261         ref = binder_get_ref_olocked(proc, desc, strong);
1262         if (!ref) {
1263                 ret = -EINVAL;
1264                 goto err_no_ref;
1265         }
1266         if (increment)
1267                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1268         else
1269                 delete_ref = binder_dec_ref_olocked(ref, strong);
1270
1271         if (rdata)
1272                 *rdata = ref->data;
1273         binder_proc_unlock(proc);
1274
1275         if (delete_ref)
1276                 binder_free_ref(ref);
1277         return ret;
1278
1279 err_no_ref:
1280         binder_proc_unlock(proc);
1281         return ret;
1282 }
1283
1284 /**
1285  * binder_dec_ref_for_handle() - dec the ref for given handle
1286  * @proc:       proc containing the ref
1287  * @desc:       the handle associated with the ref
1288  * @strong:     true=strong reference, false=weak reference
1289  * @rdata:      the id/refcount data for the ref
1290  *
1291  * Just calls binder_update_ref_for_handle() to decrement the ref.
1292  *
1293  * Return: 0 if successful, else errno
1294  */
1295 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1296                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1297 {
1298         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1299 }
1300
1301
1302 /**
1303  * binder_inc_ref_for_node() - increment the ref for given proc/node
1304  * @proc:        proc containing the ref
1305  * @node:        target node
1306  * @strong:      true=strong reference, false=weak reference
1307  * @target_list: worklist to use if node is incremented
1308  * @rdata:       the id/refcount data for the ref
1309  *
1310  * Given a proc and node, increment the ref. Create the ref if it
1311  * doesn't already exist
1312  *
1313  * Return: 0 if successful, else errno
1314  */
1315 static int binder_inc_ref_for_node(struct binder_proc *proc,
1316                         struct binder_node *node,
1317                         bool strong,
1318                         struct list_head *target_list,
1319                         struct binder_ref_data *rdata)
1320 {
1321         struct binder_ref *ref;
1322         struct binder_ref *new_ref = NULL;
1323         int ret = 0;
1324
1325         binder_proc_lock(proc);
1326         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1327         if (!ref) {
1328                 binder_proc_unlock(proc);
1329                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1330                 if (!new_ref)
1331                         return -ENOMEM;
1332                 binder_proc_lock(proc);
1333                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1334         }
1335         ret = binder_inc_ref_olocked(ref, strong, target_list);
1336         *rdata = ref->data;
1337         binder_proc_unlock(proc);
1338         if (new_ref && ref != new_ref)
1339                 /*
1340                  * Another thread created the ref first so
1341                  * free the one we allocated
1342                  */
1343                 kfree(new_ref);
1344         return ret;
1345 }
1346
1347 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1348                                            struct binder_transaction *t)
1349 {
1350         BUG_ON(!target_thread);
1351         assert_spin_locked(&target_thread->proc->inner_lock);
1352         BUG_ON(target_thread->transaction_stack != t);
1353         BUG_ON(target_thread->transaction_stack->from != target_thread);
1354         target_thread->transaction_stack =
1355                 target_thread->transaction_stack->from_parent;
1356         t->from = NULL;
1357 }
1358
1359 /**
1360  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1361  * @thread:     thread to decrement
1362  *
1363  * A thread needs to be kept alive while being used to create or
1364  * handle a transaction. binder_get_txn_from() is used to safely
1365  * extract t->from from a binder_transaction and keep the thread
1366  * indicated by t->from from being freed. When done with that
1367  * binder_thread, this function is called to decrement the
1368  * tmp_ref and free if appropriate (thread has been released
1369  * and no transaction being processed by the driver)
1370  */
1371 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1372 {
1373         /*
1374          * atomic is used to protect the counter value while
1375          * it cannot reach zero or thread->is_dead is false
1376          */
1377         binder_inner_proc_lock(thread->proc);
1378         atomic_dec(&thread->tmp_ref);
1379         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1380                 binder_inner_proc_unlock(thread->proc);
1381                 binder_free_thread(thread);
1382                 return;
1383         }
1384         binder_inner_proc_unlock(thread->proc);
1385 }
1386
1387 /**
1388  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1389  * @proc:       proc to decrement
1390  *
1391  * A binder_proc needs to be kept alive while being used to create or
1392  * handle a transaction. proc->tmp_ref is incremented when
1393  * creating a new transaction or the binder_proc is currently in-use
1394  * by threads that are being released. When done with the binder_proc,
1395  * this function is called to decrement the counter and free the
1396  * proc if appropriate (proc has been released, all threads have
 1397  * been released and not currently in use to process a transaction).
1398  */
1399 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1400 {
1401         binder_inner_proc_lock(proc);
1402         proc->tmp_ref--;
1403         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1404                         !proc->tmp_ref) {
1405                 binder_inner_proc_unlock(proc);
1406                 binder_free_proc(proc);
1407                 return;
1408         }
1409         binder_inner_proc_unlock(proc);
1410 }
1411
1412 /**
1413  * binder_get_txn_from() - safely extract the "from" thread in transaction
1414  * @t:  binder transaction for t->from
1415  *
1416  * Atomically return the "from" thread and increment the tmp_ref
1417  * count for the thread to ensure it stays alive until
1418  * binder_thread_dec_tmpref() is called.
1419  *
1420  * Return: the value of t->from
1421  */
1422 static struct binder_thread *binder_get_txn_from(
1423                 struct binder_transaction *t)
1424 {
1425         struct binder_thread *from;
1426
1427         spin_lock(&t->lock);
1428         from = t->from;
1429         if (from)
1430                 atomic_inc(&from->tmp_ref);
1431         spin_unlock(&t->lock);
1432         return from;
1433 }
1434
1435 /**
1436  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1437  * @t:  binder transaction for t->from
1438  *
1439  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1440  * to guarantee that the thread cannot be released while operating on it.
1441  * The caller must call binder_inner_proc_unlock() to release the inner lock
1442  * as well as call binder_dec_thread_txn() to release the reference.
1443  *
1444  * Return: the value of t->from
1445  */
1446 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1447                 struct binder_transaction *t)
1448         __acquires(&t->from->proc->inner_lock)
1449 {
1450         struct binder_thread *from;
1451
1452         from = binder_get_txn_from(t);
1453         if (!from) {
1454                 __acquire(&from->proc->inner_lock);
1455                 return NULL;
1456         }
1457         binder_inner_proc_lock(from->proc);
1458         if (t->from) {
1459                 BUG_ON(from != t->from);
1460                 return from;
1461         }
1462         binder_inner_proc_unlock(from->proc);
1463         __acquire(&from->proc->inner_lock);
1464         binder_thread_dec_tmpref(from);
1465         return NULL;
1466 }
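/*
 * Sketch of the expected caller pattern (illustrative; compare the use
 * in binder_send_failed_reply() below):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...operate on target_thread with the inner lock held...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * On a NULL return no lock is actually held; the __acquire()/__release()
 * annotations only exist to keep sparse's lock-balance checking happy.
 */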
1467
1468 /**
1469  * binder_free_txn_fixups() - free unprocessed fd fixups
1470  * @t:  binder transaction whose fd fixups are to be freed
1471  *
1472  * If the transaction is being torn down prior to being
1473  * processed by the target process, free all of the
1474  * fd fixups and fput the file structs. It is safe to
1475  * call this function after the fixups have been
1476  * processed -- in that case, the list will be empty.
1477  */
1478 static void binder_free_txn_fixups(struct binder_transaction *t)
1479 {
1480         struct binder_txn_fd_fixup *fixup, *tmp;
1481
1482         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1483                 fput(fixup->file);
1484                 list_del(&fixup->fixup_entry);
1485                 kfree(fixup);
1486         }
1487 }
1488
1489 static void binder_txn_latency_free(struct binder_transaction *t)
1490 {
1491         int from_proc, from_thread, to_proc, to_thread;
1492
1493         spin_lock(&t->lock);
1494         from_proc = t->from ? t->from->proc->pid : 0;
1495         from_thread = t->from ? t->from->pid : 0;
1496         to_proc = t->to_proc ? t->to_proc->pid : 0;
1497         to_thread = t->to_thread ? t->to_thread->pid : 0;
1498         spin_unlock(&t->lock);
1499
1500         trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1501 }
1502
1503 static void binder_free_transaction(struct binder_transaction *t)
1504 {
1505         struct binder_proc *target_proc = t->to_proc;
1506
1507         if (target_proc) {
1508                 binder_inner_proc_lock(target_proc);
1509                 target_proc->outstanding_txns--;
1510                 if (target_proc->outstanding_txns < 0)
1511                         pr_warn("%s: Unexpected outstanding_txns %d\n",
1512                                 __func__, target_proc->outstanding_txns);
1513                 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1514                         wake_up_interruptible_all(&target_proc->freeze_wait);
1515                 if (t->buffer)
1516                         t->buffer->transaction = NULL;
1517                 binder_inner_proc_unlock(target_proc);
1518         }
1519         if (trace_binder_txn_latency_free_enabled())
1520                 binder_txn_latency_free(t);
1521         /*
1522          * If the transaction has no target_proc, then
1523          * t->buffer->transaction has already been cleared.
1524          */
1525         binder_free_txn_fixups(t);
1526         kfree(t);
1527         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1528 }
1529
1530 static void binder_send_failed_reply(struct binder_transaction *t,
1531                                      uint32_t error_code)
1532 {
1533         struct binder_thread *target_thread;
1534         struct binder_transaction *next;
1535
1536         BUG_ON(t->flags & TF_ONE_WAY);
1537         while (1) {
1538                 target_thread = binder_get_txn_from_and_acq_inner(t);
1539                 if (target_thread) {
1540                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1541                                      "send failed reply for transaction %d to %d:%d\n",
1542                                       t->debug_id,
1543                                       target_thread->proc->pid,
1544                                       target_thread->pid);
1545
1546                         binder_pop_transaction_ilocked(target_thread, t);
1547                         if (target_thread->reply_error.cmd == BR_OK) {
1548                                 target_thread->reply_error.cmd = error_code;
1549                                 binder_enqueue_thread_work_ilocked(
1550                                         target_thread,
1551                                         &target_thread->reply_error.work);
1552                                 wake_up_interruptible(&target_thread->wait);
1553                         } else {
1554                                 /*
1555                                  * Cannot get here for normal operation, but
1556                                  * we can if multiple synchronous transactions
1557                                  * are sent without blocking for responses.
1558                                  * Just ignore the 2nd error in this case.
1559                                  */
1560                                 pr_warn("Unexpected reply error: %u\n",
1561                                         target_thread->reply_error.cmd);
1562                         }
1563                         binder_inner_proc_unlock(target_thread->proc);
1564                         binder_thread_dec_tmpref(target_thread);
1565                         binder_free_transaction(t);
1566                         return;
1567                 }
1568                 __release(&target_thread->proc->inner_lock);
1569                 next = t->from_parent;
1570
1571                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1572                              "send failed reply for transaction %d, target dead\n",
1573                              t->debug_id);
1574
1575                 binder_free_transaction(t);
1576                 if (next == NULL) {
1577                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1578                                      "reply failed, no target thread at root\n");
1579                         return;
1580                 }
1581                 t = next;
1582                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1583                              "reply failed, no target thread -- retry %d\n",
1584                               t->debug_id);
1585         }
1586 }
1587
1588 /**
1589  * binder_cleanup_transaction() - cleans up undelivered transaction
1590  * @t:          transaction that needs to be cleaned up
1591  * @reason:     reason the transaction wasn't delivered
1592  * @error_code: error to return to caller (if synchronous call)
1593  */
1594 static void binder_cleanup_transaction(struct binder_transaction *t,
1595                                        const char *reason,
1596                                        uint32_t error_code)
1597 {
1598         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1599                 binder_send_failed_reply(t, error_code);
1600         } else {
1601                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1602                         "undelivered transaction %d, %s\n",
1603                         t->debug_id, reason);
1604                 binder_free_transaction(t);
1605         }
1606 }
1607
1608 /**
1609  * binder_get_object() - gets object and checks for valid metadata
1610  * @proc:       binder_proc owning the buffer
1611  * @u:          sender's user pointer to base of buffer
1612  * @buffer:     binder_buffer that we're parsing.
1613  * @offset:     offset in the @buffer at which to validate an object.
1614  * @object:     struct binder_object to read into
1615  *
1616  * Copy the binder object at the given offset into @object. If @u is
1617  * provided then the copy is from the sender's buffer. If not, then
1618  * it is copied from the target's @buffer.
1619  *
1620  * Return:      If there's a valid metadata object at @offset, the
1621  *              size of that object. Otherwise, it returns zero. The object
1622  *              is read into the struct binder_object pointed to by @object.
1623  */
1624 static size_t binder_get_object(struct binder_proc *proc,
1625                                 const void __user *u,
1626                                 struct binder_buffer *buffer,
1627                                 unsigned long offset,
1628                                 struct binder_object *object)
1629 {
1630         size_t read_size;
1631         struct binder_object_header *hdr;
1632         size_t object_size = 0;
1633
1634         read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1635         if (offset > buffer->data_size || read_size < sizeof(*hdr))
1636                 return 0;
1637         if (u) {
1638                 if (copy_from_user(object, u + offset, read_size))
1639                         return 0;
1640         } else {
1641                 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1642                                                   offset, read_size))
1643                         return 0;
1644         }
1645
1646         /* Ok, now see if we read a complete object. */
1647         hdr = &object->hdr;
1648         switch (hdr->type) {
1649         case BINDER_TYPE_BINDER:
1650         case BINDER_TYPE_WEAK_BINDER:
1651         case BINDER_TYPE_HANDLE:
1652         case BINDER_TYPE_WEAK_HANDLE:
1653                 object_size = sizeof(struct flat_binder_object);
1654                 break;
1655         case BINDER_TYPE_FD:
1656                 object_size = sizeof(struct binder_fd_object);
1657                 break;
1658         case BINDER_TYPE_PTR:
1659                 object_size = sizeof(struct binder_buffer_object);
1660                 break;
1661         case BINDER_TYPE_FDA:
1662                 object_size = sizeof(struct binder_fd_array_object);
1663                 break;
1664         default:
1665                 return 0;
1666         }
1667         if (offset <= buffer->data_size - object_size &&
1668             buffer->data_size >= object_size)
1669                 return object_size;
1670         else
1671                 return 0;
1672 }
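/*
 * For orientation (descriptive only): a transaction buffer holds the
 * data area followed by an offsets array aligned to sizeof(void *);
 * each offset names an object (flat_binder_object, binder_fd_object,
 * binder_buffer_object, ...) inside the data area. Callers typically do
 * something like this rough sketch:
 *
 *	struct binder_object object;
 *	size_t size = binder_get_object(proc, NULL, buffer, offset, &object);
 *	if (!size)
 *		...truncated or unknown object, reject...
 *	else
 *		...dispatch on object.hdr.type...
 */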
1673
1674 /**
1675  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1676  * @proc:       binder_proc owning the buffer
1677  * @b:          binder_buffer containing the object
1678  * @object:     struct binder_object to read into
1679  * @index:      index in offset array at which the binder_buffer_object is
1680  *              located
1681  * @start_offset: points to the start of the offset array
1682  * @object_offsetp: offset of @object read from @b
1683  * @num_valid:  the number of valid offsets in the offset array
1684  *
1685  * Return:      If @index is within the valid range of the offset array
1686  *              described by @start_offset and @num_valid, and if there's a valid
1687  *              binder_buffer_object at the offset found in index @index
1688  *              of the offset array, that object is returned. Otherwise,
1689  *              %NULL is returned.
1690  *              Note that the offset found in index @index itself is not
1691  *              verified; this function assumes that @num_valid elements
1692  *              from @start_offset were previously verified to have valid offsets.
1693  *              If @object_offsetp is non-NULL, then the offset within
1694  *              @b is written to it.
1695  */
1696 static struct binder_buffer_object *binder_validate_ptr(
1697                                                 struct binder_proc *proc,
1698                                                 struct binder_buffer *b,
1699                                                 struct binder_object *object,
1700                                                 binder_size_t index,
1701                                                 binder_size_t start_offset,
1702                                                 binder_size_t *object_offsetp,
1703                                                 binder_size_t num_valid)
1704 {
1705         size_t object_size;
1706         binder_size_t object_offset;
1707         unsigned long buffer_offset;
1708
1709         if (index >= num_valid)
1710                 return NULL;
1711
1712         buffer_offset = start_offset + sizeof(binder_size_t) * index;
1713         if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1714                                           b, buffer_offset,
1715                                           sizeof(object_offset)))
1716                 return NULL;
1717         object_size = binder_get_object(proc, NULL, b, object_offset, object);
1718         if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1719                 return NULL;
1720         if (object_offsetp)
1721                 *object_offsetp = object_offset;
1722
1723         return &object->bbo;
1724 }
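/*
 * Note (descriptive only): the offset-array entry for @index is read
 * from start_offset + index * sizeof(binder_size_t); the value found
 * there is then validated with binder_get_object() and must describe a
 * BINDER_TYPE_PTR object, otherwise NULL is returned.
 */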
1725
1726 /**
1727  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1728  * @proc:               binder_proc owning the buffer
1729  * @b:                  transaction buffer
1730  * @objects_start_offset: offset to start of objects buffer
1731  * @buffer_obj_offset:  offset to binder_buffer_object in which to fix up
1732  * @fixup_offset:       start offset in @b to fix up
1733  * @last_obj_offset:    offset to last binder_buffer_object that we fixed
1734  * @last_min_offset:    minimum fixup offset in object at @last_obj_offset
1735  *
1736  * Return:              %true if a fixup in buffer @b at offset @fixup_offset is
1737  *                      allowed.
1738  *
1739  * For safety reasons, we only allow fixups inside a buffer to happen
1740  * at increasing offsets; additionally, we only allow fixup on the last
1741  * buffer object that was verified, or one of its parents.
1742  *
1743  * Example of what is allowed:
1744  *
1745  * A
1746  *   B (parent = A, offset = 0)
1747  *   C (parent = A, offset = 16)
1748  *     D (parent = C, offset = 0)
1749  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1750  *
1751  * Examples of what is not allowed:
1752  *
1753  * Decreasing offsets within the same parent:
1754  * A
1755  *   C (parent = A, offset = 16)
1756  *   B (parent = A, offset = 0) // decreasing offset within A
1757  *
1758  * Referring to a parent that wasn't the last object or any of its parents:
1759  * A
1760  *   B (parent = A, offset = 0)
1761  *   C (parent = A, offset = 0)
1762  *   C (parent = A, offset = 16)
1763  *     D (parent = B, offset = 0) // B is not A or any of A's parents
1764  */
1765 static bool binder_validate_fixup(struct binder_proc *proc,
1766                                   struct binder_buffer *b,
1767                                   binder_size_t objects_start_offset,
1768                                   binder_size_t buffer_obj_offset,
1769                                   binder_size_t fixup_offset,
1770                                   binder_size_t last_obj_offset,
1771                                   binder_size_t last_min_offset)
1772 {
1773         if (!last_obj_offset) {
1774                 /* No previously verified buffer object to fix up in */
1775                 return false;
1776         }
1777
1778         while (last_obj_offset != buffer_obj_offset) {
1779                 unsigned long buffer_offset;
1780                 struct binder_object last_object;
1781                 struct binder_buffer_object *last_bbo;
1782                 size_t object_size = binder_get_object(proc, NULL, b,
1783                                                        last_obj_offset,
1784                                                        &last_object);
1785                 if (object_size != sizeof(*last_bbo))
1786                         return false;
1787
1788                 last_bbo = &last_object.bbo;
1789                 /*
1790                  * Safe to retrieve the parent of last_obj, since it
1791                  * was already previously verified by the driver.
1792                  */
1793                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1794                         return false;
1795                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1796                 buffer_offset = objects_start_offset +
1797                         sizeof(binder_size_t) * last_bbo->parent;
1798                 if (binder_alloc_copy_from_buffer(&proc->alloc,
1799                                                   &last_obj_offset,
1800                                                   b, buffer_offset,
1801                                                   sizeof(last_obj_offset)))
1802                         return false;
1803         }
1804         return (fixup_offset >= last_min_offset);
1805 }
1806
1807 /**
1808  * struct binder_task_work_cb - for deferred close
1809  *
1810  * @twork:                callback_head for task work
1811  * @file:                 file to close
1812  *
1813  * Structure to pass task work to be handled after
1814  * returning from binder_ioctl() via task_work_add().
1815  */
1816 struct binder_task_work_cb {
1817         struct callback_head twork;
1818         struct file *file;
1819 };
1820
1821 /**
1822  * binder_do_fd_close() - close the file for a deferred fd close
1823  * @twork:      callback head for task work
1824  *
1825  * It is not safe to call ksys_close() during the binder_ioctl()
1826  * function if there is a chance that binder's own file descriptor
1827  * might be closed. This is to meet the requirements for using
1828  * fdget() (see comments for __fget_light()). Therefore use
1829  * task_work_add() to schedule the close operation once we have
1830  * returned from binder_ioctl(). This function is a callback
1831  * for that mechanism and drops the final reference (fput()) on the
1832  * file backing the closed descriptor.
1833  */
1834 static void binder_do_fd_close(struct callback_head *twork)
1835 {
1836         struct binder_task_work_cb *twcb = container_of(twork,
1837                         struct binder_task_work_cb, twork);
1838
1839         fput(twcb->file);
1840         kfree(twcb);
1841 }
1842
1843 /**
1844  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1845  * @fd:         file-descriptor to close
1846  *
1847  * See comments in binder_do_fd_close(). This function is used to schedule
1848  * a file-descriptor to be closed after returning from binder_ioctl().
1849  */
1850 static void binder_deferred_fd_close(int fd)
1851 {
1852         struct binder_task_work_cb *twcb;
1853
1854         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1855         if (!twcb)
1856                 return;
1857         init_task_work(&twcb->twork, binder_do_fd_close);
1858         close_fd_get_file(fd, &twcb->file);
1859         if (twcb->file) {
1860                 filp_close(twcb->file, current->files);
1861                 task_work_add(current, &twcb->twork, TWA_RESUME);
1862         } else {
1863                 kfree(twcb);
1864         }
1865 }
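/*
 * Rough flow of a deferred close (descriptive only): close_fd_get_file()
 * detaches the fd from the caller's descriptor table and returns the
 * struct file with an extra reference, filp_close() performs the flush
 * and drops the table's reference, and the remaining reference is
 * dropped later by the fput() in binder_do_fd_close(), run via
 * task_work_add() once the task is on its way back to user space.
 */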
1866
1867 static void binder_transaction_buffer_release(struct binder_proc *proc,
1868                                               struct binder_thread *thread,
1869                                               struct binder_buffer *buffer,
1870                                               binder_size_t failed_at,
1871                                               bool is_failure)
1872 {
1873         int debug_id = buffer->debug_id;
1874         binder_size_t off_start_offset, buffer_offset, off_end_offset;
1875
1876         binder_debug(BINDER_DEBUG_TRANSACTION,
1877                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1878                      proc->pid, buffer->debug_id,
1879                      buffer->data_size, buffer->offsets_size,
1880                      (unsigned long long)failed_at);
1881
1882         if (buffer->target_node)
1883                 binder_dec_node(buffer->target_node, 1, 0);
1884
1885         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1886         off_end_offset = is_failure && failed_at ? failed_at :
1887                                 off_start_offset + buffer->offsets_size;
1888         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1889              buffer_offset += sizeof(binder_size_t)) {
1890                 struct binder_object_header *hdr;
1891                 size_t object_size = 0;
1892                 struct binder_object object;
1893                 binder_size_t object_offset;
1894
1895                 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1896                                                    buffer, buffer_offset,
1897                                                    sizeof(object_offset)))
1898                         object_size = binder_get_object(proc, NULL, buffer,
1899                                                         object_offset, &object);
1900                 if (object_size == 0) {
1901                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1902                                debug_id, (u64)object_offset, buffer->data_size);
1903                         continue;
1904                 }
1905                 hdr = &object.hdr;
1906                 switch (hdr->type) {
1907                 case BINDER_TYPE_BINDER:
1908                 case BINDER_TYPE_WEAK_BINDER: {
1909                         struct flat_binder_object *fp;
1910                         struct binder_node *node;
1911
1912                         fp = to_flat_binder_object(hdr);
1913                         node = binder_get_node(proc, fp->binder);
1914                         if (node == NULL) {
1915                                 pr_err("transaction release %d bad node %016llx\n",
1916                                        debug_id, (u64)fp->binder);
1917                                 break;
1918                         }
1919                         binder_debug(BINDER_DEBUG_TRANSACTION,
1920                                      "        node %d u%016llx\n",
1921                                      node->debug_id, (u64)node->ptr);
1922                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1923                                         0);
1924                         binder_put_node(node);
1925                 } break;
1926                 case BINDER_TYPE_HANDLE:
1927                 case BINDER_TYPE_WEAK_HANDLE: {
1928                         struct flat_binder_object *fp;
1929                         struct binder_ref_data rdata;
1930                         int ret;
1931
1932                         fp = to_flat_binder_object(hdr);
1933                         ret = binder_dec_ref_for_handle(proc, fp->handle,
1934                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1935
1936                         if (ret) {
1937                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
1938                                  debug_id, fp->handle, ret);
1939                                 break;
1940                         }
1941                         binder_debug(BINDER_DEBUG_TRANSACTION,
1942                                      "        ref %d desc %d\n",
1943                                      rdata.debug_id, rdata.desc);
1944                 } break;
1945
1946                 case BINDER_TYPE_FD: {
1947                         /*
1948                          * No need to close the file here since user-space
1949                          * closes it for successfully delivered
1950                          * transactions. For transactions that weren't
1951                          * delivered, the new fd was never allocated so
1952                          * there is no need to close and the fput on the
1953                          * file is done when the transaction is torn
1954                          * down.
1955                          */
1956                 } break;
1957                 case BINDER_TYPE_PTR:
1958                         /*
1959                          * Nothing to do here, this will get cleaned up when the
1960                          * transaction buffer gets freed
1961                          */
1962                         break;
1963                 case BINDER_TYPE_FDA: {
1964                         struct binder_fd_array_object *fda;
1965                         struct binder_buffer_object *parent;
1966                         struct binder_object ptr_object;
1967                         binder_size_t fda_offset;
1968                         size_t fd_index;
1969                         binder_size_t fd_buf_size;
1970                         binder_size_t num_valid;
1971
1972                         if (is_failure) {
1973                                 /*
1974                                  * The fd fixups have not been applied so no
1975                                  * fds need to be closed.
1976                                  */
1977                                 continue;
1978                         }
1979
1980                         num_valid = (buffer_offset - off_start_offset) /
1981                                                 sizeof(binder_size_t);
1982                         fda = to_binder_fd_array_object(hdr);
1983                         parent = binder_validate_ptr(proc, buffer, &ptr_object,
1984                                                      fda->parent,
1985                                                      off_start_offset,
1986                                                      NULL,
1987                                                      num_valid);
1988                         if (!parent) {
1989                                 pr_err("transaction release %d bad parent offset\n",
1990                                        debug_id);
1991                                 continue;
1992                         }
1993                         fd_buf_size = sizeof(u32) * fda->num_fds;
1994                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1995                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
1996                                        debug_id, (u64)fda->num_fds);
1997                                 continue;
1998                         }
1999                         if (fd_buf_size > parent->length ||
2000                             fda->parent_offset > parent->length - fd_buf_size) {
2001                                 /* No space for all file descriptors here. */
2002                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2003                                        debug_id, (u64)fda->num_fds);
2004                                 continue;
2005                         }
2006                         /*
2007                          * the source data for binder_buffer_object is visible
2008                          * to user-space and the @buffer element is the user
2009                          * pointer to the buffer_object containing the fd_array.
2010                          * Convert the address to an offset relative to
2011                          * the base of the transaction buffer.
2012                          */
2013                         fda_offset =
2014                             (parent->buffer - (uintptr_t)buffer->user_data) +
2015                             fda->parent_offset;
2016                         for (fd_index = 0; fd_index < fda->num_fds;
2017                              fd_index++) {
2018                                 u32 fd;
2019                                 int err;
2020                                 binder_size_t offset = fda_offset +
2021                                         fd_index * sizeof(fd);
2022
2023                                 err = binder_alloc_copy_from_buffer(
2024                                                 &proc->alloc, &fd, buffer,
2025                                                 offset, sizeof(fd));
2026                                 WARN_ON(err);
2027                                 if (!err) {
2028                                         binder_deferred_fd_close(fd);
2029                                         /*
2030                                          * Need to make sure the thread goes
2031                                          * back to userspace to complete the
2032                                          * deferred close
2033                                          */
2034                                         if (thread)
2035                                                 thread->looper_need_return = true;
2036                                 }
2037                         }
2038                 } break;
2039                 default:
2040                         pr_err("transaction release %d bad object type %x\n",
2041                                 debug_id, hdr->type);
2042                         break;
2043                 }
2044         }
2045 }
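/*
 * Note on the failure path (descriptive only): when is_failure is set
 * and @failed_at is non-zero, binder_transaction() stopped translating
 * objects at that offset, so only the entries before it are walked and
 * released here. Fd-array objects are skipped entirely on failure
 * because their fd fixups were never applied.
 */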
2046
2047 static int binder_translate_binder(struct flat_binder_object *fp,
2048                                    struct binder_transaction *t,
2049                                    struct binder_thread *thread)
2050 {
2051         struct binder_node *node;
2052         struct binder_proc *proc = thread->proc;
2053         struct binder_proc *target_proc = t->to_proc;
2054         struct binder_ref_data rdata;
2055         int ret = 0;
2056
2057         node = binder_get_node(proc, fp->binder);
2058         if (!node) {
2059                 node = binder_new_node(proc, fp);
2060                 if (!node)
2061                         return -ENOMEM;
2062         }
2063         if (fp->cookie != node->cookie) {
2064                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2065                                   proc->pid, thread->pid, (u64)fp->binder,
2066                                   node->debug_id, (u64)fp->cookie,
2067                                   (u64)node->cookie);
2068                 ret = -EINVAL;
2069                 goto done;
2070         }
2071         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2072                 ret = -EPERM;
2073                 goto done;
2074         }
2075
2076         ret = binder_inc_ref_for_node(target_proc, node,
2077                         fp->hdr.type == BINDER_TYPE_BINDER,
2078                         &thread->todo, &rdata);
2079         if (ret)
2080                 goto done;
2081
2082         if (fp->hdr.type == BINDER_TYPE_BINDER)
2083                 fp->hdr.type = BINDER_TYPE_HANDLE;
2084         else
2085                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2086         fp->binder = 0;
2087         fp->handle = rdata.desc;
2088         fp->cookie = 0;
2089
2090         trace_binder_transaction_node_to_ref(t, node, &rdata);
2091         binder_debug(BINDER_DEBUG_TRANSACTION,
2092                      "        node %d u%016llx -> ref %d desc %d\n",
2093                      node->debug_id, (u64)node->ptr,
2094                      rdata.debug_id, rdata.desc);
2095 done:
2096         binder_put_node(node);
2097         return ret;
2098 }
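/*
 * Illustrative before/after of the flat_binder_object as seen by the two
 * sides (a sketch, not literal driver output). The sender passes
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = <node ptr>, .cookie = <cookie> }
 *
 * and after translation the target receives
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc, .binder = 0, .cookie = 0 }
 *
 * with a strong (or weak, for BINDER_TYPE_WEAK_BINDER) reference on the
 * node held on the target's behalf by binder_inc_ref_for_node().
 */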
2099
2100 static int binder_translate_handle(struct flat_binder_object *fp,
2101                                    struct binder_transaction *t,
2102                                    struct binder_thread *thread)
2103 {
2104         struct binder_proc *proc = thread->proc;
2105         struct binder_proc *target_proc = t->to_proc;
2106         struct binder_node *node;
2107         struct binder_ref_data src_rdata;
2108         int ret = 0;
2109
2110         node = binder_get_node_from_ref(proc, fp->handle,
2111                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2112         if (!node) {
2113                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2114                                   proc->pid, thread->pid, fp->handle);
2115                 return -EINVAL;
2116         }
2117         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2118                 ret = -EPERM;
2119                 goto done;
2120         }
2121
2122         binder_node_lock(node);
2123         if (node->proc == target_proc) {
2124                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2125                         fp->hdr.type = BINDER_TYPE_BINDER;
2126                 else
2127                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2128                 fp->binder = node->ptr;
2129                 fp->cookie = node->cookie;
2130                 if (node->proc)
2131                         binder_inner_proc_lock(node->proc);
2132                 else
2133                         __acquire(&node->proc->inner_lock);
2134                 binder_inc_node_nilocked(node,
2135                                          fp->hdr.type == BINDER_TYPE_BINDER,
2136                                          0, NULL);
2137                 if (node->proc)
2138                         binder_inner_proc_unlock(node->proc);
2139                 else
2140                         __release(&node->proc->inner_lock);
2141                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2142                 binder_debug(BINDER_DEBUG_TRANSACTION,
2143                              "        ref %d desc %d -> node %d u%016llx\n",
2144                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2145                              (u64)node->ptr);
2146                 binder_node_unlock(node);
2147         } else {
2148                 struct binder_ref_data dest_rdata;
2149
2150                 binder_node_unlock(node);
2151                 ret = binder_inc_ref_for_node(target_proc, node,
2152                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2153                                 NULL, &dest_rdata);
2154                 if (ret)
2155                         goto done;
2156
2157                 fp->binder = 0;
2158                 fp->handle = dest_rdata.desc;
2159                 fp->cookie = 0;
2160                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2161                                                     &dest_rdata);
2162                 binder_debug(BINDER_DEBUG_TRANSACTION,
2163                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2164                              src_rdata.debug_id, src_rdata.desc,
2165                              dest_rdata.debug_id, dest_rdata.desc,
2166                              node->debug_id);
2167         }
2168 done:
2169         binder_put_node(node);
2170         return ret;
2171 }
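/*
 * Summary (descriptive only): when the referenced node lives in the
 * target process, the handle collapses back into a local
 * BINDER_TYPE_BINDER/WEAK_BINDER object carrying node->ptr and
 * node->cookie; otherwise a reference is taken in the target and the
 * object stays a handle, renumbered to dest_rdata.desc.
 */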
2172
2173 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2174                                struct binder_transaction *t,
2175                                struct binder_thread *thread,
2176                                struct binder_transaction *in_reply_to)
2177 {
2178         struct binder_proc *proc = thread->proc;
2179         struct binder_proc *target_proc = t->to_proc;
2180         struct binder_txn_fd_fixup *fixup;
2181         struct file *file;
2182         int ret = 0;
2183         bool target_allows_fd;
2184
2185         if (in_reply_to)
2186                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2187         else
2188                 target_allows_fd = t->buffer->target_node->accept_fds;
2189         if (!target_allows_fd) {
2190                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2191                                   proc->pid, thread->pid,
2192                                   in_reply_to ? "reply" : "transaction",
2193                                   fd);
2194                 ret = -EPERM;
2195                 goto err_fd_not_accepted;
2196         }
2197
2198         file = fget(fd);
2199         if (!file) {
2200                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2201                                   proc->pid, thread->pid, fd);
2202                 ret = -EBADF;
2203                 goto err_fget;
2204         }
2205         ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2206         if (ret < 0) {
2207                 ret = -EPERM;
2208                 goto err_security;
2209         }
2210
2211         /*
2212          * Add fixup record for this transaction. The allocation
2213          * of the fd in the target needs to be done from a
2214          * target thread.
2215          */
2216         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2217         if (!fixup) {
2218                 ret = -ENOMEM;
2219                 goto err_alloc;
2220         }
2221         fixup->file = file;
2222         fixup->offset = fd_offset;
2223         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2224         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2225
2226         return ret;
2227
2228 err_alloc:
2229 err_security:
2230         fput(file);
2231 err_fget:
2232 err_fd_not_accepted:
2233         return ret;
2234 }
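/*
 * The fd transfer is deliberately two-phase; a rough sketch of the
 * lifetime of the fixup recorded above (illustrative only):
 *
 *	phase 1 (sender context, here):   file = fget(fd), record fixup
 *	phase 2 (target context, later):  allocate an fd in the target and
 *					  install @file at fixup->offset
 *
 * If the transaction is torn down before phase 2 runs, the fput() in
 * binder_free_txn_fixups() drops the reference taken here.
 */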
2235
2236 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2237                                      struct binder_buffer_object *parent,
2238                                      struct binder_transaction *t,
2239                                      struct binder_thread *thread,
2240                                      struct binder_transaction *in_reply_to)
2241 {
2242         binder_size_t fdi, fd_buf_size;
2243         binder_size_t fda_offset;
2244         struct binder_proc *proc = thread->proc;
2245         struct binder_proc *target_proc = t->to_proc;
2246
2247         fd_buf_size = sizeof(u32) * fda->num_fds;
2248         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2249                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2250                                   proc->pid, thread->pid, (u64)fda->num_fds);
2251                 return -EINVAL;
2252         }
2253         if (fd_buf_size > parent->length ||
2254             fda->parent_offset > parent->length - fd_buf_size) {
2255                 /* No space for all file descriptors here. */
2256                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2257                                   proc->pid, thread->pid, (u64)fda->num_fds);
2258                 return -EINVAL;
2259         }
2260         /*
2261          * the source data for binder_buffer_object is visible
2262          * to user-space and the @buffer element is the user
2263          * pointer to the buffer_object containing the fd_array.
2264          * Convert the address to an offset relative to
2265          * the base of the transaction buffer.
2266          */
2267         fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2268                 fda->parent_offset;
2269         if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2270                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2271                                   proc->pid, thread->pid);
2272                 return -EINVAL;
2273         }
2274         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2275                 u32 fd;
2276                 int ret;
2277                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2278
2279                 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2280                                                     &fd, t->buffer,
2281                                                     offset, sizeof(fd));
2282                 if (!ret)
2283                         ret = binder_translate_fd(fd, offset, t, thread,
2284                                                   in_reply_to);
2285                 if (ret)
2286                         return ret > 0 ? -EINVAL : ret;
2287         }
2288         return 0;
2289 }
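/*
 * Worked example of the fda_offset arithmetic above (illustrative
 * numbers): if the parent buffer object is mapped in the target at
 * parent->buffer = base + 0x100, the transaction buffer starts at
 * t->buffer->user_data = base, and fda->parent_offset = 0x20, then fd
 * number i is read from offset 0x120 + i * sizeof(u32) of the kernel
 * copy of the transaction buffer.
 */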
2290
2291 static int binder_fixup_parent(struct binder_transaction *t,
2292                                struct binder_thread *thread,
2293                                struct binder_buffer_object *bp,
2294                                binder_size_t off_start_offset,
2295                                binder_size_t num_valid,
2296                                binder_size_t last_fixup_obj_off,
2297                                binder_size_t last_fixup_min_off)
2298 {
2299         struct binder_buffer_object *parent;
2300         struct binder_buffer *b = t->buffer;
2301         struct binder_proc *proc = thread->proc;
2302         struct binder_proc *target_proc = t->to_proc;
2303         struct binder_object object;
2304         binder_size_t buffer_offset;
2305         binder_size_t parent_offset;
2306
2307         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2308                 return 0;
2309
2310         parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2311                                      off_start_offset, &parent_offset,
2312                                      num_valid);
2313         if (!parent) {
2314                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2315                                   proc->pid, thread->pid);
2316                 return -EINVAL;
2317         }
2318
2319         if (!binder_validate_fixup(target_proc, b, off_start_offset,
2320                                    parent_offset, bp->parent_offset,
2321                                    last_fixup_obj_off,
2322                                    last_fixup_min_off)) {
2323                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2324                                   proc->pid, thread->pid);
2325                 return -EINVAL;
2326         }
2327
2328         if (parent->length < sizeof(binder_uintptr_t) ||
2329             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2330                 /* No space for a pointer here! */
2331                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2332                                   proc->pid, thread->pid);
2333                 return -EINVAL;
2334         }
2335         buffer_offset = bp->parent_offset +
2336                         (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2337         if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2338                                         &bp->buffer, sizeof(bp->buffer))) {
2339                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2340                                   proc->pid, thread->pid);
2341                 return -EINVAL;
2342         }
2343
2344         return 0;
2345 }
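/*
 * What the fixup achieves (descriptive only): the sender stored a
 * user-space pointer to the child buffer inside the parent object; by
 * the time this is called, bp->buffer is expected to already hold that
 * child buffer's address as the target will map it, and the
 * binder_alloc_copy_to_buffer() call above patches this value into the
 * parent at bp->parent_offset so the pointer is valid in the target.
 */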
2346
2347 /**
2348  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2349  * @t:          transaction to send
2350  * @proc:       process to send the transaction to
2351  * @thread:     thread in @proc to send the transaction to (may be NULL)
2352  *
2353  * This function queues a transaction to the specified process. It will try
2354  * to find a thread in the target process to handle the transaction and
2355  * wake it up. If no thread is found, the work is queued to the proc
2356  * waitqueue.
2357  *
2358  * If the @thread parameter is not NULL, the transaction is always queued
2359  * to the todo list of that specific thread.
2360  *
2361  * Return:      0 if the transaction was successfully queued
2362  *              BR_DEAD_REPLY if the target process or thread is dead
2363  *              BR_FROZEN_REPLY if the target process or thread is frozen
2364  */
2365 static int binder_proc_transaction(struct binder_transaction *t,
2366                                     struct binder_proc *proc,
2367                                     struct binder_thread *thread)
2368 {
2369         struct binder_node *node = t->buffer->target_node;
2370         bool oneway = !!(t->flags & TF_ONE_WAY);
2371         bool pending_async = false;
2372
2373         BUG_ON(!node);
2374         binder_node_lock(node);
2375         if (oneway) {
2376                 BUG_ON(thread);
2377                 if (node->has_async_transaction)
2378                         pending_async = true;
2379                 else
2380                         node->has_async_transaction = true;
2381         }
2382
2383         binder_inner_proc_lock(proc);
2384         if (proc->is_frozen) {
2385                 proc->sync_recv |= !oneway;
2386                 proc->async_recv |= oneway;
2387         }
2388
2389         if ((proc->is_frozen && !oneway) || proc->is_dead ||
2390                         (thread && thread->is_dead)) {
2391                 binder_inner_proc_unlock(proc);
2392                 binder_node_unlock(node);
2393                 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2394         }
2395
2396         if (!thread && !pending_async)
2397                 thread = binder_select_thread_ilocked(proc);
2398
2399         if (thread)
2400                 binder_enqueue_thread_work_ilocked(thread, &t->work);
2401         else if (!pending_async)
2402                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2403         else
2404                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2405
2406         if (!pending_async)
2407                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2408
2409         proc->outstanding_txns++;
2410         binder_inner_proc_unlock(proc);
2411         binder_node_unlock(node);
2412
2413         return 0;
2414 }
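/*
 * Queueing decision in brief (descriptive only):
 *
 *	explicit @thread given       -> thread->todo
 *	no thread, no pending async  -> a waiting thread's todo if one can
 *					be selected, else proc->todo
 *	oneway with async pending    -> node->async_todo, delivered once
 *					the earlier async work completes
 *
 * Only the non-pending cases wake a target, and synchronous
 * transactions request a synchronous wakeup.
 */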
2415
2416 /**
2417  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2418  * @node:         struct binder_node for which to get refs
2419  * @procp:        returns @node->proc if valid
2420  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2421  *
2422  * User-space normally keeps the node alive when creating a transaction
2423  * since it has a reference to the target. The local strong ref keeps it
2424  * alive if the sending process dies before the target process processes
2425  * the transaction. If the source process is malicious or has a reference
2426  * counting bug, relying on the local strong ref can fail.
2427  *
2428  * Since user-space can cause the local strong ref to go away, we also take
2429  * a tmpref on the node to ensure it survives while we are constructing
2430  * the transaction. We also need a tmpref on the proc while we are
2431  * constructing the transaction, so we take that here as well.
2432  *
2433  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2434  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2435  * target proc has died, @error is set to BR_DEAD_REPLY.
2436  */
2437 static struct binder_node *binder_get_node_refs_for_txn(
2438                 struct binder_node *node,
2439                 struct binder_proc **procp,
2440                 uint32_t *error)
2441 {
2442         struct binder_node *target_node = NULL;
2443
2444         binder_node_inner_lock(node);
2445         if (node->proc) {
2446                 target_node = node;
2447                 binder_inc_node_nilocked(node, 1, 0, NULL);
2448                 binder_inc_node_tmpref_ilocked(node);
2449                 node->proc->tmp_ref++;
2450                 *procp = node->proc;
2451         } else
2452                 *error = BR_DEAD_REPLY;
2453         binder_node_inner_unlock(node);
2454
2455         return target_node;
2456 }
2457
2458 static void binder_transaction(struct binder_proc *proc,
2459                                struct binder_thread *thread,
2460                                struct binder_transaction_data *tr, int reply,
2461                                binder_size_t extra_buffers_size)
2462 {
2463         int ret;
2464         struct binder_transaction *t;
2465         struct binder_work *w;
2466         struct binder_work *tcomplete;
2467         binder_size_t buffer_offset = 0;
2468         binder_size_t off_start_offset, off_end_offset;
2469         binder_size_t off_min;
2470         binder_size_t sg_buf_offset, sg_buf_end_offset;
2471         binder_size_t user_offset = 0;
2472         struct binder_proc *target_proc = NULL;
2473         struct binder_thread *target_thread = NULL;
2474         struct binder_node *target_node = NULL;
2475         struct binder_transaction *in_reply_to = NULL;
2476         struct binder_transaction_log_entry *e;
2477         uint32_t return_error = 0;
2478         uint32_t return_error_param = 0;
2479         uint32_t return_error_line = 0;
2480         binder_size_t last_fixup_obj_off = 0;
2481         binder_size_t last_fixup_min_off = 0;
2482         struct binder_context *context = proc->context;
2483         int t_debug_id = atomic_inc_return(&binder_last_id);
2484         char *secctx = NULL;
2485         u32 secctx_sz = 0;
2486         const void __user *user_buffer = (const void __user *)
2487                                 (uintptr_t)tr->data.ptr.buffer;
2488
2489         e = binder_transaction_log_add(&binder_transaction_log);
2490         e->debug_id = t_debug_id;
2491         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2492         e->from_proc = proc->pid;
2493         e->from_thread = thread->pid;
2494         e->target_handle = tr->target.handle;
2495         e->data_size = tr->data_size;
2496         e->offsets_size = tr->offsets_size;
2497         strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2498
2499         if (reply) {
2500                 binder_inner_proc_lock(proc);
2501                 in_reply_to = thread->transaction_stack;
2502                 if (in_reply_to == NULL) {
2503                         binder_inner_proc_unlock(proc);
2504                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2505                                           proc->pid, thread->pid);
2506                         return_error = BR_FAILED_REPLY;
2507                         return_error_param = -EPROTO;
2508                         return_error_line = __LINE__;
2509                         goto err_empty_call_stack;
2510                 }
2511                 if (in_reply_to->to_thread != thread) {
2512                         spin_lock(&in_reply_to->lock);
2513                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2514                                 proc->pid, thread->pid, in_reply_to->debug_id,
2515                                 in_reply_to->to_proc ?
2516                                 in_reply_to->to_proc->pid : 0,
2517                                 in_reply_to->to_thread ?
2518                                 in_reply_to->to_thread->pid : 0);
2519                         spin_unlock(&in_reply_to->lock);
2520                         binder_inner_proc_unlock(proc);
2521                         return_error = BR_FAILED_REPLY;
2522                         return_error_param = -EPROTO;
2523                         return_error_line = __LINE__;
2524                         in_reply_to = NULL;
2525                         goto err_bad_call_stack;
2526                 }
2527                 thread->transaction_stack = in_reply_to->to_parent;
2528                 binder_inner_proc_unlock(proc);
2529                 binder_set_nice(in_reply_to->saved_priority);
2530                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2531                 if (target_thread == NULL) {
2532                         /* annotation for sparse */
2533                         __release(&target_thread->proc->inner_lock);
2534                         return_error = BR_DEAD_REPLY;
2535                         return_error_line = __LINE__;
2536                         goto err_dead_binder;
2537                 }
2538                 if (target_thread->transaction_stack != in_reply_to) {
2539                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2540                                 proc->pid, thread->pid,
2541                                 target_thread->transaction_stack ?
2542                                 target_thread->transaction_stack->debug_id : 0,
2543                                 in_reply_to->debug_id);
2544                         binder_inner_proc_unlock(target_thread->proc);
2545                         return_error = BR_FAILED_REPLY;
2546                         return_error_param = -EPROTO;
2547                         return_error_line = __LINE__;
2548                         in_reply_to = NULL;
2549                         target_thread = NULL;
2550                         goto err_dead_binder;
2551                 }
2552                 target_proc = target_thread->proc;
2553                 target_proc->tmp_ref++;
2554                 binder_inner_proc_unlock(target_thread->proc);
2555         } else {
2556                 if (tr->target.handle) {
2557                         struct binder_ref *ref;
2558
2559                         /*
2560                          * There must already be a strong ref
2561                          * on this node. If so, do a strong
2562                          * increment on the node to ensure it
2563                          * stays alive until the transaction is
2564                          * done.
2565                          */
2566                         binder_proc_lock(proc);
2567                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2568                                                      true);
2569                         if (ref) {
2570                                 target_node = binder_get_node_refs_for_txn(
2571                                                 ref->node, &target_proc,
2572                                                 &return_error);
2573                         } else {
2574                                 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2575                                                   proc->pid, thread->pid, tr->target.handle);
2576                                 return_error = BR_FAILED_REPLY;
2577                         }
2578                         binder_proc_unlock(proc);
2579                 } else {
2580                         mutex_lock(&context->context_mgr_node_lock);
2581                         target_node = context->binder_context_mgr_node;
2582                         if (target_node)
2583                                 target_node = binder_get_node_refs_for_txn(
2584                                                 target_node, &target_proc,
2585                                                 &return_error);
2586                         else
2587                                 return_error = BR_DEAD_REPLY;
2588                         mutex_unlock(&context->context_mgr_node_lock);
2589                         if (target_node && target_proc->pid == proc->pid) {
2590                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2591                                                   proc->pid, thread->pid);
2592                                 return_error = BR_FAILED_REPLY;
2593                                 return_error_param = -EINVAL;
2594                                 return_error_line = __LINE__;
2595                                 goto err_invalid_target_handle;
2596                         }
2597                 }
2598                 if (!target_node) {
2599                         /*
2600                          * return_error is set above
2601                          */
2602                         return_error_param = -EINVAL;
2603                         return_error_line = __LINE__;
2604                         goto err_dead_binder;
2605                 }
2606                 e->to_node = target_node->debug_id;
2607                 if (WARN_ON(proc == target_proc)) {
2608                         return_error = BR_FAILED_REPLY;
2609                         return_error_param = -EINVAL;
2610                         return_error_line = __LINE__;
2611                         goto err_invalid_target_handle;
2612                 }
2613                 if (security_binder_transaction(proc->cred,
2614                                                 target_proc->cred) < 0) {
2615                         return_error = BR_FAILED_REPLY;
2616                         return_error_param = -EPERM;
2617                         return_error_line = __LINE__;
2618                         goto err_invalid_target_handle;
2619                 }
2620                 binder_inner_proc_lock(proc);
2621
2622                 w = list_first_entry_or_null(&thread->todo,
2623                                              struct binder_work, entry);
2624                 if (!(tr->flags & TF_ONE_WAY) && w &&
2625                     w->type == BINDER_WORK_TRANSACTION) {
2626                         /*
2627                          * Do not allow new outgoing transaction from a
2628                          * thread that has a transaction at the head of
2629                          * its todo list. Only need to check the head
2630                          * because binder_select_thread_ilocked picks a
2631                          * thread from proc->waiting_threads to enqueue
2632                          * the transaction, and nothing is queued to the
2633                          * todo list while the thread is on waiting_threads.
2634                          */
2635                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2636                                           proc->pid, thread->pid);
2637                         binder_inner_proc_unlock(proc);
2638                         return_error = BR_FAILED_REPLY;
2639                         return_error_param = -EPROTO;
2640                         return_error_line = __LINE__;
2641                         goto err_bad_todo_list;
2642                 }
2643
2644                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2645                         struct binder_transaction *tmp;
2646
2647                         tmp = thread->transaction_stack;
2648                         if (tmp->to_thread != thread) {
2649                                 spin_lock(&tmp->lock);
2650                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2651                                         proc->pid, thread->pid, tmp->debug_id,
2652                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2653                                         tmp->to_thread ?
2654                                         tmp->to_thread->pid : 0);
2655                                 spin_unlock(&tmp->lock);
2656                                 binder_inner_proc_unlock(proc);
2657                                 return_error = BR_FAILED_REPLY;
2658                                 return_error_param = -EPROTO;
2659                                 return_error_line = __LINE__;
2660                                 goto err_bad_call_stack;
2661                         }
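                        /*
                         * Walk up the call chain looking for a thread in the
                         * target process that is already waiting on a reply
                         * from us; if one is found, deliver this transaction
                         * to that thread so the nested call is handled on the
                         * already-blocked thread instead of waking another
                         * thread in the target process.
                         */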
2662                         while (tmp) {
2663                                 struct binder_thread *from;
2664
2665                                 spin_lock(&tmp->lock);
2666                                 from = tmp->from;
2667                                 if (from && from->proc == target_proc) {
2668                                         atomic_inc(&from->tmp_ref);
2669                                         target_thread = from;
2670                                         spin_unlock(&tmp->lock);
2671                                         break;
2672                                 }
2673                                 spin_unlock(&tmp->lock);
2674                                 tmp = tmp->from_parent;
2675                         }
2676                 }
2677                 binder_inner_proc_unlock(proc);
2678         }
2679         if (target_thread)
2680                 e->to_thread = target_thread->pid;
2681         e->to_proc = target_proc->pid;
2682
2683         /* TODO: reuse incoming transaction for reply */
2684         t = kzalloc(sizeof(*t), GFP_KERNEL);
2685         if (t == NULL) {
2686                 return_error = BR_FAILED_REPLY;
2687                 return_error_param = -ENOMEM;
2688                 return_error_line = __LINE__;
2689                 goto err_alloc_t_failed;
2690         }
2691         INIT_LIST_HEAD(&t->fd_fixups);
2692         binder_stats_created(BINDER_STAT_TRANSACTION);
2693         spin_lock_init(&t->lock);
2694
2695         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2696         if (tcomplete == NULL) {
2697                 return_error = BR_FAILED_REPLY;
2698                 return_error_param = -ENOMEM;
2699                 return_error_line = __LINE__;
2700                 goto err_alloc_tcomplete_failed;
2701         }
2702         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2703
2704         t->debug_id = t_debug_id;
2705
2706         if (reply)
2707                 binder_debug(BINDER_DEBUG_TRANSACTION,
2708                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2709                              proc->pid, thread->pid, t->debug_id,
2710                              target_proc->pid, target_thread->pid,
2711                              (u64)tr->data.ptr.buffer,
2712                              (u64)tr->data.ptr.offsets,
2713                              (u64)tr->data_size, (u64)tr->offsets_size,
2714                              (u64)extra_buffers_size);
2715         else
2716                 binder_debug(BINDER_DEBUG_TRANSACTION,
2717                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2718                              proc->pid, thread->pid, t->debug_id,
2719                              target_proc->pid, target_node->debug_id,
2720                              (u64)tr->data.ptr.buffer,
2721                              (u64)tr->data.ptr.offsets,
2722                              (u64)tr->data_size, (u64)tr->offsets_size,
2723                              (u64)extra_buffers_size);
2724
2725         if (!reply && !(tr->flags & TF_ONE_WAY))
2726                 t->from = thread;
2727         else
2728                 t->from = NULL;
2729         t->sender_euid = task_euid(proc->tsk);
2730         t->to_proc = target_proc;
2731         t->to_thread = target_thread;
2732         t->code = tr->code;
2733         t->flags = tr->flags;
2734         t->priority = task_nice(current);
2735
2736         if (target_node && target_node->txn_security_ctx) {
2737                 u32 secid;
2738                 size_t added_size;
2739
2740                 security_cred_getsecid(proc->cred, &secid);
2741                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2742                 if (ret) {
2743                         return_error = BR_FAILED_REPLY;
2744                         return_error_param = ret;
2745                         return_error_line = __LINE__;
2746                         goto err_get_secctx_failed;
2747                 }
2748                 added_size = ALIGN(secctx_sz, sizeof(u64));
2749                 extra_buffers_size += added_size;
2750                 if (extra_buffers_size < added_size) {
2751                         /* integer overflow of extra_buffers_size */
2752                         return_error = BR_FAILED_REPLY;
2753                         return_error_param = -EINVAL;
2754                         return_error_line = __LINE__;
2755                         goto err_bad_extra_size;
2756                 }
2757         }
2758
2759         trace_binder_transaction(reply, t, target_node);
2760
2761         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2762                 tr->offsets_size, extra_buffers_size,
2763                 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2764         if (IS_ERR(t->buffer)) {
2765                 /*
2766                  * -ESRCH indicates VMA cleared. The target is dying.
2767                  */
2768                 return_error_param = PTR_ERR(t->buffer);
2769                 return_error = return_error_param == -ESRCH ?
2770                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2771                 return_error_line = __LINE__;
2772                 t->buffer = NULL;
2773                 goto err_binder_alloc_buf_failed;
2774         }
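        /*
         * The security context, if present, lives at the tail of the buffer:
         * extra_buffers_size was grown by ALIGN(secctx_sz, sizeof(u64)) above,
         * and buf_offset below points at that reserved tail.
         */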
2775         if (secctx) {
2776                 int err;
2777                 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2778                                     ALIGN(tr->offsets_size, sizeof(void *)) +
2779                                     ALIGN(extra_buffers_size, sizeof(void *)) -
2780                                     ALIGN(secctx_sz, sizeof(u64));
2781
2782                 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2783                 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2784                                                   t->buffer, buf_offset,
2785                                                   secctx, secctx_sz);
2786                 if (err) {
2787                         t->security_ctx = 0;
2788                         WARN_ON(1);
2789                 }
2790                 security_release_secctx(secctx, secctx_sz);
2791                 secctx = NULL;
2792         }
2793         t->buffer->debug_id = t->debug_id;
2794         t->buffer->transaction = t;
2795         t->buffer->target_node = target_node;
2796         t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2797         trace_binder_transaction_alloc_buf(t->buffer);
2798
2799         if (binder_alloc_copy_user_to_buffer(
2800                                 &target_proc->alloc,
2801                                 t->buffer,
2802                                 ALIGN(tr->data_size, sizeof(void *)),
2803                                 (const void __user *)
2804                                         (uintptr_t)tr->data.ptr.offsets,
2805                                 tr->offsets_size)) {
2806                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2807                                 proc->pid, thread->pid);
2808                 return_error = BR_FAILED_REPLY;
2809                 return_error_param = -EFAULT;
2810                 return_error_line = __LINE__;
2811                 goto err_copy_data_failed;
2812         }
2813         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2814                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2815                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2816                 return_error = BR_FAILED_REPLY;
2817                 return_error_param = -EINVAL;
2818                 return_error_line = __LINE__;
2819                 goto err_bad_offset;
2820         }
2821         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2822                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2823                                   proc->pid, thread->pid,
2824                                   (u64)extra_buffers_size);
2825                 return_error = BR_FAILED_REPLY;
2826                 return_error_param = -EINVAL;
2827                 return_error_line = __LINE__;
2828                 goto err_bad_offset;
2829         }
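        /*
         * Layout of the target buffer: the transaction data comes first,
         * followed by the offsets array, then any scatter-gather payloads
         * (BINDER_TYPE_PTR objects), with the security context (if any) at
         * the very end of the extra buffers area.
         */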
2830         off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2831         buffer_offset = off_start_offset;
2832         off_end_offset = off_start_offset + tr->offsets_size;
2833         sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2834         sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2835                 ALIGN(secctx_sz, sizeof(u64));
2836         off_min = 0;
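        /*
         * Walk the offsets array: each entry points at a binder object
         * embedded in the data.  The user data is copied into the target
         * buffer in fragments, up to each object, and the objects themselves
         * are validated and translated in place.
         */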
2837         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2838              buffer_offset += sizeof(binder_size_t)) {
2839                 struct binder_object_header *hdr;
2840                 size_t object_size;
2841                 struct binder_object object;
2842                 binder_size_t object_offset;
2843                 binder_size_t copy_size;
2844
2845                 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2846                                                   &object_offset,
2847                                                   t->buffer,
2848                                                   buffer_offset,
2849                                                   sizeof(object_offset))) {
2850                         return_error = BR_FAILED_REPLY;
2851                         return_error_param = -EINVAL;
2852                         return_error_line = __LINE__;
2853                         goto err_bad_offset;
2854                 }
2855
2856                 /*
2857                  * Copy the source user buffer up to the next object
2858                  * that will be processed.
2859                  */
2860                 copy_size = object_offset - user_offset;
2861                 if (copy_size && (user_offset > object_offset ||
2862                                 binder_alloc_copy_user_to_buffer(
2863                                         &target_proc->alloc,
2864                                         t->buffer, user_offset,
2865                                         user_buffer + user_offset,
2866                                         copy_size))) {
2867                         binder_user_error("%d:%d got transaction with invalid data ptr\n",
2868                                         proc->pid, thread->pid);
2869                         return_error = BR_FAILED_REPLY;
2870                         return_error_param = -EFAULT;
2871                         return_error_line = __LINE__;
2872                         goto err_copy_data_failed;
2873                 }
2874                 object_size = binder_get_object(target_proc, user_buffer,
2875                                 t->buffer, object_offset, &object);
2876                 if (object_size == 0 || object_offset < off_min) {
2877                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2878                                           proc->pid, thread->pid,
2879                                           (u64)object_offset,
2880                                           (u64)off_min,
2881                                           (u64)t->buffer->data_size);
2882                         return_error = BR_FAILED_REPLY;
2883                         return_error_param = -EINVAL;
2884                         return_error_line = __LINE__;
2885                         goto err_bad_offset;
2886                 }
2887                 /*
2888                  * Set offset to the next buffer fragment to be
2889                  * copied
2890                  */
2891                 user_offset = object_offset + object_size;
2892
2893                 hdr = &object.hdr;
2894                 off_min = object_offset + object_size;
2895                 switch (hdr->type) {
2896                 case BINDER_TYPE_BINDER:
2897                 case BINDER_TYPE_WEAK_BINDER: {
2898                         struct flat_binder_object *fp;
2899
2900                         fp = to_flat_binder_object(hdr);
2901                         ret = binder_translate_binder(fp, t, thread);
2902
2903                         if (ret < 0 ||
2904                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2905                                                         t->buffer,
2906                                                         object_offset,
2907                                                         fp, sizeof(*fp))) {
2908                                 return_error = BR_FAILED_REPLY;
2909                                 return_error_param = ret;
2910                                 return_error_line = __LINE__;
2911                                 goto err_translate_failed;
2912                         }
2913                 } break;
2914                 case BINDER_TYPE_HANDLE:
2915                 case BINDER_TYPE_WEAK_HANDLE: {
2916                         struct flat_binder_object *fp;
2917
2918                         fp = to_flat_binder_object(hdr);
2919                         ret = binder_translate_handle(fp, t, thread);
2920                         if (ret < 0 ||
2921                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2922                                                         t->buffer,
2923                                                         object_offset,
2924                                                         fp, sizeof(*fp))) {
2925                                 return_error = BR_FAILED_REPLY;
2926                                 return_error_param = ret;
2927                                 return_error_line = __LINE__;
2928                                 goto err_translate_failed;
2929                         }
2930                 } break;
2931
2932                 case BINDER_TYPE_FD: {
2933                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2934                         binder_size_t fd_offset = object_offset +
2935                                 (uintptr_t)&fp->fd - (uintptr_t)fp;
2936                         int ret = binder_translate_fd(fp->fd, fd_offset, t,
2937                                                       thread, in_reply_to);
2938
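                        /*
                         * binder_translate_fd() does not install the fd in the
                         * target here; it records a fixup on t->fd_fixups so
                         * the fd is created in the target process when it
                         * reads the transaction.  fd_offset is where the new
                         * fd value will be written into the buffer.
                         */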
2939                         fp->pad_binder = 0;
2940                         if (ret < 0 ||
2941                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2942                                                         t->buffer,
2943                                                         object_offset,
2944                                                         fp, sizeof(*fp))) {
2945                                 return_error = BR_FAILED_REPLY;
2946                                 return_error_param = ret;
2947                                 return_error_line = __LINE__;
2948                                 goto err_translate_failed;
2949                         }
2950                 } break;
2951                 case BINDER_TYPE_FDA: {
2952                         struct binder_object ptr_object;
2953                         binder_size_t parent_offset;
2954                         struct binder_fd_array_object *fda =
2955                                 to_binder_fd_array_object(hdr);
2956                         size_t num_valid = (buffer_offset - off_start_offset) /
2957                                                 sizeof(binder_size_t);
2958                         struct binder_buffer_object *parent =
2959                                 binder_validate_ptr(target_proc, t->buffer,
2960                                                     &ptr_object, fda->parent,
2961                                                     off_start_offset,
2962                                                     &parent_offset,
2963                                                     num_valid);
2964                         if (!parent) {
2965                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2966                                                   proc->pid, thread->pid);
2967                                 return_error = BR_FAILED_REPLY;
2968                                 return_error_param = -EINVAL;
2969                                 return_error_line = __LINE__;
2970                                 goto err_bad_parent;
2971                         }
2972                         if (!binder_validate_fixup(target_proc, t->buffer,
2973                                                    off_start_offset,
2974                                                    parent_offset,
2975                                                    fda->parent_offset,
2976                                                    last_fixup_obj_off,
2977                                                    last_fixup_min_off)) {
2978                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2979                                                   proc->pid, thread->pid);
2980                                 return_error = BR_FAILED_REPLY;
2981                                 return_error_param = -EINVAL;
2982                                 return_error_line = __LINE__;
2983                                 goto err_bad_parent;
2984                         }
2985                         ret = binder_translate_fd_array(fda, parent, t, thread,
2986                                                         in_reply_to);
2987                         if (!ret)
2988                                 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
2989                                                                   t->buffer,
2990                                                                   object_offset,
2991                                                                   fda, sizeof(*fda));
2992                         if (ret) {
2993                                 return_error = BR_FAILED_REPLY;
2994                                 return_error_param = ret > 0 ? -EINVAL : ret;
2995                                 return_error_line = __LINE__;
2996                                 goto err_translate_failed;
2997                         }
2998                         last_fixup_obj_off = parent_offset;
2999                         last_fixup_min_off =
3000                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3001                 } break;
3002                 case BINDER_TYPE_PTR: {
3003                         struct binder_buffer_object *bp =
3004                                 to_binder_buffer_object(hdr);
3005                         size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3006                         size_t num_valid;
3007
3008                         if (bp->length > buf_left) {
3009                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3010                                                   proc->pid, thread->pid);
3011                                 return_error = BR_FAILED_REPLY;
3012                                 return_error_param = -EINVAL;
3013                                 return_error_line = __LINE__;
3014                                 goto err_bad_offset;
3015                         }
3016                         if (binder_alloc_copy_user_to_buffer(
3017                                                 &target_proc->alloc,
3018                                                 t->buffer,
3019                                                 sg_buf_offset,
3020                                                 (const void __user *)
3021                                                         (uintptr_t)bp->buffer,
3022                                                 bp->length)) {
3023                                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3024                                                   proc->pid, thread->pid);
3025                                 return_error_param = -EFAULT;
3026                                 return_error = BR_FAILED_REPLY;
3027                                 return_error_line = __LINE__;
3028                                 goto err_copy_data_failed;
3029                         }
3030                         /* Fixup buffer pointer to target proc address space */
3031                         bp->buffer = (uintptr_t)
3032                                 t->buffer->user_data + sg_buf_offset;
3033                         sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3034
3035                         num_valid = (buffer_offset - off_start_offset) /
3036                                         sizeof(binder_size_t);
3037                         ret = binder_fixup_parent(t, thread, bp,
3038                                                   off_start_offset,
3039                                                   num_valid,
3040                                                   last_fixup_obj_off,
3041                                                   last_fixup_min_off);
3042                         if (ret < 0 ||
3043                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3044                                                         t->buffer,
3045                                                         object_offset,
3046                                                         bp, sizeof(*bp))) {
3047                                 return_error = BR_FAILED_REPLY;
3048                                 return_error_param = ret;
3049                                 return_error_line = __LINE__;
3050                                 goto err_translate_failed;
3051                         }
3052                         last_fixup_obj_off = object_offset;
3053                         last_fixup_min_off = 0;
3054                 } break;
3055                 default:
3056                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3057                                 proc->pid, thread->pid, hdr->type);
3058                         return_error = BR_FAILED_REPLY;
3059                         return_error_param = -EINVAL;
3060                         return_error_line = __LINE__;
3061                         goto err_bad_object_type;
3062                 }
3063         }
3064         /* Done processing objects, copy the rest of the buffer */
3065         if (binder_alloc_copy_user_to_buffer(
3066                                 &target_proc->alloc,
3067                                 t->buffer, user_offset,
3068                                 user_buffer + user_offset,
3069                                 tr->data_size - user_offset)) {
3070                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3071                                 proc->pid, thread->pid);
3072                 return_error = BR_FAILED_REPLY;
3073                 return_error_param = -EFAULT;
3074                 return_error_line = __LINE__;
3075                 goto err_copy_data_failed;
3076         }
3077         if (t->buffer->oneway_spam_suspect)
3078                 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3079         else
3080                 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3081         t->work.type = BINDER_WORK_TRANSACTION;
3082
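        /*
         * Three delivery paths: a reply goes straight to the thread waiting
         * on it; a synchronous transaction is pushed onto this thread's
         * transaction stack and handed to binder_proc_transaction(), which
         * delivers it to the chosen target thread (or selects one); a oneway
         * transaction is queued with no target thread, possibly on the
         * node's async list.
         */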
3083         if (reply) {
3084                 binder_enqueue_thread_work(thread, tcomplete);
3085                 binder_inner_proc_lock(target_proc);
3086                 if (target_thread->is_dead) {
3087                         return_error = BR_DEAD_REPLY;
3088                         binder_inner_proc_unlock(target_proc);
3089                         goto err_dead_proc_or_thread;
3090                 }
3091                 BUG_ON(t->buffer->async_transaction != 0);
3092                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3093                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3094                 target_proc->outstanding_txns++;
3095                 binder_inner_proc_unlock(target_proc);
3096                 wake_up_interruptible_sync(&target_thread->wait);
3097                 binder_free_transaction(in_reply_to);
3098         } else if (!(t->flags & TF_ONE_WAY)) {
3099                 BUG_ON(t->buffer->async_transaction != 0);
3100                 binder_inner_proc_lock(proc);
3101                 /*
3102                  * Defer the TRANSACTION_COMPLETE, so we don't return to
3103                  * userspace immediately; this allows the target process to
3104                  * immediately start processing this transaction, reducing
3105                  * latency. We will then return the TRANSACTION_COMPLETE when
3106                  * the target replies (or there is an error).
3107                  */
3108                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3109                 t->need_reply = 1;
3110                 t->from_parent = thread->transaction_stack;
3111                 thread->transaction_stack = t;
3112                 binder_inner_proc_unlock(proc);
3113                 return_error = binder_proc_transaction(t,
3114                                 target_proc, target_thread);
3115                 if (return_error) {
3116                         binder_inner_proc_lock(proc);
3117                         binder_pop_transaction_ilocked(thread, t);
3118                         binder_inner_proc_unlock(proc);
3119                         goto err_dead_proc_or_thread;
3120                 }
3121         } else {
3122                 BUG_ON(target_node == NULL);
3123                 BUG_ON(t->buffer->async_transaction != 1);
3124                 binder_enqueue_thread_work(thread, tcomplete);
3125                 return_error = binder_proc_transaction(t, target_proc, NULL);
3126                 if (return_error)
3127                         goto err_dead_proc_or_thread;
3128         }
3129         if (target_thread)
3130                 binder_thread_dec_tmpref(target_thread);
3131         binder_proc_dec_tmpref(target_proc);
3132         if (target_node)
3133                 binder_dec_node_tmpref(target_node);
3134         /*
3135          * write barrier to synchronize with initialization
3136          * of log entry
3137          */
3138         smp_wmb();
3139         WRITE_ONCE(e->debug_id_done, t_debug_id);
3140         return;
3141
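/*
 * Error unwind: a failure jumps to the label matching how far setup had
 * progressed; execution then falls through the remaining labels, releasing
 * each resource that had been acquired, in reverse order of acquisition.
 */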
3142 err_dead_proc_or_thread:
3143         return_error_line = __LINE__;
3144         binder_dequeue_work(proc, tcomplete);
3145 err_translate_failed:
3146 err_bad_object_type:
3147 err_bad_offset:
3148 err_bad_parent:
3149 err_copy_data_failed:
3150         binder_free_txn_fixups(t);
3151         trace_binder_transaction_failed_buffer_release(t->buffer);
3152         binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3153                                           buffer_offset, true);
3154         if (target_node)
3155                 binder_dec_node_tmpref(target_node);
3156         target_node = NULL;
3157         t->buffer->transaction = NULL;
3158         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3159 err_binder_alloc_buf_failed:
3160 err_bad_extra_size:
3161         if (secctx)
3162                 security_release_secctx(secctx, secctx_sz);
3163 err_get_secctx_failed:
3164         kfree(tcomplete);
3165         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3166 err_alloc_tcomplete_failed:
3167         if (trace_binder_txn_latency_free_enabled())
3168                 binder_txn_latency_free(t);
3169         kfree(t);
3170         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3171 err_alloc_t_failed:
3172 err_bad_todo_list:
3173 err_bad_call_stack:
3174 err_empty_call_stack:
3175 err_dead_binder:
3176 err_invalid_target_handle:
3177         if (target_thread)
3178                 binder_thread_dec_tmpref(target_thread);
3179         if (target_proc)
3180                 binder_proc_dec_tmpref(target_proc);
3181         if (target_node) {
3182                 binder_dec_node(target_node, 1, 0);
3183                 binder_dec_node_tmpref(target_node);
3184         }
3185
3186         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3187                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3188                      proc->pid, thread->pid, return_error, return_error_param,
3189                      (u64)tr->data_size, (u64)tr->offsets_size,
3190                      return_error_line);
3191
3192         {
3193                 struct binder_transaction_log_entry *fe;
3194
3195                 e->return_error = return_error;
3196                 e->return_error_param = return_error_param;
3197                 e->return_error_line = return_error_line;
3198                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3199                 *fe = *e;
3200                 /*
3201                  * write barrier to synchronize with initialization
3202                  * of log entry
3203                  */
3204                 smp_wmb();
3205                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3206                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3207         }
3208
3209         BUG_ON(thread->return_error.cmd != BR_OK);
3210         if (in_reply_to) {
3211                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3212                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3213                 binder_send_failed_reply(in_reply_to, return_error);
3214         } else {
3215                 thread->return_error.cmd = return_error;
3216                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3217         }
3218 }
3219
3220 /**
3221  * binder_free_buf() - free the specified buffer
3222  * @proc:       binder proc that owns the buffer
 * @thread:     thread performing the buffer release
3223  * @buffer:     buffer to be freed
3224  * @is_failure: true if the transaction failed to send
3225  *
3226  * If the buffer is for an async transaction, enqueue the next pending
3227  * async transaction from the node.
3228  *
3229  * Clean up the buffer and free it.
3230  */
3231 static void
3232 binder_free_buf(struct binder_proc *proc,
3233                 struct binder_thread *thread,
3234                 struct binder_buffer *buffer, bool is_failure)
3235 {
3236         binder_inner_proc_lock(proc);
3237         if (buffer->transaction) {
3238                 buffer->transaction->buffer = NULL;
3239                 buffer->transaction = NULL;
3240         }
3241         binder_inner_proc_unlock(proc);
3242         if (buffer->async_transaction && buffer->target_node) {
3243                 struct binder_node *buf_node;
3244                 struct binder_work *w;
3245
3246                 buf_node = buffer->target_node;
3247                 binder_node_inner_lock(buf_node);
3248                 BUG_ON(!buf_node->has_async_transaction);
3249                 BUG_ON(buf_node->proc != proc);
3250                 w = binder_dequeue_work_head_ilocked(
3251                                 &buf_node->async_todo);
3252                 if (!w) {
3253                         buf_node->has_async_transaction = false;
3254                 } else {
3255                         binder_enqueue_work_ilocked(
3256                                         w, &proc->todo);
3257                         binder_wakeup_proc_ilocked(proc);
3258                 }
3259                 binder_node_inner_unlock(buf_node);
3260         }
3261         trace_binder_transaction_buffer_release(buffer);
3262         binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3263         binder_alloc_free_buf(&proc->alloc, buffer);
3264 }
3265
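/*
 * Consume BC_* commands from the userspace write buffer, stopping early if a
 * return error becomes pending for this thread.
 */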
3266 static int binder_thread_write(struct binder_proc *proc,
3267                         struct binder_thread *thread,
3268                         binder_uintptr_t binder_buffer, size_t size,
3269                         binder_size_t *consumed)
3270 {
3271         uint32_t cmd;
3272         struct binder_context *context = proc->context;
3273         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3274         void __user *ptr = buffer + *consumed;
3275         void __user *end = buffer + size;
3276
3277         while (ptr < end && thread->return_error.cmd == BR_OK) {
3278                 int ret;
3279
3280                 if (get_user(cmd, (uint32_t __user *)ptr))
3281                         return -EFAULT;
3282                 ptr += sizeof(uint32_t);
3283                 trace_binder_command(cmd);
3284                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3285                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3286                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3287                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3288                 }
3289                 switch (cmd) {
3290                 case BC_INCREFS:
3291                 case BC_ACQUIRE:
3292                 case BC_RELEASE:
3293                 case BC_DECREFS: {
3294                         uint32_t target;
3295                         const char *debug_string;
3296                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3297                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3298                         struct binder_ref_data rdata;
3299
3300                         if (get_user(target, (uint32_t __user *)ptr))
3301                                 return -EFAULT;
3302
3303                         ptr += sizeof(uint32_t);
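                        /*
                         * Handle 0 always refers to the context manager.  When
                         * incrementing, take the reference against the context
                         * manager node directly (creating the ref if needed);
                         * otherwise fall back to looking up the existing ref
                         * by handle below.
                         */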
3304                         ret = -1;
3305                         if (increment && !target) {
3306                                 struct binder_node *ctx_mgr_node;
3307
3308                                 mutex_lock(&context->context_mgr_node_lock);
3309                                 ctx_mgr_node = context->binder_context_mgr_node;
3310                                 if (ctx_mgr_node) {
3311                                         if (ctx_mgr_node->proc == proc) {
3312                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3313                                                                   proc->pid, thread->pid);
3314                                                 mutex_unlock(&context->context_mgr_node_lock);
3315                                                 return -EINVAL;
3316                                         }
3317                                         ret = binder_inc_ref_for_node(
3318                                                         proc, ctx_mgr_node,
3319                                                         strong, NULL, &rdata);
3320                                 }
3321                                 mutex_unlock(&context->context_mgr_node_lock);
3322                         }
3323                         if (ret)
3324                                 ret = binder_update_ref_for_handle(
3325                                                 proc, target, increment, strong,
3326                                                 &rdata);
3327                         if (!ret && rdata.desc != target) {
3328                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3329                                         proc->pid, thread->pid,
3330                                         target, rdata.desc);
3331                         }
3332                         switch (cmd) {
3333                         case BC_INCREFS:
3334                                 debug_string = "IncRefs";
3335                                 break;
3336                         case BC_ACQUIRE:
3337                                 debug_string = "Acquire";
3338                                 break;
3339                         case BC_RELEASE:
3340                                 debug_string = "Release";
3341                                 break;
3342                         case BC_DECREFS:
3343                         default:
3344                                 debug_string = "DecRefs";
3345                                 break;
3346                         }
3347                         if (ret) {
3348                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3349                                         proc->pid, thread->pid, debug_string,
3350                                         strong, target, ret);
3351                                 break;
3352                         }
3353                         binder_debug(BINDER_DEBUG_USER_REFS,
3354                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3355                                      proc->pid, thread->pid, debug_string,
3356                                      rdata.debug_id, rdata.desc, rdata.strong,
3357                                      rdata.weak);
3358                         break;
3359                 }
3360                 case BC_INCREFS_DONE:
3361                 case BC_ACQUIRE_DONE: {
3362                         binder_uintptr_t node_ptr;
3363                         binder_uintptr_t cookie;
3364                         struct binder_node *node;
3365                         bool free_node;
3366
3367                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3368                                 return -EFAULT;
3369                         ptr += sizeof(binder_uintptr_t);
3370                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3371                                 return -EFAULT;
3372                         ptr += sizeof(binder_uintptr_t);
3373                         node = binder_get_node(proc, node_ptr);
3374                         if (node == NULL) {
3375                                 binder_user_error("%d:%d %s u%016llx no match\n",
3376                                         proc->pid, thread->pid,
3377                                         cmd == BC_INCREFS_DONE ?
3378                                         "BC_INCREFS_DONE" :
3379                                         "BC_ACQUIRE_DONE",
3380                                         (u64)node_ptr);
3381                                 break;
3382                         }
3383                         if (cookie != node->cookie) {
3384                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3385                                         proc->pid, thread->pid,
3386                                         cmd == BC_INCREFS_DONE ?
3387                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3388                                         (u64)node_ptr, node->debug_id,
3389                                         (u64)cookie, (u64)node->cookie);
3390                                 binder_put_node(node);
3391                                 break;
3392                         }
3393                         binder_node_inner_lock(node);
3394                         if (cmd == BC_ACQUIRE_DONE) {
3395                                 if (node->pending_strong_ref == 0) {
3396                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3397                                                 proc->pid, thread->pid,
3398                                                 node->debug_id);
3399                                         binder_node_inner_unlock(node);
3400                                         binder_put_node(node);
3401                                         break;
3402                                 }
3403                                 node->pending_strong_ref = 0;
3404                         } else {
3405                                 if (node->pending_weak_ref == 0) {
3406                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3407                                                 proc->pid, thread->pid,
3408                                                 node->debug_id);
3409                                         binder_node_inner_unlock(node);
3410                                         binder_put_node(node);
3411                                         break;
3412                                 }
3413                                 node->pending_weak_ref = 0;
3414                         }
3415                         free_node = binder_dec_node_nilocked(node,
3416                                         cmd == BC_ACQUIRE_DONE, 0);
3417                         WARN_ON(free_node);
3418                         binder_debug(BINDER_DEBUG_USER_REFS,
3419                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3420                                      proc->pid, thread->pid,
3421                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3422                                      node->debug_id, node->local_strong_refs,
3423                                      node->local_weak_refs, node->tmp_refs);
3424                         binder_node_inner_unlock(node);
3425                         binder_put_node(node);
3426                         break;
3427                 }
3428                 case BC_ATTEMPT_ACQUIRE:
3429                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3430                         return -EINVAL;
3431                 case BC_ACQUIRE_RESULT:
3432                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3433                         return -EINVAL;
3434
3435                 case BC_FREE_BUFFER: {
3436                         binder_uintptr_t data_ptr;
3437                         struct binder_buffer *buffer;
3438
3439                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3440                                 return -EFAULT;
3441                         ptr += sizeof(binder_uintptr_t);
3442
3443                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3444                                                               data_ptr);
3445                         if (IS_ERR_OR_NULL(buffer)) {
3446                                 if (PTR_ERR(buffer) == -EPERM) {
3447                                         binder_user_error(
3448                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3449                                                 proc->pid, thread->pid,
3450                                                 (u64)data_ptr);
3451                                 } else {
3452                                         binder_user_error(
3453                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3454                                                 proc->pid, thread->pid,
3455                                                 (u64)data_ptr);
3456                                 }
3457                                 break;
3458                         }
3459                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3460                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3461                                      proc->pid, thread->pid, (u64)data_ptr,
3462                                      buffer->debug_id,
3463                                      buffer->transaction ? "active" : "finished");
3464                         binder_free_buf(proc, thread, buffer, false);
3465                         break;
3466                 }
3467
3468                 case BC_TRANSACTION_SG:
3469                 case BC_REPLY_SG: {
3470                         struct binder_transaction_data_sg tr;
3471
3472                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3473                                 return -EFAULT;
3474                         ptr += sizeof(tr);
3475                         binder_transaction(proc, thread, &tr.transaction_data,
3476                                            cmd == BC_REPLY_SG, tr.buffers_size);
3477                         break;
3478                 }
3479                 case BC_TRANSACTION:
3480                 case BC_REPLY: {
3481                         struct binder_transaction_data tr;
3482
3483                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3484                                 return -EFAULT;
3485                         ptr += sizeof(tr);
3486                         binder_transaction(proc, thread, &tr,
3487                                            cmd == BC_REPLY, 0);
3488                         break;
3489                 }
3490
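                /*
                 * BC_REGISTER_LOOPER is sent by a thread spawned in response
                 * to the driver's BR_SPAWN_LOOPER request; BC_ENTER_LOOPER is
                 * sent by a thread the application dedicated to binder on its
                 * own.  A thread may do one or the other, not both.
                 */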
3491                 case BC_REGISTER_LOOPER:
3492                         binder_debug(BINDER_DEBUG_THREADS,
3493                                      "%d:%d BC_REGISTER_LOOPER\n",
3494                                      proc->pid, thread->pid);
3495                         binder_inner_proc_lock(proc);
3496                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3497                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3498                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3499                                         proc->pid, thread->pid);
3500                         } else if (proc->requested_threads == 0) {
3501                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3502                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3503                                         proc->pid, thread->pid);
3504                         } else {
3505                                 proc->requested_threads--;
3506                                 proc->requested_threads_started++;
3507                         }
3508                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3509                         binder_inner_proc_unlock(proc);
3510                         break;
3511                 case BC_ENTER_LOOPER:
3512                         binder_debug(BINDER_DEBUG_THREADS,
3513                                      "%d:%d BC_ENTER_LOOPER\n",
3514                                      proc->pid, thread->pid);
3515                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3516                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3517                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3518                                         proc->pid, thread->pid);
3519                         }
3520                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3521                         break;
3522                 case BC_EXIT_LOOPER:
3523                         binder_debug(BINDER_DEBUG_THREADS,
3524                                      "%d:%d BC_EXIT_LOOPER\n",
3525                                      proc->pid, thread->pid);
3526                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3527                         break;
3528
3529                 case BC_REQUEST_DEATH_NOTIFICATION:
3530                 case BC_CLEAR_DEATH_NOTIFICATION: {
3531                         uint32_t target;
3532                         binder_uintptr_t cookie;
3533                         struct binder_ref *ref;
3534                         struct binder_ref_death *death = NULL;
3535
3536                         if (get_user(target, (uint32_t __user *)ptr))
3537                                 return -EFAULT;
3538                         ptr += sizeof(uint32_t);
3539                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3540                                 return -EFAULT;
3541                         ptr += sizeof(binder_uintptr_t);
3542                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3543                                 /*
3544                                  * Allocate memory for death notification
3545                                  * before taking lock
3546                                  */
3547                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3548                                 if (death == NULL) {
3549                                         WARN_ON(thread->return_error.cmd !=
3550                                                 BR_OK);
3551                                         thread->return_error.cmd = BR_ERROR;
3552                                         binder_enqueue_thread_work(
3553                                                 thread,
3554                                                 &thread->return_error.work);
3555                                         binder_debug(
3556                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3557                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3558                                                 proc->pid, thread->pid);
3559                                         break;
3560                                 }
3561                         }
3562                         binder_proc_lock(proc);
3563                         ref = binder_get_ref_olocked(proc, target, false);
3564                         if (ref == NULL) {
3565                                 binder_user_error("%d:%d %s invalid ref %d\n",
3566                                         proc->pid, thread->pid,
3567                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3568                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3569                                         "BC_CLEAR_DEATH_NOTIFICATION",
3570                                         target);
3571                                 binder_proc_unlock(proc);
3572                                 kfree(death);
3573                                 break;
3574                         }
3575
3576                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3577                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3578                                      proc->pid, thread->pid,
3579                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3580                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3581                                      "BC_CLEAR_DEATH_NOTIFICATION",
3582                                      (u64)cookie, ref->data.debug_id,
3583                                      ref->data.desc, ref->data.strong,
3584                                      ref->data.weak, ref->node->debug_id);
3585
3586                         binder_node_lock(ref->node);
3587                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3588                                 if (ref->death) {
3589                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3590                                                 proc->pid, thread->pid);
3591                                         binder_node_unlock(ref->node);
3592                                         binder_proc_unlock(proc);
3593                                         kfree(death);
3594                                         break;
3595                                 }
3596                                 binder_stats_created(BINDER_STAT_DEATH);
3597                                 INIT_LIST_HEAD(&death->work.entry);
3598                                 death->cookie = cookie;
3599                                 ref->death = death;
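                                /*
                                 * If the node's owning process is already
                                 * gone, the remote binder is already dead:
                                 * queue the DEAD_BINDER work for this proc
                                 * immediately.
                                 */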
3600                                 if (ref->node->proc == NULL) {
3601                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3602
3603                                         binder_inner_proc_lock(proc);
3604                                         binder_enqueue_work_ilocked(
3605                                                 &ref->death->work, &proc->todo);
3606                                         binder_wakeup_proc_ilocked(proc);
3607                                         binder_inner_proc_unlock(proc);
3608                                 }
3609                         } else {
3610                                 if (ref->death == NULL) {
3611                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3612                                                 proc->pid, thread->pid);
3613                                         binder_node_unlock(ref->node);
3614                                         binder_proc_unlock(proc);
3615                                         break;
3616                                 }
3617                                 death = ref->death;
3618                                 if (death->cookie != cookie) {
3619                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3620                                                 proc->pid, thread->pid,
3621                                                 (u64)death->cookie,
3622                                                 (u64)cookie);
3623                                         binder_node_unlock(ref->node);
3624                                         binder_proc_unlock(proc);
3625                                         break;
3626                                 }
3627                                 ref->death = NULL;
3628                                 binder_inner_proc_lock(proc);
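                                /*
                                 * If the death notification has not been
                                 * queued yet, just send the clear
                                 * confirmation.  If a DEAD_BINDER work item is
                                 * already pending, convert it so the clear is
                                 * acknowledged once that notification has been
                                 * delivered.
                                 */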
3629                                 if (list_empty(&death->work.entry)) {
3630                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3631                                         if (thread->looper &
3632                                             (BINDER_LOOPER_STATE_REGISTERED |
3633                                              BINDER_LOOPER_STATE_ENTERED))
3634                                                 binder_enqueue_thread_work_ilocked(
3635                                                                 thread,
3636                                                                 &death->work);
3637                                         else {
3638                                                 binder_enqueue_work_ilocked(
3639                                                                 &death->work,
3640                                                                 &proc->todo);
3641                                                 binder_wakeup_proc_ilocked(
3642                                                                 proc);
3643                                         }
3644                                 } else {
3645                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3646                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3647                                 }
3648                                 binder_inner_proc_unlock(proc);
3649                         }
3650                         binder_node_unlock(ref->node);
3651                         binder_proc_unlock(proc);
3652                 } break;
3653                 case BC_DEAD_BINDER_DONE: {
3654                         struct binder_work *w;
3655                         binder_uintptr_t cookie;
3656                         struct binder_ref_death *death = NULL;
3657
3658                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3659                                 return -EFAULT;
3660
3661                         ptr += sizeof(cookie);
3662                         binder_inner_proc_lock(proc);
3663                         list_for_each_entry(w, &proc->delivered_death,
3664                                             entry) {
3665                                 struct binder_ref_death *tmp_death =
3666                                         container_of(w,
3667                                                      struct binder_ref_death,
3668                                                      work);
3669
3670                                 if (tmp_death->cookie == cookie) {
3671                                         death = tmp_death;
3672                                         break;
3673                                 }
3674                         }
3675                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3676                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3677                                      proc->pid, thread->pid, (u64)cookie,
3678                                      death);
3679                         if (death == NULL) {
3680                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3681                                         proc->pid, thread->pid, (u64)cookie);
3682                                 binder_inner_proc_unlock(proc);
3683                                 break;
3684                         }
3685                         binder_dequeue_work_ilocked(&death->work);
3686                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3687                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3688                                 if (thread->looper &
3689                                         (BINDER_LOOPER_STATE_REGISTERED |
3690                                          BINDER_LOOPER_STATE_ENTERED))
3691                                         binder_enqueue_thread_work_ilocked(
3692                                                 thread, &death->work);
3693                                 else {
3694                                         binder_enqueue_work_ilocked(
3695                                                         &death->work,
3696                                                         &proc->todo);
3697                                         binder_wakeup_proc_ilocked(proc);
3698                                 }
3699                         }
3700                         binder_inner_proc_unlock(proc);
3701                 } break;
3702
3703                 default:
3704                         pr_err("%d:%d unknown command %d\n",
3705                                proc->pid, thread->pid, cmd);
3706                         return -EINVAL;
3707                 }
3708                 *consumed = ptr - buffer;
3709         }
3710         return 0;
3711 }
3712
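/*
 * binder_stat_br() - account for a driver-to-user return command
 * Emits the binder_return tracepoint and, when the command number is in
 * range, bumps the global, per-process and per-thread BR_* counters.
 */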
3713 static void binder_stat_br(struct binder_proc *proc,
3714                            struct binder_thread *thread, uint32_t cmd)
3715 {
3716         trace_binder_return(cmd);
3717         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3718                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3719                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3720                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3721         }
3722 }
3723
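/*
 * binder_put_node_cmd() - emit a node reference command to userspace
 * Writes the command followed by the node's ptr and cookie into the read
 * buffer at *ptrp, records the BR statistic, and advances *ptrp past the
 * bytes written. Returns 0 on success or -EFAULT if a put_user() fails.
 */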
3724 static int binder_put_node_cmd(struct binder_proc *proc,
3725                                struct binder_thread *thread,
3726                                void __user **ptrp,
3727                                binder_uintptr_t node_ptr,
3728                                binder_uintptr_t node_cookie,
3729                                int node_debug_id,
3730                                uint32_t cmd, const char *cmd_name)
3731 {
3732         void __user *ptr = *ptrp;
3733
3734         if (put_user(cmd, (uint32_t __user *)ptr))
3735                 return -EFAULT;
3736         ptr += sizeof(uint32_t);
3737
3738         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3739                 return -EFAULT;
3740         ptr += sizeof(binder_uintptr_t);
3741
3742         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3743                 return -EFAULT;
3744         ptr += sizeof(binder_uintptr_t);
3745
3746         binder_stat_br(proc, thread, cmd);
3747         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3748                      proc->pid, thread->pid, cmd_name, node_debug_id,
3749                      (u64)node_ptr, (u64)node_cookie);
3750
3751         *ptrp = ptr;
3752         return 0;
3753 }
3754
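/*
 * binder_wait_for_work() - sleep until this thread has work
 * Waits interruptibly (outside the freezer count) for thread work, and
 * when @do_proc_work is set also registers the thread on
 * proc->waiting_threads so process-wide work can wake it. Returns 0 when
 * work is available or -EINTR if a signal is pending.
 */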
3755 static int binder_wait_for_work(struct binder_thread *thread,
3756                                 bool do_proc_work)
3757 {
3758         DEFINE_WAIT(wait);
3759         struct binder_proc *proc = thread->proc;
3760         int ret = 0;
3761
3762         freezer_do_not_count();
3763         binder_inner_proc_lock(proc);
3764         for (;;) {
3765                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3766                 if (binder_has_work_ilocked(thread, do_proc_work))
3767                         break;
3768                 if (do_proc_work)
3769                         list_add(&thread->waiting_thread_node,
3770                                  &proc->waiting_threads);
3771                 binder_inner_proc_unlock(proc);
3772                 schedule();
3773                 binder_inner_proc_lock(proc);
3774                 list_del_init(&thread->waiting_thread_node);
3775                 if (signal_pending(current)) {
3776                         ret = -EINTR;
3777                         break;
3778                 }
3779         }
3780         finish_wait(&thread->wait, &wait);
3781         binder_inner_proc_unlock(proc);
3782         freezer_count();
3783
3784         return ret;
3785 }
3786
3787 /**
3788  * binder_apply_fd_fixups() - finish fd translation
3789  * @proc:         binder_proc associated with @t->buffer
3790  * @t:  binder transaction with list of fd fixups
3791  *
3792  * Now that we are in the context of the transaction target
3793  * process, we can allocate and install fds. Process the
3794  * list of fds to translate and fix up the buffer with the
3795  * new fds.
3796  *
3797  * If we fail to allocate an fd, then free the resources by
3798  * fput'ing files that have not been processed and closing (via
3799  * binder_deferred_fd_close()) any fds that have already been allocated.
3800  */
3801 static int binder_apply_fd_fixups(struct binder_proc *proc,
3802                                   struct binder_transaction *t)
3803 {
3804         struct binder_txn_fd_fixup *fixup, *tmp;
3805         int ret = 0;
3806
3807         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3808                 int fd = get_unused_fd_flags(O_CLOEXEC);
3809
3810                 if (fd < 0) {
3811                         binder_debug(BINDER_DEBUG_TRANSACTION,
3812                                      "failed fd fixup txn %d fd %d\n",
3813                                      t->debug_id, fd);
3814                         ret = -ENOMEM;
3815                         break;
3816                 }
3817                 binder_debug(BINDER_DEBUG_TRANSACTION,
3818                              "fd fixup txn %d fd %d\n",
3819                              t->debug_id, fd);
3820                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3821                 fd_install(fd, fixup->file);
3822                 fixup->file = NULL;
3823                 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3824                                                 fixup->offset, &fd,
3825                                                 sizeof(u32))) {
3826                         ret = -EINVAL;
3827                         break;
3828                 }
3829         }
3830         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3831                 if (fixup->file) {
3832                         fput(fixup->file);
3833                 } else if (ret) {
3834                         u32 fd;
3835                         int err;
3836
3837                         err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3838                                                             t->buffer,
3839                                                             fixup->offset,
3840                                                             sizeof(fd));
3841                         WARN_ON(err);
3842                         if (!err)
3843                                 binder_deferred_fd_close(fd);
3844                 }
3845                 list_del(&fixup->fixup_entry);
3846                 kfree(fixup);
3847         }
3848
3849         return ret;
3850 }
3851
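/*
 * binder_thread_read() - fill the userspace read buffer with work items
 * Core of the BINDER_WRITE_READ read path: waits for work unless
 * @non_block is set, then dequeues items from the thread or process todo
 * lists and translates them into BR_* commands and transaction data in
 * the user buffer. May also emit BR_SPAWN_LOOPER to ask userspace to
 * start another looper thread.
 */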
3852 static int binder_thread_read(struct binder_proc *proc,
3853                               struct binder_thread *thread,
3854                               binder_uintptr_t binder_buffer, size_t size,
3855                               binder_size_t *consumed, int non_block)
3856 {
3857         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3858         void __user *ptr = buffer + *consumed;
3859         void __user *end = buffer + size;
3860
3861         int ret = 0;
3862         int wait_for_proc_work;
3863
3864         if (*consumed == 0) {
3865                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3866                         return -EFAULT;
3867                 ptr += sizeof(uint32_t);
3868         }
3869
3870 retry:
3871         binder_inner_proc_lock(proc);
3872         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3873         binder_inner_proc_unlock(proc);
3874
3875         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3876
3877         trace_binder_wait_for_work(wait_for_proc_work,
3878                                    !!thread->transaction_stack,
3879                                    !binder_worklist_empty(proc, &thread->todo));
3880         if (wait_for_proc_work) {
3881                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3882                                         BINDER_LOOPER_STATE_ENTERED))) {
3883                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3884                                 proc->pid, thread->pid, thread->looper);
3885                         wait_event_interruptible(binder_user_error_wait,
3886                                                  binder_stop_on_user_error < 2);
3887                 }
3888                 binder_set_nice(proc->default_priority);
3889         }
3890
3891         if (non_block) {
3892                 if (!binder_has_work(thread, wait_for_proc_work))
3893                         ret = -EAGAIN;
3894         } else {
3895                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3896         }
3897
3898         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3899
3900         if (ret)
3901                 return ret;
3902
3903         while (1) {
3904                 uint32_t cmd;
3905                 struct binder_transaction_data_secctx tr;
3906                 struct binder_transaction_data *trd = &tr.transaction_data;
3907                 struct binder_work *w = NULL;
3908                 struct list_head *list = NULL;
3909                 struct binder_transaction *t = NULL;
3910                 struct binder_thread *t_from;
3911                 size_t trsize = sizeof(*trd);
3912
3913                 binder_inner_proc_lock(proc);
3914                 if (!binder_worklist_empty_ilocked(&thread->todo))
3915                         list = &thread->todo;
3916                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3917                            wait_for_proc_work)
3918                         list = &proc->todo;
3919                 else {
3920                         binder_inner_proc_unlock(proc);
3921
3922                         /* no data added */
3923                         if (ptr - buffer == 4 && !thread->looper_need_return)
3924                                 goto retry;
3925                         break;
3926                 }
3927
3928                 if (end - ptr < sizeof(tr) + 4) {
3929                         binder_inner_proc_unlock(proc);
3930                         break;
3931                 }
3932                 w = binder_dequeue_work_head_ilocked(list);
3933                 if (binder_worklist_empty_ilocked(&thread->todo))
3934                         thread->process_todo = false;
3935
3936                 switch (w->type) {
3937                 case BINDER_WORK_TRANSACTION: {
3938                         binder_inner_proc_unlock(proc);
3939                         t = container_of(w, struct binder_transaction, work);
3940                 } break;
3941                 case BINDER_WORK_RETURN_ERROR: {
3942                         struct binder_error *e = container_of(
3943                                         w, struct binder_error, work);
3944
3945                         WARN_ON(e->cmd == BR_OK);
3946                         binder_inner_proc_unlock(proc);
3947                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3948                                 return -EFAULT;
3949                         cmd = e->cmd;
3950                         e->cmd = BR_OK;
3951                         ptr += sizeof(uint32_t);
3952
3953                         binder_stat_br(proc, thread, cmd);
3954                 } break;
3955                 case BINDER_WORK_TRANSACTION_COMPLETE:
3956                 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3957                         if (proc->oneway_spam_detection_enabled &&
3958                                    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3959                                 cmd = BR_ONEWAY_SPAM_SUSPECT;
3960                         else
3961                                 cmd = BR_TRANSACTION_COMPLETE;
3962                         binder_inner_proc_unlock(proc);
3963                         kfree(w);
3964                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3965                         if (put_user(cmd, (uint32_t __user *)ptr))
3966                                 return -EFAULT;
3967                         ptr += sizeof(uint32_t);
3968
3969                         binder_stat_br(proc, thread, cmd);
3970                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3971                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
3972                                      proc->pid, thread->pid);
3973                 } break;
3974                 case BINDER_WORK_NODE: {
3975                         struct binder_node *node = container_of(w, struct binder_node, work);
3976                         int strong, weak;
3977                         binder_uintptr_t node_ptr = node->ptr;
3978                         binder_uintptr_t node_cookie = node->cookie;
3979                         int node_debug_id = node->debug_id;
3980                         int has_weak_ref;
3981                         int has_strong_ref;
3982                         void __user *orig_ptr = ptr;
3983
3984                         BUG_ON(proc != node->proc);
3985                         strong = node->internal_strong_refs ||
3986                                         node->local_strong_refs;
3987                         weak = !hlist_empty(&node->refs) ||
3988                                         node->local_weak_refs ||
3989                                         node->tmp_refs || strong;
3990                         has_strong_ref = node->has_strong_ref;
3991                         has_weak_ref = node->has_weak_ref;
3992
3993                         if (weak && !has_weak_ref) {
3994                                 node->has_weak_ref = 1;
3995                                 node->pending_weak_ref = 1;
3996                                 node->local_weak_refs++;
3997                         }
3998                         if (strong && !has_strong_ref) {
3999                                 node->has_strong_ref = 1;
4000                                 node->pending_strong_ref = 1;
4001                                 node->local_strong_refs++;
4002                         }
4003                         if (!strong && has_strong_ref)
4004                                 node->has_strong_ref = 0;
4005                         if (!weak && has_weak_ref)
4006                                 node->has_weak_ref = 0;
4007                         if (!weak && !strong) {
4008                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4009                                              "%d:%d node %d u%016llx c%016llx deleted\n",
4010                                              proc->pid, thread->pid,
4011                                              node_debug_id,
4012                                              (u64)node_ptr,
4013                                              (u64)node_cookie);
4014                                 rb_erase(&node->rb_node, &proc->nodes);
4015                                 binder_inner_proc_unlock(proc);
4016                                 binder_node_lock(node);
4017                                 /*
4018                                  * Acquire the node lock before freeing the
4019                                  * node to serialize with other threads that
4020                                  * may have been holding the node lock while
4021                                  * decrementing this node (avoids race where
4022                                  * this thread frees while the other thread
4023                                  * is unlocking the node after the final
4024                                  * decrement)
4025                                  */
4026                                 binder_node_unlock(node);
4027                                 binder_free_node(node);
4028                         } else
4029                                 binder_inner_proc_unlock(proc);
4030
4031                         if (weak && !has_weak_ref)
4032                                 ret = binder_put_node_cmd(
4033                                                 proc, thread, &ptr, node_ptr,
4034                                                 node_cookie, node_debug_id,
4035                                                 BR_INCREFS, "BR_INCREFS");
4036                         if (!ret && strong && !has_strong_ref)
4037                                 ret = binder_put_node_cmd(
4038                                                 proc, thread, &ptr, node_ptr,
4039                                                 node_cookie, node_debug_id,
4040                                                 BR_ACQUIRE, "BR_ACQUIRE");
4041                         if (!ret && !strong && has_strong_ref)
4042                                 ret = binder_put_node_cmd(
4043                                                 proc, thread, &ptr, node_ptr,
4044                                                 node_cookie, node_debug_id,
4045                                                 BR_RELEASE, "BR_RELEASE");
4046                         if (!ret && !weak && has_weak_ref)
4047                                 ret = binder_put_node_cmd(
4048                                                 proc, thread, &ptr, node_ptr,
4049                                                 node_cookie, node_debug_id,
4050                                                 BR_DECREFS, "BR_DECREFS");
4051                         if (orig_ptr == ptr)
4052                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4053                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
4054                                              proc->pid, thread->pid,
4055                                              node_debug_id,
4056                                              (u64)node_ptr,
4057                                              (u64)node_cookie);
4058                         if (ret)
4059                                 return ret;
4060                 } break;
4061                 case BINDER_WORK_DEAD_BINDER:
4062                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4063                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4064                         struct binder_ref_death *death;
4065                         uint32_t cmd;
4066                         binder_uintptr_t cookie;
4067
4068                         death = container_of(w, struct binder_ref_death, work);
4069                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4070                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4071                         else
4072                                 cmd = BR_DEAD_BINDER;
4073                         cookie = death->cookie;
4074
4075                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4076                                      "%d:%d %s %016llx\n",
4077                                       proc->pid, thread->pid,
4078                                       cmd == BR_DEAD_BINDER ?
4079                                       "BR_DEAD_BINDER" :
4080                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4081                                       (u64)cookie);
4082                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4083                                 binder_inner_proc_unlock(proc);
4084                                 kfree(death);
4085                                 binder_stats_deleted(BINDER_STAT_DEATH);
4086                         } else {
4087                                 binder_enqueue_work_ilocked(
4088                                                 w, &proc->delivered_death);
4089                                 binder_inner_proc_unlock(proc);
4090                         }
4091                         if (put_user(cmd, (uint32_t __user *)ptr))
4092                                 return -EFAULT;
4093                         ptr += sizeof(uint32_t);
4094                         if (put_user(cookie,
4095                                      (binder_uintptr_t __user *)ptr))
4096                                 return -EFAULT;
4097                         ptr += sizeof(binder_uintptr_t);
4098                         binder_stat_br(proc, thread, cmd);
4099                         if (cmd == BR_DEAD_BINDER)
4100                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4101                 } break;
4102                 default:
4103                         binder_inner_proc_unlock(proc);
4104                         pr_err("%d:%d: bad work type %d\n",
4105                                proc->pid, thread->pid, w->type);
4106                         break;
4107                 }
4108
4109                 if (!t)
4110                         continue;
4111
4112                 BUG_ON(t->buffer == NULL);
4113                 if (t->buffer->target_node) {
4114                         struct binder_node *target_node = t->buffer->target_node;
4115
4116                         trd->target.ptr = target_node->ptr;
4117                         trd->cookie =  target_node->cookie;
4118                         t->saved_priority = task_nice(current);
4119                         if (t->priority < target_node->min_priority &&
4120                             !(t->flags & TF_ONE_WAY))
4121                                 binder_set_nice(t->priority);
4122                         else if (!(t->flags & TF_ONE_WAY) ||
4123                                  t->saved_priority > target_node->min_priority)
4124                                 binder_set_nice(target_node->min_priority);
4125                         cmd = BR_TRANSACTION;
4126                 } else {
4127                         trd->target.ptr = 0;
4128                         trd->cookie = 0;
4129                         cmd = BR_REPLY;
4130                 }
4131                 trd->code = t->code;
4132                 trd->flags = t->flags;
4133                 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4134
4135                 t_from = binder_get_txn_from(t);
4136                 if (t_from) {
4137                         struct task_struct *sender = t_from->proc->tsk;
4138
4139                         trd->sender_pid =
4140                                 task_tgid_nr_ns(sender,
4141                                                 task_active_pid_ns(current));
4142                 } else {
4143                         trd->sender_pid = 0;
4144                 }
4145
4146                 ret = binder_apply_fd_fixups(proc, t);
4147                 if (ret) {
4148                         struct binder_buffer *buffer = t->buffer;
4149                         bool oneway = !!(t->flags & TF_ONE_WAY);
4150                         int tid = t->debug_id;
4151
4152                         if (t_from)
4153                                 binder_thread_dec_tmpref(t_from);
4154                         buffer->transaction = NULL;
4155                         binder_cleanup_transaction(t, "fd fixups failed",
4156                                                    BR_FAILED_REPLY);
4157                         binder_free_buf(proc, thread, buffer, true);
4158                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4159                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4160                                      proc->pid, thread->pid,
4161                                      oneway ? "async " :
4162                                         (cmd == BR_REPLY ? "reply " : ""),
4163                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4164                         if (cmd == BR_REPLY) {
4165                                 cmd = BR_FAILED_REPLY;
4166                                 if (put_user(cmd, (uint32_t __user *)ptr))
4167                                         return -EFAULT;
4168                                 ptr += sizeof(uint32_t);
4169                                 binder_stat_br(proc, thread, cmd);
4170                                 break;
4171                         }
4172                         continue;
4173                 }
4174                 trd->data_size = t->buffer->data_size;
4175                 trd->offsets_size = t->buffer->offsets_size;
4176                 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4177                 trd->data.ptr.offsets = trd->data.ptr.buffer +
4178                                         ALIGN(t->buffer->data_size,
4179                                             sizeof(void *));
4180
4181                 tr.secctx = t->security_ctx;
4182                 if (t->security_ctx) {
4183                         cmd = BR_TRANSACTION_SEC_CTX;
4184                         trsize = sizeof(tr);
4185                 }
4186                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4187                         if (t_from)
4188                                 binder_thread_dec_tmpref(t_from);
4189
4190                         binder_cleanup_transaction(t, "put_user failed",
4191                                                    BR_FAILED_REPLY);
4192
4193                         return -EFAULT;
4194                 }
4195                 ptr += sizeof(uint32_t);
4196                 if (copy_to_user(ptr, &tr, trsize)) {
4197                         if (t_from)
4198                                 binder_thread_dec_tmpref(t_from);
4199
4200                         binder_cleanup_transaction(t, "copy_to_user failed",
4201                                                    BR_FAILED_REPLY);
4202
4203                         return -EFAULT;
4204                 }
4205                 ptr += trsize;
4206
4207                 trace_binder_transaction_received(t);
4208                 binder_stat_br(proc, thread, cmd);
4209                 binder_debug(BINDER_DEBUG_TRANSACTION,
4210                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4211                              proc->pid, thread->pid,
4212                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4213                                 (cmd == BR_TRANSACTION_SEC_CTX) ?
4214                                      "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4215                              t->debug_id, t_from ? t_from->proc->pid : 0,
4216                              t_from ? t_from->pid : 0, cmd,
4217                              t->buffer->data_size, t->buffer->offsets_size,
4218                              (u64)trd->data.ptr.buffer,
4219                              (u64)trd->data.ptr.offsets);
4220
4221                 if (t_from)
4222                         binder_thread_dec_tmpref(t_from);
4223                 t->buffer->allow_user_free = 1;
4224                 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4225                         binder_inner_proc_lock(thread->proc);
4226                         t->to_parent = thread->transaction_stack;
4227                         t->to_thread = thread;
4228                         thread->transaction_stack = t;
4229                         binder_inner_proc_unlock(thread->proc);
4230                 } else {
4231                         binder_free_transaction(t);
4232                 }
4233                 break;
4234         }
4235
4236 done:
4237
4238         *consumed = ptr - buffer;
4239         binder_inner_proc_lock(proc);
4240         if (proc->requested_threads == 0 &&
4241             list_empty(&thread->proc->waiting_threads) &&
4242             proc->requested_threads_started < proc->max_threads &&
4243             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4244              BINDER_LOOPER_STATE_ENTERED))
4245              /* the user-space code fails to spawn a new thread if we leave this out */) {
4246                 proc->requested_threads++;
4247                 binder_inner_proc_unlock(proc);
4248                 binder_debug(BINDER_DEBUG_THREADS,
4249                              "%d:%d BR_SPAWN_LOOPER\n",
4250                              proc->pid, thread->pid);
4251                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4252                         return -EFAULT;
4253                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4254         } else
4255                 binder_inner_proc_unlock(proc);
4256         return 0;
4257 }
4258
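/*
 * binder_release_work() - discard a pending work list
 * Drains @list, failing undelivered transactions with BR_DEAD_REPLY and
 * freeing the remaining work items; used when the owning thread or
 * process is going away (see binder_thread_release()).
 */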
4259 static void binder_release_work(struct binder_proc *proc,
4260                                 struct list_head *list)
4261 {
4262         struct binder_work *w;
4263         enum binder_work_type wtype;
4264
4265         while (1) {
4266                 binder_inner_proc_lock(proc);
4267                 w = binder_dequeue_work_head_ilocked(list);
4268                 wtype = w ? w->type : 0;
4269                 binder_inner_proc_unlock(proc);
4270                 if (!w)
4271                         return;
4272
4273                 switch (wtype) {
4274                 case BINDER_WORK_TRANSACTION: {
4275                         struct binder_transaction *t;
4276
4277                         t = container_of(w, struct binder_transaction, work);
4278
4279                         binder_cleanup_transaction(t, "process died.",
4280                                                    BR_DEAD_REPLY);
4281                 } break;
4282                 case BINDER_WORK_RETURN_ERROR: {
4283                         struct binder_error *e = container_of(
4284                                         w, struct binder_error, work);
4285
4286                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4287                                 "undelivered TRANSACTION_ERROR: %u\n",
4288                                 e->cmd);
4289                 } break;
4290                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4291                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4292                                 "undelivered TRANSACTION_COMPLETE\n");
4293                         kfree(w);
4294                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4295                 } break;
4296                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4297                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4298                         struct binder_ref_death *death;
4299
4300                         death = container_of(w, struct binder_ref_death, work);
4301                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4302                                 "undelivered death notification, %016llx\n",
4303                                 (u64)death->cookie);
4304                         kfree(death);
4305                         binder_stats_deleted(BINDER_STAT_DEATH);
4306                 } break;
4307                 case BINDER_WORK_NODE:
4308                         break;
4309                 default:
4310                         pr_err("unexpected work type, %d, not freed\n",
4311                                wtype);
4312                         break;
4313                 }
4314         }
4315
4316 }
4317
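/*
 * binder_get_thread_ilocked() - look up or insert the calling thread
 * Walks proc->threads (keyed by pid) for an entry matching current. On a
 * miss, @new_thread (if supplied) is initialized and inserted; otherwise
 * NULL is returned. Caller must hold proc->inner_lock.
 */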
4318 static struct binder_thread *binder_get_thread_ilocked(
4319                 struct binder_proc *proc, struct binder_thread *new_thread)
4320 {
4321         struct binder_thread *thread = NULL;
4322         struct rb_node *parent = NULL;
4323         struct rb_node **p = &proc->threads.rb_node;
4324
4325         while (*p) {
4326                 parent = *p;
4327                 thread = rb_entry(parent, struct binder_thread, rb_node);
4328
4329                 if (current->pid < thread->pid)
4330                         p = &(*p)->rb_left;
4331                 else if (current->pid > thread->pid)
4332                         p = &(*p)->rb_right;
4333                 else
4334                         return thread;
4335         }
4336         if (!new_thread)
4337                 return NULL;
4338         thread = new_thread;
4339         binder_stats_created(BINDER_STAT_THREAD);
4340         thread->proc = proc;
4341         thread->pid = current->pid;
4342         atomic_set(&thread->tmp_ref, 0);
4343         init_waitqueue_head(&thread->wait);
4344         INIT_LIST_HEAD(&thread->todo);
4345         rb_link_node(&thread->rb_node, parent, p);
4346         rb_insert_color(&thread->rb_node, &proc->threads);
4347         thread->looper_need_return = true;
4348         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4349         thread->return_error.cmd = BR_OK;
4350         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4351         thread->reply_error.cmd = BR_OK;
4352         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4353         return thread;
4354 }
4355
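/*
 * binder_get_thread() - find or create the binder_thread for current
 * Fast path looks up the thread under the inner lock; on a miss it
 * allocates a new binder_thread and retries the insert, freeing the
 * allocation if a matching thread already exists by the time the insert
 * is retried.
 */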
4356 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4357 {
4358         struct binder_thread *thread;
4359         struct binder_thread *new_thread;
4360
4361         binder_inner_proc_lock(proc);
4362         thread = binder_get_thread_ilocked(proc, NULL);
4363         binder_inner_proc_unlock(proc);
4364         if (!thread) {
4365                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4366                 if (new_thread == NULL)
4367                         return NULL;
4368                 binder_inner_proc_lock(proc);
4369                 thread = binder_get_thread_ilocked(proc, new_thread);
4370                 binder_inner_proc_unlock(proc);
4371                 if (thread != new_thread)
4372                         kfree(new_thread);
4373         }
4374         return thread;
4375 }
4376
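/*
 * binder_free_proc() - free a binder_proc after its last reference drops
 * Drops the reference on the owning binder device, releases the proc's
 * allocator state and its task and cred references, then frees the
 * structure.
 */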
4377 static void binder_free_proc(struct binder_proc *proc)
4378 {
4379         struct binder_device *device;
4380
4381         BUG_ON(!list_empty(&proc->todo));
4382         BUG_ON(!list_empty(&proc->delivered_death));
4383         if (proc->outstanding_txns)
4384                 pr_warn("%s: Unexpected outstanding_txns %d\n",
4385                         __func__, proc->outstanding_txns);
4386         device = container_of(proc->context, struct binder_device, context);
4387         if (refcount_dec_and_test(&device->ref)) {
4388                 kfree(proc->context->name);
4389                 kfree(device);
4390         }
4391         binder_alloc_deferred_release(&proc->alloc);
4392         put_task_struct(proc->tsk);
4393         put_cred(proc->cred);
4394         binder_stats_deleted(BINDER_STAT_PROC);
4395         kfree(proc);
4396 }
4397
4398 static void binder_free_thread(struct binder_thread *thread)
4399 {
4400         BUG_ON(!list_empty(&thread->todo));
4401         binder_stats_deleted(BINDER_STAT_THREAD);
4402         binder_proc_dec_tmpref(thread->proc);
4403         kfree(thread);
4404 }
4405
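/*
 * binder_thread_release() - tear down an exiting binder thread
 * Removes the thread from proc->threads, fails any transactions stacked
 * on it, wakes and quiesces pollers, releases its pending work, and drops
 * the temporary references taken here. Returns the number of transactions
 * that were still active.
 */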
4406 static int binder_thread_release(struct binder_proc *proc,
4407                                  struct binder_thread *thread)
4408 {
4409         struct binder_transaction *t;
4410         struct binder_transaction *send_reply = NULL;
4411         int active_transactions = 0;
4412         struct binder_transaction *last_t = NULL;
4413
4414         binder_inner_proc_lock(thread->proc);
4415         /*
4416          * take a ref on the proc so it survives
4417          * after we remove this thread from proc->threads.
4418          * The corresponding decrement happens when we actually
4419          * free the thread in binder_free_thread().
4420          */
4421         proc->tmp_ref++;
4422         /*
4423          * take a ref on this thread to ensure it
4424          * survives while we are releasing it
4425          */
4426         atomic_inc(&thread->tmp_ref);
4427         rb_erase(&thread->rb_node, &proc->threads);
4428         t = thread->transaction_stack;
4429         if (t) {
4430                 spin_lock(&t->lock);
4431                 if (t->to_thread == thread)
4432                         send_reply = t;
4433         } else {
4434                 __acquire(&t->lock);
4435         }
4436         thread->is_dead = true;
4437
4438         while (t) {
4439                 last_t = t;
4440                 active_transactions++;
4441                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4442                              "release %d:%d transaction %d %s, still active\n",
4443                               proc->pid, thread->pid,
4444                              t->debug_id,
4445                              (t->to_thread == thread) ? "in" : "out");
4446
4447                 if (t->to_thread == thread) {
4448                         thread->proc->outstanding_txns--;
4449                         t->to_proc = NULL;
4450                         t->to_thread = NULL;
4451                         if (t->buffer) {
4452                                 t->buffer->transaction = NULL;
4453                                 t->buffer = NULL;
4454                         }
4455                         t = t->to_parent;
4456                 } else if (t->from == thread) {
4457                         t->from = NULL;
4458                         t = t->from_parent;
4459                 } else
4460                         BUG();
4461                 spin_unlock(&last_t->lock);
4462                 if (t)
4463                         spin_lock(&t->lock);
4464                 else
4465                         __acquire(&t->lock);
4466         }
4467         /* annotation for sparse, lock not acquired in last iteration above */
4468         __release(&t->lock);
4469
4470         /*
4471          * If this thread used poll, make sure we remove the waitqueue from any
4472          * poll data structures holding it.
4473          */
4474         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4475                 wake_up_pollfree(&thread->wait);
4476
4477         binder_inner_proc_unlock(thread->proc);
4478
4479         /*
4480          * This is needed to avoid races between wake_up_pollfree() above and
4481          * someone else removing the last entry from the queue for other reasons
4482          * (e.g. ep_remove_wait_queue() being called due to an epoll file
4483          * descriptor being closed).  Such other users hold an RCU read lock, so
4484          * we can be sure they're done after we call synchronize_rcu().
4485          */
4486         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4487                 synchronize_rcu();
4488
4489         if (send_reply)
4490                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4491         binder_release_work(proc, &thread->todo);
4492         binder_thread_dec_tmpref(thread);
4493         return active_transactions;
4494 }
4495
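/*
 * binder_poll() - poll/epoll support for a binder fd
 * Marks the calling thread as a poller and reports EPOLLIN when thread or
 * process work is available.
 */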
4496 static __poll_t binder_poll(struct file *filp,
4497                                 struct poll_table_struct *wait)
4498 {
4499         struct binder_proc *proc = filp->private_data;
4500         struct binder_thread *thread = NULL;
4501         bool wait_for_proc_work;
4502
4503         thread = binder_get_thread(proc);
4504         if (!thread)
4505                 return POLLERR;
4506
4507         binder_inner_proc_lock(thread->proc);
4508         thread->looper |= BINDER_LOOPER_STATE_POLL;
4509         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4510
4511         binder_inner_proc_unlock(thread->proc);
4512
4513         poll_wait(filp, &thread->wait, wait);
4514
4515         if (binder_has_work(thread, wait_for_proc_work))
4516                 return EPOLLIN;
4517
4518         return 0;
4519 }
4520
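/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * Copies in a struct binder_write_read, pushes the write buffer through
 * binder_thread_write(), fills the read buffer via binder_thread_read()
 * if requested, and copies the updated consumed counts back to userspace.
 */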
4521 static int binder_ioctl_write_read(struct file *filp,
4522                                 unsigned int cmd, unsigned long arg,
4523                                 struct binder_thread *thread)
4524 {
4525         int ret = 0;
4526         struct binder_proc *proc = filp->private_data;
4527         unsigned int size = _IOC_SIZE(cmd);
4528         void __user *ubuf = (void __user *)arg;
4529         struct binder_write_read bwr;
4530
4531         if (size != sizeof(struct binder_write_read)) {
4532                 ret = -EINVAL;
4533                 goto out;
4534         }
4535         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4536                 ret = -EFAULT;
4537                 goto out;
4538         }
4539         binder_debug(BINDER_DEBUG_READ_WRITE,
4540                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4541                      proc->pid, thread->pid,
4542                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4543                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4544
4545         if (bwr.write_size > 0) {
4546                 ret = binder_thread_write(proc, thread,
4547                                           bwr.write_buffer,
4548                                           bwr.write_size,
4549                                           &bwr.write_consumed);
4550                 trace_binder_write_done(ret);
4551                 if (ret < 0) {
4552                         bwr.read_consumed = 0;
4553                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4554                                 ret = -EFAULT;
4555                         goto out;
4556                 }
4557         }
4558         if (bwr.read_size > 0) {
4559                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4560                                          bwr.read_size,
4561                                          &bwr.read_consumed,
4562                                          filp->f_flags & O_NONBLOCK);
4563                 trace_binder_read_done(ret);
4564                 binder_inner_proc_lock(proc);
4565                 if (!binder_worklist_empty_ilocked(&proc->todo))
4566                         binder_wakeup_proc_ilocked(proc);
4567                 binder_inner_proc_unlock(proc);
4568                 if (ret < 0) {
4569                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4570                                 ret = -EFAULT;
4571                         goto out;
4572                 }
4573         }
4574         binder_debug(BINDER_DEBUG_READ_WRITE,
4575                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4576                      proc->pid, thread->pid,
4577                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4578                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4579         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4580                 ret = -EFAULT;
4581                 goto out;
4582         }
4583 out:
4584         return ret;
4585 }
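/*
 * For illustration only (not part of the driver): a minimal sketch of how
 * a userspace client might drive the BINDER_WRITE_READ path handled
 * above, assuming it has already opened a binder device node. The
 * variable names and buffer contents are hypothetical; the commands and
 * structures come from <uapi/linux/android/binder.h>.
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	uint32_t read_buf[32];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *		.read_buffer = (binder_uintptr_t)(uintptr_t)read_buf,
 *		.read_size = sizeof(read_buf),
 *	};
 *
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -errno;
 *	// bwr.read_consumed bytes of BR_* commands are now in read_buf
 */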
4586
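/*
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 * Only one context manager may exist per binder context: the caller must
 * pass the security check and, if a manager uid was already recorded,
 * match it. On success a new node holding strong and weak references is
 * installed as context->binder_context_mgr_node.
 */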
4587 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4588                                     struct flat_binder_object *fbo)
4589 {
4590         int ret = 0;
4591         struct binder_proc *proc = filp->private_data;
4592         struct binder_context *context = proc->context;
4593         struct binder_node *new_node;
4594         kuid_t curr_euid = current_euid();
4595
4596         mutex_lock(&context->context_mgr_node_lock);
4597         if (context->binder_context_mgr_node) {
4598                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4599                 ret = -EBUSY;
4600                 goto out;
4601         }
4602         ret = security_binder_set_context_mgr(proc->cred);
4603         if (ret < 0)
4604                 goto out;
4605         if (uid_valid(context->binder_context_mgr_uid)) {
4606                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4607                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4608                                from_kuid(&init_user_ns, curr_euid),
4609                                from_kuid(&init_user_ns,
4610                                          context->binder_context_mgr_uid));
4611                         ret = -EPERM;
4612                         goto out;
4613                 }
4614         } else {
4615                 context->binder_context_mgr_uid = curr_euid;
4616         }
4617         new_node = binder_new_node(proc, fbo);
4618         if (!new_node) {
4619                 ret = -ENOMEM;
4620                 goto out;
4621         }
4622         binder_node_lock(new_node);
4623         new_node->local_weak_refs++;
4624         new_node->local_strong_refs++;
4625         new_node->has_strong_ref = 1;
4626         new_node->has_weak_ref = 1;
4627         context->binder_context_mgr_node = new_node;
4628         binder_node_unlock(new_node);
4629         binder_put_node(new_node);
4630 out:
4631         mutex_unlock(&context->context_mgr_node_lock);
4632         return ret;
4633 }
4634
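/*
 * binder_ioctl_get_node_info_for_ref() - handle BINDER_GET_NODE_INFO_FOR_REF
 * Restricted to the context manager: resolves @info->handle to a node in
 * @proc and reports its strong and weak reference counts.
 */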
4635 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4636                 struct binder_node_info_for_ref *info)
4637 {
4638         struct binder_node *node;
4639         struct binder_context *context = proc->context;
4640         __u32 handle = info->handle;
4641
4642         if (info->strong_count || info->weak_count || info->reserved1 ||
4643             info->reserved2 || info->reserved3) {
4644                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4645                                   proc->pid);
4646                 return -EINVAL;
4647         }
4648
4649         /* This ioctl may only be used by the context manager */
4650         mutex_lock(&context->context_mgr_node_lock);
4651         if (!context->binder_context_mgr_node ||
4652                 context->binder_context_mgr_node->proc != proc) {
4653                 mutex_unlock(&context->context_mgr_node_lock);
4654                 return -EPERM;
4655         }
4656         mutex_unlock(&context->context_mgr_node_lock);
4657
4658         node = binder_get_node_from_ref(proc, handle, true, NULL);
4659         if (!node)
4660                 return -EINVAL;
4661
4662         info->strong_count = node->local_strong_refs +
4663                 node->internal_strong_refs;
4664         info->weak_count = node->local_weak_refs;
4665
4666         binder_put_node(node);
4667
4668         return 0;
4669 }
4670
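/*
 * binder_ioctl_get_node_debug_info() - handle BINDER_GET_NODE_DEBUG_INFO
 * Reports debug state for the first node in @proc whose ptr is greater
 * than @info->ptr, letting userspace walk the node tree with repeated
 * calls.
 */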
4671 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4672                                 struct binder_node_debug_info *info)
4673 {
4674         struct rb_node *n;
4675         binder_uintptr_t ptr = info->ptr;
4676
4677         memset(info, 0, sizeof(*info));
4678
4679         binder_inner_proc_lock(proc);
4680         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4681                 struct binder_node *node = rb_entry(n, struct binder_node,
4682                                                     rb_node);
4683                 if (node->ptr > ptr) {
4684                         info->ptr = node->ptr;
4685                         info->cookie = node->cookie;
4686                         info->has_strong_ref = node->has_strong_ref;
4687                         info->has_weak_ref = node->has_weak_ref;
4688                         break;
4689                 }
4690         }
4691         binder_inner_proc_unlock(proc);
4692
4693         return 0;
4694 }
4695
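/*
 * binder_txns_pending_ilocked() - check for transactions still in flight
 * Returns true if the proc has outstanding transactions or any of its
 * threads still has a transaction stack. Caller must hold
 * proc->inner_lock.
 */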
4696 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4697 {
4698         struct rb_node *n;
4699         struct binder_thread *thread;
4700
4701         if (proc->outstanding_txns > 0)
4702                 return true;
4703
4704         for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4705                 thread = rb_entry(n, struct binder_thread, rb_node);
4706                 if (thread->transaction_stack)
4707                         return true;
4708         }
4709         return false;
4710 }
4711
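/*
 * binder_ioctl_freeze() - freeze or unfreeze a target process
 * Clearing the freeze just resets the flags. Freezing marks the proc
 * frozen, optionally waits up to @info->timeout_ms for outstanding
 * transactions to drain, and backs the freeze out (returning -EAGAIN or
 * the wait error) if transactions are still pending.
 */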
4712 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4713                                struct binder_proc *target_proc)
4714 {
4715         int ret = 0;
4716
4717         if (!info->enable) {
4718                 binder_inner_proc_lock(target_proc);
4719                 target_proc->sync_recv = false;
4720                 target_proc->async_recv = false;
4721                 target_proc->is_frozen = false;
4722                 binder_inner_proc_unlock(target_proc);
4723                 return 0;
4724         }
4725
4726         /*
4727          * Freezing the target. Prevent new transactions by
4728          * setting the frozen state. If a timeout is specified, wait
4729          * for outstanding transactions to drain.
4730          */
4731         binder_inner_proc_lock(target_proc);
4732         target_proc->sync_recv = false;
4733         target_proc->async_recv = false;
4734         target_proc->is_frozen = true;
4735         binder_inner_proc_unlock(target_proc);
4736
4737         if (info->timeout_ms > 0)
4738                 ret = wait_event_interruptible_timeout(
4739                         target_proc->freeze_wait,
4740                         (!target_proc->outstanding_txns),
4741                         msecs_to_jiffies(info->timeout_ms));
4742
4743         /* Check for pending transactions that are still waiting for a reply */
4744         if (ret >= 0) {
4745                 binder_inner_proc_lock(target_proc);
4746                 if (binder_txns_pending_ilocked(target_proc))
4747                         ret = -EAGAIN;
4748                 binder_inner_proc_unlock(target_proc);
4749         }
4750
4751         if (ret < 0) {
4752                 binder_inner_proc_lock(target_proc);
4753                 target_proc->is_frozen = false;
4754                 binder_inner_proc_unlock(target_proc);
4755         }
4756
4757         return ret;
4758 }
4759
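/*
 * binder_ioctl_get_freezer_info() - report frozen-state activity
 * Aggregates sync_recv/async_recv across every binder_proc matching
 * @info->pid; bit 1 of sync_recv is set when transactions are still
 * pending. Returns -EINVAL if no matching process exists.
 */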
4760 static int binder_ioctl_get_freezer_info(
4761                                 struct binder_frozen_status_info *info)
4762 {
4763         struct binder_proc *target_proc;
4764         bool found = false;
4765         __u32 txns_pending;
4766
4767         info->sync_recv = 0;
4768         info->async_recv = 0;
4769
4770         mutex_lock(&binder_procs_lock);
4771         hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4772                 if (target_proc->pid == info->pid) {
4773                         found = true;
4774                         binder_inner_proc_lock(target_proc);
4775                         txns_pending = binder_txns_pending_ilocked(target_proc);
4776                         info->sync_recv |= target_proc->sync_recv |
4777                                         (txns_pending << 1);
4778                         info->async_recv |= target_proc->async_recv;
4779                         binder_inner_proc_unlock(target_proc);
4780                 }
4781         }
4782         mutex_unlock(&binder_procs_lock);
4783
4784         if (!found)
4785                 return -EINVAL;
4786
4787         return 0;
4788 }
4789
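/* Top-level ioctl dispatcher for the binder character device. */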
4790 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4791 {
4792         int ret;
4793         struct binder_proc *proc = filp->private_data;
4794         struct binder_thread *thread;
4795         unsigned int size = _IOC_SIZE(cmd);
4796         void __user *ubuf = (void __user *)arg;
4797
4798         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4799                         proc->pid, current->pid, cmd, arg);*/
4800
4801         binder_selftest_alloc(&proc->alloc);
4802
4803         trace_binder_ioctl(cmd, arg);
4804
4805         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4806         if (ret)
4807                 goto err_unlocked;
4808
4809         thread = binder_get_thread(proc);
4810         if (thread == NULL) {
4811                 ret = -ENOMEM;
4812                 goto err;
4813         }
4814
4815         switch (cmd) {
4816         case BINDER_WRITE_READ:
4817                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4818                 if (ret)
4819                         goto err;
4820                 break;
4821         case BINDER_SET_MAX_THREADS: {
4822                 int max_threads;
4823
4824                 if (copy_from_user(&max_threads, ubuf,
4825                                    sizeof(max_threads))) {
4826                         ret = -EINVAL;
4827                         goto err;
4828                 }
4829                 binder_inner_proc_lock(proc);
4830                 proc->max_threads = max_threads;
4831                 binder_inner_proc_unlock(proc);
4832                 break;
4833         }
4834         case BINDER_SET_CONTEXT_MGR_EXT: {
4835                 struct flat_binder_object fbo;
4836
4837                 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4838                         ret = -EINVAL;
4839                         goto err;
4840                 }
4841                 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4842                 if (ret)
4843                         goto err;
4844                 break;
4845         }
4846         case BINDER_SET_CONTEXT_MGR:
4847                 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4848                 if (ret)
4849                         goto err;
4850                 break;
4851         case BINDER_THREAD_EXIT:
4852                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4853                              proc->pid, thread->pid);
4854                 binder_thread_release(proc, thread);
4855                 thread = NULL;
4856                 break;
4857         case BINDER_VERSION: {
4858                 struct binder_version __user *ver = ubuf;
4859
4860                 if (size != sizeof(struct binder_version)) {
4861                         ret = -EINVAL;
4862                         goto err;
4863                 }
4864                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4865                              &ver->protocol_version)) {
4866                         ret = -EINVAL;
4867                         goto err;
4868                 }
4869                 break;
4870         }
4871         case BINDER_GET_NODE_INFO_FOR_REF: {
4872                 struct binder_node_info_for_ref info;
4873
4874                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4875                         ret = -EFAULT;
4876                         goto err;
4877                 }
4878
4879                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4880                 if (ret < 0)
4881                         goto err;
4882
4883                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4884                         ret = -EFAULT;
4885                         goto err;
4886                 }
4887
4888                 break;
4889         }
4890         case BINDER_GET_NODE_DEBUG_INFO: {
4891                 struct binder_node_debug_info info;
4892
4893                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4894                         ret = -EFAULT;
4895                         goto err;
4896                 }
4897
4898                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4899                 if (ret < 0)
4900                         goto err;
4901
4902                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4903                         ret = -EFAULT;
4904                         goto err;
4905                 }
4906                 break;
4907         }
4908         case BINDER_FREEZE: {
4909                 struct binder_freeze_info info;
4910                 struct binder_proc **target_procs = NULL, *target_proc;
4911                 int target_procs_count = 0, i = 0;
4912
4913                 ret = 0;
4914
4915                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4916                         ret = -EFAULT;
4917                         goto err;
4918                 }
4919
4920                 mutex_lock(&binder_procs_lock);
4921                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4922                         if (target_proc->pid == info.pid)
4923                                 target_procs_count++;
4924                 }
4925
4926                 if (target_procs_count == 0) {
4927                         mutex_unlock(&binder_procs_lock);
4928                         ret = -EINVAL;
4929                         goto err;
4930                 }
4931
4932                 target_procs = kcalloc(target_procs_count,
4933                                        sizeof(struct binder_proc *),
4934                                        GFP_KERNEL);
4935
4936                 if (!target_procs) {
4937                         mutex_unlock(&binder_procs_lock);
4938                         ret = -ENOMEM;
4939                         goto err;
4940                 }
4941
4942                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4943                         if (target_proc->pid != info.pid)
4944                                 continue;
4945
4946                         binder_inner_proc_lock(target_proc);
4947                         target_proc->tmp_ref++;
4948                         binder_inner_proc_unlock(target_proc);
4949
4950                         target_procs[i++] = target_proc;
4951                 }
4952                 mutex_unlock(&binder_procs_lock);
4953
4954                 for (i = 0; i < target_procs_count; i++) {
4955                         if (ret >= 0)
4956                                 ret = binder_ioctl_freeze(&info,
4957                                                           target_procs[i]);
4958
4959                         binder_proc_dec_tmpref(target_procs[i]);
4960                 }
4961
4962                 kfree(target_procs);
4963
4964                 if (ret < 0)
4965                         goto err;
4966                 break;
4967         }
4968         case BINDER_GET_FROZEN_INFO: {
4969                 struct binder_frozen_status_info info;
4970
4971                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4972                         ret = -EFAULT;
4973                         goto err;
4974                 }
4975
4976                 ret = binder_ioctl_get_freezer_info(&info);
4977                 if (ret < 0)
4978                         goto err;
4979
4980                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4981                         ret = -EFAULT;
4982                         goto err;
4983                 }
4984                 break;
4985         }
4986         case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
4987                 uint32_t enable;
4988
4989                 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
4990                         ret = -EFAULT;
4991                         goto err;
4992                 }
4993                 binder_inner_proc_lock(proc);
4994                 proc->oneway_spam_detection_enabled = (bool)enable;
4995                 binder_inner_proc_unlock(proc);
4996                 break;
4997         }
4998         default:
4999                 ret = -EINVAL;
5000                 goto err;
5001         }
5002         ret = 0;
5003 err:
5004         if (thread)
5005                 thread->looper_need_return = false;
5006         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5007         if (ret && ret != -EINTR)
5008                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5009 err_unlocked:
5010         trace_binder_ioctl_done(ret);
5011         return ret;
5012 }
5013
5014 static void binder_vma_open(struct vm_area_struct *vma)
5015 {
5016         struct binder_proc *proc = vma->vm_private_data;
5017
5018         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5019                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5020                      proc->pid, vma->vm_start, vma->vm_end,
5021                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5022                      (unsigned long)pgprot_val(vma->vm_page_prot));
5023 }
5024
5025 static void binder_vma_close(struct vm_area_struct *vma)
5026 {
5027         struct binder_proc *proc = vma->vm_private_data;
5028
5029         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5030                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5031                      proc->pid, vma->vm_start, vma->vm_end,
5032                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5033                      (unsigned long)pgprot_val(vma->vm_page_prot));
5034         binder_alloc_vma_close(&proc->alloc);
5035 }
5036
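/*
 * Userspace must never fault pages into the binder mapping itself;
 * buffer pages are installed by the binder allocator, so any fault
 * that reaches here is treated as an error.
 */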
5037 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5038 {
5039         return VM_FAULT_SIGBUS;
5040 }
5041
5042 static const struct vm_operations_struct binder_vm_ops = {
5043         .open = binder_vma_open,
5044         .close = binder_vma_close,
5045         .fault = binder_vm_fault,
5046 };
5047
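/*
 * Set up the shared buffer mapping for a binder_proc.  Only the thread
 * group that opened the device may mmap it, writable mappings are
 * rejected (FORBIDDEN_MMAP_FLAGS), and VM_MAYWRITE is cleared so the
 * region cannot later be made writable with mprotect().  The actual
 * page management is delegated to binder_alloc_mmap_handler().
 */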
5048 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5049 {
5050         struct binder_proc *proc = filp->private_data;
5051
5052         if (proc->tsk != current->group_leader)
5053                 return -EINVAL;
5054
5055         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5056                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5057                      __func__, proc->pid, vma->vm_start, vma->vm_end,
5058                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5059                      (unsigned long)pgprot_val(vma->vm_page_prot));
5060
5061         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5062                 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5063                        proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5064                 return -EPERM;
5065         }
5066         vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5067         vma->vm_flags &= ~VM_MAYWRITE;
5068
5069         vma->vm_ops = &binder_vm_ops;
5070         vma->vm_private_data = proc;
5071
5072         return binder_alloc_mmap_handler(&proc->alloc, vma);
5073 }
5074
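/*
 * Allocate and initialize a binder_proc for the opening thread group,
 * attach it to the binder device/context backing this inode, add it to
 * the global binder_procs list, and create the per-PID debugfs and
 * binderfs log entries (only on the first open for a given PID).
 */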
5075 static int binder_open(struct inode *nodp, struct file *filp)
5076 {
5077         struct binder_proc *proc, *itr;
5078         struct binder_device *binder_dev;
5079         struct binderfs_info *info;
5080         struct dentry *binder_binderfs_dir_entry_proc = NULL;
5081         bool existing_pid = false;
5082
5083         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5084                      current->group_leader->pid, current->pid);
5085
5086         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5087         if (proc == NULL)
5088                 return -ENOMEM;
5089         spin_lock_init(&proc->inner_lock);
5090         spin_lock_init(&proc->outer_lock);
5091         get_task_struct(current->group_leader);
5092         proc->tsk = current->group_leader;
5093         proc->cred = get_cred(filp->f_cred);
5094         INIT_LIST_HEAD(&proc->todo);
5095         init_waitqueue_head(&proc->freeze_wait);
5096         proc->default_priority = task_nice(current);
5097         /* binderfs stashes devices in i_private */
5098         if (is_binderfs_device(nodp)) {
5099                 binder_dev = nodp->i_private;
5100                 info = nodp->i_sb->s_fs_info;
5101                 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5102         } else {
5103                 binder_dev = container_of(filp->private_data,
5104                                           struct binder_device, miscdev);
5105         }
5106         refcount_inc(&binder_dev->ref);
5107         proc->context = &binder_dev->context;
5108         binder_alloc_init(&proc->alloc);
5109
5110         binder_stats_created(BINDER_STAT_PROC);
5111         proc->pid = current->group_leader->pid;
5112         INIT_LIST_HEAD(&proc->delivered_death);
5113         INIT_LIST_HEAD(&proc->waiting_threads);
5114         filp->private_data = proc;
5115
5116         mutex_lock(&binder_procs_lock);
5117         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5118                 if (itr->pid == proc->pid) {
5119                         existing_pid = true;
5120                         break;
5121                 }
5122         }
5123         hlist_add_head(&proc->proc_node, &binder_procs);
5124         mutex_unlock(&binder_procs_lock);
5125
5126         if (binder_debugfs_dir_entry_proc && !existing_pid) {
5127                 char strbuf[11];
5128
5129                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5130                 /*
5131                  * proc debug entries are shared between contexts.
5132                  * Only create one for the first PID to avoid debugfs log
5133                  * spamming. The printing code will print all contexts for a
5134                  * given PID anyway, so this is not a problem.
5135                  */
5136                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5137                         binder_debugfs_dir_entry_proc,
5138                         (void *)(unsigned long)proc->pid,
5139                         &proc_fops);
5140         }
5141
5142         if (binder_binderfs_dir_entry_proc && !existing_pid) {
5143                 char strbuf[11];
5144                 struct dentry *binderfs_entry;
5145
5146                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5147                 /*
5148                  * As with debugfs, the process-specific log file is shared
5149                  * between contexts, so only create it for the first PID.
5150                  * This is fine because, as with debugfs, the log file will
5151                  * contain information on all contexts of a given PID.
5152                  */
5153                 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5154                         strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5155                 if (!IS_ERR(binderfs_entry)) {
5156                         proc->binderfs_entry = binderfs_entry;
5157                 } else {
5158                         int error;
5159
5160                         error = PTR_ERR(binderfs_entry);
5161                         pr_warn("Unable to create file %s in binderfs (error %d)\n",
5162                                 strbuf, error);
5163                 }
5164         }
5165
5166         return 0;
5167 }
5168
5169 static int binder_flush(struct file *filp, fl_owner_t id)
5170 {
5171         struct binder_proc *proc = filp->private_data;
5172
5173         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5174
5175         return 0;
5176 }
5177
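/*
 * Deferred flush work: force every thread of the proc to return to
 * userspace by setting looper_need_return and waking any thread that
 * is currently waiting for work.
 */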
5178 static void binder_deferred_flush(struct binder_proc *proc)
5179 {
5180         struct rb_node *n;
5181         int wake_count = 0;
5182
5183         binder_inner_proc_lock(proc);
5184         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5185                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5186
5187                 thread->looper_need_return = true;
5188                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5189                         wake_up_interruptible(&thread->wait);
5190                         wake_count++;
5191                 }
5192         }
5193         binder_inner_proc_unlock(proc);
5194
5195         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5196                      "binder_flush: %d woke %d threads\n", proc->pid,
5197                      wake_count);
5198 }
5199
5200 static int binder_release(struct inode *nodp, struct file *filp)
5201 {
5202         struct binder_proc *proc = filp->private_data;
5203
5204         debugfs_remove(proc->debugfs_entry);
5205
5206         if (proc->binderfs_entry) {
5207                 binderfs_remove_file(proc->binderfs_entry);
5208                 proc->binderfs_entry = NULL;
5209         }
5210
5211         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5212
5213         return 0;
5214 }
5215
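/*
 * Release a node of a dying process.  If the node has no references
 * left (other than the caller's temporary ref) it is freed immediately;
 * otherwise it is moved to the binder_dead_nodes list and a
 * BINDER_WORK_DEAD_BINDER item is queued for every reference that
 * registered a death notification.  Returns the caller's running count
 * of incoming references, incremented by the references found here.
 */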
5216 static int binder_node_release(struct binder_node *node, int refs)
5217 {
5218         struct binder_ref *ref;
5219         int death = 0;
5220         struct binder_proc *proc = node->proc;
5221
5222         binder_release_work(proc, &node->async_todo);
5223
5224         binder_node_lock(node);
5225         binder_inner_proc_lock(proc);
5226         binder_dequeue_work_ilocked(&node->work);
5227         /*
5228          * The caller must have taken a temporary ref on the node.
5229          */
5230         BUG_ON(!node->tmp_refs);
5231         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5232                 binder_inner_proc_unlock(proc);
5233                 binder_node_unlock(node);
5234                 binder_free_node(node);
5235
5236                 return refs;
5237         }
5238
5239         node->proc = NULL;
5240         node->local_strong_refs = 0;
5241         node->local_weak_refs = 0;
5242         binder_inner_proc_unlock(proc);
5243
5244         spin_lock(&binder_dead_nodes_lock);
5245         hlist_add_head(&node->dead_node, &binder_dead_nodes);
5246         spin_unlock(&binder_dead_nodes_lock);
5247
5248         hlist_for_each_entry(ref, &node->refs, node_entry) {
5249                 refs++;
5250                 /*
5251                  * Need the node lock to synchronize
5252                  * with new notification requests and the
5253                  * inner lock to synchronize with queued
5254                  * death notifications.
5255                  */
5256                 binder_inner_proc_lock(ref->proc);
5257                 if (!ref->death) {
5258                         binder_inner_proc_unlock(ref->proc);
5259                         continue;
5260                 }
5261
5262                 death++;
5263
5264                 BUG_ON(!list_empty(&ref->death->work.entry));
5265                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5266                 binder_enqueue_work_ilocked(&ref->death->work,
5267                                             &ref->proc->todo);
5268                 binder_wakeup_proc_ilocked(ref->proc);
5269                 binder_inner_proc_unlock(ref->proc);
5270         }
5271
5272         binder_debug(BINDER_DEBUG_DEAD_BINDER,
5273                      "node %d now dead, refs %d, death %d\n",
5274                      node->debug_id, refs, death);
5275         binder_node_unlock(node);
5276         binder_put_node(node);
5277
5278         return refs;
5279 }
5280
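/*
 * Deferred release work: tear down a binder_proc after its file has
 * been closed.  The proc is removed from binder_procs and from the
 * context manager slot (if it held it), all of its threads, nodes and
 * references are released, remaining work items are flushed, and
 * finally the temporary proc reference taken here is dropped, freeing
 * the proc once all other temporary references are gone.
 */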
5281 static void binder_deferred_release(struct binder_proc *proc)
5282 {
5283         struct binder_context *context = proc->context;
5284         struct rb_node *n;
5285         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5286
5287         mutex_lock(&binder_procs_lock);
5288         hlist_del(&proc->proc_node);
5289         mutex_unlock(&binder_procs_lock);
5290
5291         mutex_lock(&context->context_mgr_node_lock);
5292         if (context->binder_context_mgr_node &&
5293             context->binder_context_mgr_node->proc == proc) {
5294                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5295                              "%s: %d context_mgr_node gone\n",
5296                              __func__, proc->pid);
5297                 context->binder_context_mgr_node = NULL;
5298         }
5299         mutex_unlock(&context->context_mgr_node_lock);
5300         binder_inner_proc_lock(proc);
5301         /*
5302          * Make sure proc stays alive after we
5303          * remove all the threads
5304          */
5305         proc->tmp_ref++;
5306
5307         proc->is_dead = true;
5308         proc->is_frozen = false;
5309         proc->sync_recv = false;
5310         proc->async_recv = false;
5311         threads = 0;
5312         active_transactions = 0;
5313         while ((n = rb_first(&proc->threads))) {
5314                 struct binder_thread *thread;
5315
5316                 thread = rb_entry(n, struct binder_thread, rb_node);
5317                 binder_inner_proc_unlock(proc);
5318                 threads++;
5319                 active_transactions += binder_thread_release(proc, thread);
5320                 binder_inner_proc_lock(proc);
5321         }
5322
5323         nodes = 0;
5324         incoming_refs = 0;
5325         while ((n = rb_first(&proc->nodes))) {
5326                 struct binder_node *node;
5327
5328                 node = rb_entry(n, struct binder_node, rb_node);
5329                 nodes++;
5330                 /*
5331                  * take a temporary ref on the node before
5332                  * calling binder_node_release() which will either
5333                  * kfree() the node or call binder_put_node()
5334                  */
5335                 binder_inc_node_tmpref_ilocked(node);
5336                 rb_erase(&node->rb_node, &proc->nodes);
5337                 binder_inner_proc_unlock(proc);
5338                 incoming_refs = binder_node_release(node, incoming_refs);
5339                 binder_inner_proc_lock(proc);
5340         }
5341         binder_inner_proc_unlock(proc);
5342
5343         outgoing_refs = 0;
5344         binder_proc_lock(proc);
5345         while ((n = rb_first(&proc->refs_by_desc))) {
5346                 struct binder_ref *ref;
5347
5348                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5349                 outgoing_refs++;
5350                 binder_cleanup_ref_olocked(ref);
5351                 binder_proc_unlock(proc);
5352                 binder_free_ref(ref);
5353                 binder_proc_lock(proc);
5354         }
5355         binder_proc_unlock(proc);
5356
5357         binder_release_work(proc, &proc->todo);
5358         binder_release_work(proc, &proc->delivered_death);
5359
5360         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5361                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5362                      __func__, proc->pid, threads, nodes, incoming_refs,
5363                      outgoing_refs, active_transactions);
5364
5365         binder_proc_dec_tmpref(proc);
5366 }
5367
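/*
 * Workqueue handler that drains binder_deferred_list, running the
 * deferred flush and/or release work recorded for each proc by
 * binder_defer_work().
 */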
5368 static void binder_deferred_func(struct work_struct *work)
5369 {
5370         struct binder_proc *proc;
5371
5372         int defer;
5373
5374         do {
5375                 mutex_lock(&binder_deferred_lock);
5376                 if (!hlist_empty(&binder_deferred_list)) {
5377                         proc = hlist_entry(binder_deferred_list.first,
5378                                         struct binder_proc, deferred_work_node);
5379                         hlist_del_init(&proc->deferred_work_node);
5380                         defer = proc->deferred_work;
5381                         proc->deferred_work = 0;
5382                 } else {
5383                         proc = NULL;
5384                         defer = 0;
5385                 }
5386                 mutex_unlock(&binder_deferred_lock);
5387
5388                 if (defer & BINDER_DEFERRED_FLUSH)
5389                         binder_deferred_flush(proc);
5390
5391                 if (defer & BINDER_DEFERRED_RELEASE)
5392                         binder_deferred_release(proc); /* frees proc */
5393         } while (proc);
5394 }
5395 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5396
5397 static void
5398 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5399 {
5400         mutex_lock(&binder_deferred_lock);
5401         proc->deferred_work |= defer;
5402         if (hlist_unhashed(&proc->deferred_work_node)) {
5403                 hlist_add_head(&proc->deferred_work_node,
5404                                 &binder_deferred_list);
5405                 schedule_work(&binder_deferred_work);
5406         }
5407         mutex_unlock(&binder_deferred_lock);
5408 }
5409
5410 static void print_binder_transaction_ilocked(struct seq_file *m,
5411                                              struct binder_proc *proc,
5412                                              const char *prefix,
5413                                              struct binder_transaction *t)
5414 {
5415         struct binder_proc *to_proc;
5416         struct binder_buffer *buffer = t->buffer;
5417
5418         spin_lock(&t->lock);
5419         to_proc = t->to_proc;
5420         seq_printf(m,
5421                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5422                    prefix, t->debug_id, t,
5423                    t->from ? t->from->proc->pid : 0,
5424                    t->from ? t->from->pid : 0,
5425                    to_proc ? to_proc->pid : 0,
5426                    t->to_thread ? t->to_thread->pid : 0,
5427                    t->code, t->flags, t->priority, t->need_reply);
5428         spin_unlock(&t->lock);
5429
5430         if (proc != to_proc) {
5431                 /*
5432                  * We can only safely dereference buffer if we are holding
5433                  * the correct proc inner lock for this node.
5434                  */
5435                 seq_puts(m, "\n");
5436                 return;
5437         }
5438
5439         if (buffer == NULL) {
5440                 seq_puts(m, " buffer free\n");
5441                 return;
5442         }
5443         if (buffer->target_node)
5444                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5445         seq_printf(m, " size %zd:%zd data %pK\n",
5446                    buffer->data_size, buffer->offsets_size,
5447                    buffer->user_data);
5448 }
5449
5450 static void print_binder_work_ilocked(struct seq_file *m,
5451                                      struct binder_proc *proc,
5452                                      const char *prefix,
5453                                      const char *transaction_prefix,
5454                                      struct binder_work *w)
5455 {
5456         struct binder_node *node;
5457         struct binder_transaction *t;
5458
5459         switch (w->type) {
5460         case BINDER_WORK_TRANSACTION:
5461                 t = container_of(w, struct binder_transaction, work);
5462                 print_binder_transaction_ilocked(
5463                                 m, proc, transaction_prefix, t);
5464                 break;
5465         case BINDER_WORK_RETURN_ERROR: {
5466                 struct binder_error *e = container_of(
5467                                 w, struct binder_error, work);
5468
5469                 seq_printf(m, "%stransaction error: %u\n",
5470                            prefix, e->cmd);
5471         } break;
5472         case BINDER_WORK_TRANSACTION_COMPLETE:
5473                 seq_printf(m, "%stransaction complete\n", prefix);
5474                 break;
5475         case BINDER_WORK_NODE:
5476                 node = container_of(w, struct binder_node, work);
5477                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5478                            prefix, node->debug_id,
5479                            (u64)node->ptr, (u64)node->cookie);
5480                 break;
5481         case BINDER_WORK_DEAD_BINDER:
5482                 seq_printf(m, "%shas dead binder\n", prefix);
5483                 break;
5484         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5485                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5486                 break;
5487         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5488                 seq_printf(m, "%shas cleared death notification\n", prefix);
5489                 break;
5490         default:
5491                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5492                 break;
5493         }
5494 }
5495
5496 static void print_binder_thread_ilocked(struct seq_file *m,
5497                                         struct binder_thread *thread,
5498                                         int print_always)
5499 {
5500         struct binder_transaction *t;
5501         struct binder_work *w;
5502         size_t start_pos = m->count;
5503         size_t header_pos;
5504
5505         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5506                         thread->pid, thread->looper,
5507                         thread->looper_need_return,
5508                         atomic_read(&thread->tmp_ref));
5509         header_pos = m->count;
5510         t = thread->transaction_stack;
5511         while (t) {
5512                 if (t->from == thread) {
5513                         print_binder_transaction_ilocked(m, thread->proc,
5514                                         "    outgoing transaction", t);
5515                         t = t->from_parent;
5516                 } else if (t->to_thread == thread) {
5517                         print_binder_transaction_ilocked(m, thread->proc,
5518                                                  "    incoming transaction", t);
5519                         t = t->to_parent;
5520                 } else {
5521                         print_binder_transaction_ilocked(m, thread->proc,
5522                                         "    bad transaction", t);
5523                         t = NULL;
5524                 }
5525         }
5526         list_for_each_entry(w, &thread->todo, entry) {
5527                 print_binder_work_ilocked(m, thread->proc, "    ",
5528                                           "    pending transaction", w);
5529         }
5530         if (!print_always && m->count == header_pos)
5531                 m->count = start_pos;
5532 }
5533
5534 static void print_binder_node_nilocked(struct seq_file *m,
5535                                        struct binder_node *node)
5536 {
5537         struct binder_ref *ref;
5538         struct binder_work *w;
5539         int count;
5540
5541         count = 0;
5542         hlist_for_each_entry(ref, &node->refs, node_entry)
5543                 count++;
5544
5545         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5546                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5547                    node->has_strong_ref, node->has_weak_ref,
5548                    node->local_strong_refs, node->local_weak_refs,
5549                    node->internal_strong_refs, count, node->tmp_refs);
5550         if (count) {
5551                 seq_puts(m, " proc");
5552                 hlist_for_each_entry(ref, &node->refs, node_entry)
5553                         seq_printf(m, " %d", ref->proc->pid);
5554         }
5555         seq_puts(m, "\n");
5556         if (node->proc) {
5557                 list_for_each_entry(w, &node->async_todo, entry)
5558                         print_binder_work_ilocked(m, node->proc, "    ",
5559                                           "    pending async transaction", w);
5560         }
5561 }
5562
5563 static void print_binder_ref_olocked(struct seq_file *m,
5564                                      struct binder_ref *ref)
5565 {
5566         binder_node_lock(ref->node);
5567         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5568                    ref->data.debug_id, ref->data.desc,
5569                    ref->node->proc ? "" : "dead ",
5570                    ref->node->debug_id, ref->data.strong,
5571                    ref->data.weak, ref->death);
5572         binder_node_unlock(ref->node);
5573 }
5574
5575 static void print_binder_proc(struct seq_file *m,
5576                               struct binder_proc *proc, int print_all)
5577 {
5578         struct binder_work *w;
5579         struct rb_node *n;
5580         size_t start_pos = m->count;
5581         size_t header_pos;
5582         struct binder_node *last_node = NULL;
5583
5584         seq_printf(m, "proc %d\n", proc->pid);
5585         seq_printf(m, "context %s\n", proc->context->name);
5586         header_pos = m->count;
5587
5588         binder_inner_proc_lock(proc);
5589         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5590                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5591                                                 rb_node), print_all);
5592
5593         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5594                 struct binder_node *node = rb_entry(n, struct binder_node,
5595                                                     rb_node);
5596                 if (!print_all && !node->has_async_transaction)
5597                         continue;
5598
5599                 /*
5600                  * take a temporary reference on the node so it
5601                  * survives and isn't removed from the tree
5602                  * while we print it.
5603                  */
5604                 binder_inc_node_tmpref_ilocked(node);
5605                 /* Need to drop inner lock to take node lock */
5606                 binder_inner_proc_unlock(proc);
5607                 if (last_node)
5608                         binder_put_node(last_node);
5609                 binder_node_inner_lock(node);
5610                 print_binder_node_nilocked(m, node);
5611                 binder_node_inner_unlock(node);
5612                 last_node = node;
5613                 binder_inner_proc_lock(proc);
5614         }
5615         binder_inner_proc_unlock(proc);
5616         if (last_node)
5617                 binder_put_node(last_node);
5618
5619         if (print_all) {
5620                 binder_proc_lock(proc);
5621                 for (n = rb_first(&proc->refs_by_desc);
5622                      n != NULL;
5623                      n = rb_next(n))
5624                         print_binder_ref_olocked(m, rb_entry(n,
5625                                                             struct binder_ref,
5626                                                             rb_node_desc));
5627                 binder_proc_unlock(proc);
5628         }
5629         binder_alloc_print_allocated(m, &proc->alloc);
5630         binder_inner_proc_lock(proc);
5631         list_for_each_entry(w, &proc->todo, entry)
5632                 print_binder_work_ilocked(m, proc, "  ",
5633                                           "  pending transaction", w);
5634         list_for_each_entry(w, &proc->delivered_death, entry) {
5635                 seq_puts(m, "  has delivered dead binder\n");
5636                 break;
5637         }
5638         binder_inner_proc_unlock(proc);
5639         if (!print_all && m->count == header_pos)
5640                 m->count = start_pos;
5641 }
5642
5643 static const char * const binder_return_strings[] = {
5644         "BR_ERROR",
5645         "BR_OK",
5646         "BR_TRANSACTION",
5647         "BR_REPLY",
5648         "BR_ACQUIRE_RESULT",
5649         "BR_DEAD_REPLY",
5650         "BR_TRANSACTION_COMPLETE",
5651         "BR_INCREFS",
5652         "BR_ACQUIRE",
5653         "BR_RELEASE",
5654         "BR_DECREFS",
5655         "BR_ATTEMPT_ACQUIRE",
5656         "BR_NOOP",
5657         "BR_SPAWN_LOOPER",
5658         "BR_FINISHED",
5659         "BR_DEAD_BINDER",
5660         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5661         "BR_FAILED_REPLY",
5662         "BR_FROZEN_REPLY",
5663         "BR_ONEWAY_SPAM_SUSPECT",
5664 };
5665
5666 static const char * const binder_command_strings[] = {
5667         "BC_TRANSACTION",
5668         "BC_REPLY",
5669         "BC_ACQUIRE_RESULT",
5670         "BC_FREE_BUFFER",
5671         "BC_INCREFS",
5672         "BC_ACQUIRE",
5673         "BC_RELEASE",
5674         "BC_DECREFS",
5675         "BC_INCREFS_DONE",
5676         "BC_ACQUIRE_DONE",
5677         "BC_ATTEMPT_ACQUIRE",
5678         "BC_REGISTER_LOOPER",
5679         "BC_ENTER_LOOPER",
5680         "BC_EXIT_LOOPER",
5681         "BC_REQUEST_DEATH_NOTIFICATION",
5682         "BC_CLEAR_DEATH_NOTIFICATION",
5683         "BC_DEAD_BINDER_DONE",
5684         "BC_TRANSACTION_SG",
5685         "BC_REPLY_SG",
5686 };
5687
5688 static const char * const binder_objstat_strings[] = {
5689         "proc",
5690         "thread",
5691         "node",
5692         "ref",
5693         "death",
5694         "transaction",
5695         "transaction_complete"
5696 };
5697
5698 static void print_binder_stats(struct seq_file *m, const char *prefix,
5699                                struct binder_stats *stats)
5700 {
5701         int i;
5702
5703         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5704                      ARRAY_SIZE(binder_command_strings));
5705         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5706                 int temp = atomic_read(&stats->bc[i]);
5707
5708                 if (temp)
5709                         seq_printf(m, "%s%s: %d\n", prefix,
5710                                    binder_command_strings[i], temp);
5711         }
5712
5713         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5714                      ARRAY_SIZE(binder_return_strings));
5715         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5716                 int temp = atomic_read(&stats->br[i]);
5717
5718                 if (temp)
5719                         seq_printf(m, "%s%s: %d\n", prefix,
5720                                    binder_return_strings[i], temp);
5721         }
5722
5723         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5724                      ARRAY_SIZE(binder_objstat_strings));
5725         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5726                      ARRAY_SIZE(stats->obj_deleted));
5727         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5728                 int created = atomic_read(&stats->obj_created[i]);
5729                 int deleted = atomic_read(&stats->obj_deleted[i]);
5730
5731                 if (created || deleted)
5732                         seq_printf(m, "%s%s: active %d total %d\n",
5733                                 prefix,
5734                                 binder_objstat_strings[i],
5735                                 created - deleted,
5736                                 created);
5737         }
5738 }
5739
5740 static void print_binder_proc_stats(struct seq_file *m,
5741                                     struct binder_proc *proc)
5742 {
5743         struct binder_work *w;
5744         struct binder_thread *thread;
5745         struct rb_node *n;
5746         int count, strong, weak, ready_threads;
5747         size_t free_async_space =
5748                 binder_alloc_get_free_async_space(&proc->alloc);
5749
5750         seq_printf(m, "proc %d\n", proc->pid);
5751         seq_printf(m, "context %s\n", proc->context->name);
5752         count = 0;
5753         ready_threads = 0;
5754         binder_inner_proc_lock(proc);
5755         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5756                 count++;
5757
5758         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5759                 ready_threads++;
5760
5761         seq_printf(m, "  threads: %d\n", count);
5762         seq_printf(m, "  requested threads: %d+%d/%d\n"
5763                         "  ready threads %d\n"
5764                         "  free async space %zd\n", proc->requested_threads,
5765                         proc->requested_threads_started, proc->max_threads,
5766                         ready_threads,
5767                         free_async_space);
5768         count = 0;
5769         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5770                 count++;
5771         binder_inner_proc_unlock(proc);
5772         seq_printf(m, "  nodes: %d\n", count);
5773         count = 0;
5774         strong = 0;
5775         weak = 0;
5776         binder_proc_lock(proc);
5777         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5778                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5779                                                   rb_node_desc);
5780                 count++;
5781                 strong += ref->data.strong;
5782                 weak += ref->data.weak;
5783         }
5784         binder_proc_unlock(proc);
5785         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5786
5787         count = binder_alloc_get_allocated_count(&proc->alloc);
5788         seq_printf(m, "  buffers: %d\n", count);
5789
5790         binder_alloc_print_pages(m, &proc->alloc);
5791
5792         count = 0;
5793         binder_inner_proc_lock(proc);
5794         list_for_each_entry(w, &proc->todo, entry) {
5795                 if (w->type == BINDER_WORK_TRANSACTION)
5796                         count++;
5797         }
5798         binder_inner_proc_unlock(proc);
5799         seq_printf(m, "  pending transactions: %d\n", count);
5800
5801         print_binder_stats(m, "  ", &proc->stats);
5802 }
5803
5804
5805 int binder_state_show(struct seq_file *m, void *unused)
5806 {
5807         struct binder_proc *proc;
5808         struct binder_node *node;
5809         struct binder_node *last_node = NULL;
5810
5811         seq_puts(m, "binder state:\n");
5812
5813         spin_lock(&binder_dead_nodes_lock);
5814         if (!hlist_empty(&binder_dead_nodes))
5815                 seq_puts(m, "dead nodes:\n");
5816         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5817                 /*
5818                  * take a temporary reference on the node so it
5819                  * survives and isn't removed from the list
5820                  * while we print it.
5821                  */
5822                 node->tmp_refs++;
5823                 spin_unlock(&binder_dead_nodes_lock);
5824                 if (last_node)
5825                         binder_put_node(last_node);
5826                 binder_node_lock(node);
5827                 print_binder_node_nilocked(m, node);
5828                 binder_node_unlock(node);
5829                 last_node = node;
5830                 spin_lock(&binder_dead_nodes_lock);
5831         }
5832         spin_unlock(&binder_dead_nodes_lock);
5833         if (last_node)
5834                 binder_put_node(last_node);
5835
5836         mutex_lock(&binder_procs_lock);
5837         hlist_for_each_entry(proc, &binder_procs, proc_node)
5838                 print_binder_proc(m, proc, 1);
5839         mutex_unlock(&binder_procs_lock);
5840
5841         return 0;
5842 }
5843
5844 int binder_stats_show(struct seq_file *m, void *unused)
5845 {
5846         struct binder_proc *proc;
5847
5848         seq_puts(m, "binder stats:\n");
5849
5850         print_binder_stats(m, "", &binder_stats);
5851
5852         mutex_lock(&binder_procs_lock);
5853         hlist_for_each_entry(proc, &binder_procs, proc_node)
5854                 print_binder_proc_stats(m, proc);
5855         mutex_unlock(&binder_procs_lock);
5856
5857         return 0;
5858 }
5859
5860 int binder_transactions_show(struct seq_file *m, void *unused)
5861 {
5862         struct binder_proc *proc;
5863
5864         seq_puts(m, "binder transactions:\n");
5865         mutex_lock(&binder_procs_lock);
5866         hlist_for_each_entry(proc, &binder_procs, proc_node)
5867                 print_binder_proc(m, proc, 0);
5868         mutex_unlock(&binder_procs_lock);
5869
5870         return 0;
5871 }
5872
5873 static int proc_show(struct seq_file *m, void *unused)
5874 {
5875         struct binder_proc *itr;
5876         int pid = (unsigned long)m->private;
5877
5878         mutex_lock(&binder_procs_lock);
5879         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5880                 if (itr->pid == pid) {
5881                         seq_puts(m, "binder proc state:\n");
5882                         print_binder_proc(m, itr, 1);
5883                 }
5884         }
5885         mutex_unlock(&binder_procs_lock);
5886
5887         return 0;
5888 }
5889
5890 static void print_binder_transaction_log_entry(struct seq_file *m,
5891                                         struct binder_transaction_log_entry *e)
5892 {
5893         int debug_id = READ_ONCE(e->debug_id_done);
5894         /*
5895          * read barrier to guarantee debug_id_done read before
5896          * we print the log values
5897          */
5898         smp_rmb();
5899         seq_printf(m,
5900                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5901                    e->debug_id, (e->call_type == 2) ? "reply" :
5902                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5903                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5904                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5905                    e->return_error, e->return_error_param,
5906                    e->return_error_line);
5907         /*
5908          * read barrier to guarantee the read of debug_id_done happens
5909          * after we are done printing the fields of the entry
5910          */
5911         smp_rmb();
5912         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5913                         "\n" : " (incomplete)\n");
5914 }
5915
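/*
 * log->entry is a ring buffer indexed by log->cur.  Printing starts at
 * the oldest entry: index 0 if the log has not wrapped yet, otherwise
 * the slot just after the most recent entry.
 */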
5916 int binder_transaction_log_show(struct seq_file *m, void *unused)
5917 {
5918         struct binder_transaction_log *log = m->private;
5919         unsigned int log_cur = atomic_read(&log->cur);
5920         unsigned int count;
5921         unsigned int cur;
5922         int i;
5923
5924         count = log_cur + 1;
5925         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5926                 0 : count % ARRAY_SIZE(log->entry);
5927         if (count > ARRAY_SIZE(log->entry) || log->full)
5928                 count = ARRAY_SIZE(log->entry);
5929         for (i = 0; i < count; i++) {
5930                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5931
5932                 print_binder_transaction_log_entry(m, &log->entry[index]);
5933         }
5934         return 0;
5935 }
5936
5937 const struct file_operations binder_fops = {
5938         .owner = THIS_MODULE,
5939         .poll = binder_poll,
5940         .unlocked_ioctl = binder_ioctl,
5941         .compat_ioctl = compat_ptr_ioctl,
5942         .mmap = binder_mmap,
5943         .open = binder_open,
5944         .flush = binder_flush,
5945         .release = binder_release,
5946 };
5947
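/*
 * Allocate a binder_device, register it as a dynamic-minor misc device
 * under the given name, initialize its context and add it to the
 * global binder_devices list.
 */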
5948 static int __init init_binder_device(const char *name)
5949 {
5950         int ret;
5951         struct binder_device *binder_device;
5952
5953         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5954         if (!binder_device)
5955                 return -ENOMEM;
5956
5957         binder_device->miscdev.fops = &binder_fops;
5958         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5959         binder_device->miscdev.name = name;
5960
5961         refcount_set(&binder_device->ref, 1);
5962         binder_device->context.binder_context_mgr_uid = INVALID_UID;
5963         binder_device->context.name = name;
5964         mutex_init(&binder_device->context.context_mgr_node_lock);
5965
5966         ret = misc_register(&binder_device->miscdev);
5967         if (ret < 0) {
5968                 kfree(binder_device);
5969                 return ret;
5970         }
5971
5972         hlist_add_head(&binder_device->hlist, &binder_devices);
5973
5974         return ret;
5975 }
5976
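/*
 * Module/boot-time initialization: set up the binder_alloc shrinker,
 * create the debugfs hierarchy, register the char devices named in
 * binder_devices_param (unless CONFIG_ANDROID_BINDERFS is enabled, in
 * which case binderfs handles device creation), and initialize
 * binderfs itself.
 */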
5977 static int __init binder_init(void)
5978 {
5979         int ret;
5980         char *device_name, *device_tmp;
5981         struct binder_device *device;
5982         struct hlist_node *tmp;
5983         char *device_names = NULL;
5984
5985         ret = binder_alloc_shrinker_init();
5986         if (ret)
5987                 return ret;
5988
5989         atomic_set(&binder_transaction_log.cur, ~0U);
5990         atomic_set(&binder_transaction_log_failed.cur, ~0U);
5991
5992         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5993         if (binder_debugfs_dir_entry_root)
5994                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5995                                                  binder_debugfs_dir_entry_root);
5996
5997         if (binder_debugfs_dir_entry_root) {
5998                 debugfs_create_file("state",
5999                                     0444,
6000                                     binder_debugfs_dir_entry_root,
6001                                     NULL,
6002                                     &binder_state_fops);
6003                 debugfs_create_file("stats",
6004                                     0444,
6005                                     binder_debugfs_dir_entry_root,
6006                                     NULL,
6007                                     &binder_stats_fops);
6008                 debugfs_create_file("transactions",
6009                                     0444,
6010                                     binder_debugfs_dir_entry_root,
6011                                     NULL,
6012                                     &binder_transactions_fops);
6013                 debugfs_create_file("transaction_log",
6014                                     0444,
6015                                     binder_debugfs_dir_entry_root,
6016                                     &binder_transaction_log,
6017                                     &binder_transaction_log_fops);
6018                 debugfs_create_file("failed_transaction_log",
6019                                     0444,
6020                                     binder_debugfs_dir_entry_root,
6021                                     &binder_transaction_log_failed,
6022                                     &binder_transaction_log_fops);
6023         }
6024
6025         if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6026             strcmp(binder_devices_param, "") != 0) {
6027                 /*
6028                  * Copy the module parameter string, because we don't want to
6029                  * tokenize it in-place.
6030                  */
6031                 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6032                 if (!device_names) {
6033                         ret = -ENOMEM;
6034                         goto err_alloc_device_names_failed;
6035                 }
6036
6037                 device_tmp = device_names;
6038                 while ((device_name = strsep(&device_tmp, ","))) {
6039                         ret = init_binder_device(device_name);
6040                         if (ret)
6041                                 goto err_init_binder_device_failed;
6042                 }
6043         }
6044
6045         ret = init_binderfs();
6046         if (ret)
6047                 goto err_init_binder_device_failed;
6048
6049         return ret;
6050
6051 err_init_binder_device_failed:
6052         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6053                 misc_deregister(&device->miscdev);
6054                 hlist_del(&device->hlist);
6055                 kfree(device);
6056         }
6057
6058         kfree(device_names);
6059
6060 err_alloc_device_names_failed:
6061         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6062
6063         return ret;
6064 }
6065
6066 device_initcall(binder_init);
6067
6068 #define CREATE_TRACE_POINTS
6069 #include "binder_trace.h"
6070
6071 MODULE_LICENSE("GPL v2");